diff --git a/.flake8 b/.flake8 index 32986c792..90316de21 100644 --- a/.flake8 +++ b/.flake8 @@ -1,28 +1,29 @@ # -*- coding: utf-8 -*- -# -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# https://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -# Generated by synthtool. DO NOT EDIT! +# [flake8] +# TODO(https://github.com/googleapis/gapic-generator-python/issues/2333): +# Resolve flake8 lint issues ignore = E203, E231, E266, E501, W503 exclude = - # Exclude generated code. - **/proto/** + # TODO(https://github.com/googleapis/gapic-generator-python/issues/2333): + # Ensure that generated code passes flake8 lint **/gapic/** **/services/** **/types/** + # Exclude Protobuf gencode *_pb2.py # Standard linting exemptions. diff --git a/MANIFEST.in b/MANIFEST.in index d6814cd60..dae249ec8 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,25 +1,20 @@ # -*- coding: utf-8 -*- -# -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# https://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -# Generated by synthtool. DO NOT EDIT! +# include README.rst LICENSE -recursive-include google *.json *.proto py.typed +recursive-include google *.py *.pyi *.json *.proto py.typed recursive-include tests * global-exclude *.py[co] global-exclude __pycache__ - -# Exclude scripts for samples readmegen -prune scripts/readme-gen diff --git a/google/cloud/_storage_v2/services/storage/async_client.py b/google/cloud/_storage_v2/services/storage/async_client.py index 4244fa791..c71ee0472 100644 --- a/google/cloud/_storage_v2/services/storage/async_client.py +++ b/google/cloud/_storage_v2/services/storage/async_client.py @@ -75,8 +75,8 @@ class StorageAsyncClient: The Cloud Storage gRPC API allows applications to read and write data through the abstractions of buckets and objects. For a - description of these abstractions please see - https://cloud.google.com/storage/docs. + description of these abstractions please see `Cloud Storage + documentation <https://cloud.google.com/storage/docs>`__. Resources are named as follows: @@ -85,17 +85,24 @@ class StorageAsyncClient: ``projects/my-string-id``. - Buckets are named using string names of the form: - ``projects/{project}/buckets/{bucket}`` For globally unique - buckets, ``_`` may be substituted for the project. + ``projects/{project}/buckets/{bucket}``. For globally unique + buckets, ``_`` might be substituted for the project.
- Objects are uniquely identified by their name along with the name of the bucket they belong to, as separate strings in this API. For example: - ReadObjectRequest { bucket: 'projects/\_/buckets/my-bucket' - object: 'my-object' } Note that object names can contain ``/`` - characters, which are treated as any other character (no special - directory semantics). + :: + + ReadObjectRequest { + bucket: 'projects/_/buckets/my-bucket' + object: 'my-object' + } + + Note that object names can contain ``/`` characters, which are + treated as any other character (no special directory semantics). """ _client: StorageClient @@ -324,7 +331,28 @@ async def delete_bucket( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Permanently deletes an empty bucket. + r"""Permanently deletes an empty bucket. The request fails if there + are any live or noncurrent objects in the bucket, but the + request succeeds if the bucket only contains soft-deleted + objects or incomplete uploads, such as ongoing XML API multipart + uploads. Does not permanently delete soft-deleted objects. + + When this API is used to delete a bucket containing an object + that has a soft delete policy enabled, the object becomes soft + deleted, and the ``softDeleteTime`` and ``hardDeleteTime`` + properties are set on the object. + + Objects and multipart uploads that were in the bucket at the + time of deletion are also retained for the specified retention + duration. When a soft-deleted bucket reaches the end of its + retention duration, it is permanently deleted. The + ``hardDeleteTime`` of the bucket always equals or exceeds the + expiration time of the last soft-deleted object in the bucket. + + **IAM Permissions**: + + Requires ``storage.buckets.delete`` IAM permission on the + bucket. .. code-block:: python @@ -351,7 +379,8 @@ async def sample_delete_bucket(): Args: request (Optional[Union[google.cloud._storage_v2.types.DeleteBucketRequest, dict]]): - The request object. Request message for DeleteBucket. + The request object. Request message for + [DeleteBucket][google.storage.v2.Storage.DeleteBucket]. name (:class:`str`): Required. Name of a bucket to delete. This corresponds to the ``name`` field @@ -428,6 +457,16 @@ async def get_bucket( ) -> storage.Bucket: r"""Returns metadata for the specified bucket. + **IAM Permissions**: + + Requires ``storage.buckets.get`` IAM permission on the bucket. + Additionally, to return specific bucket metadata, the + authenticated user must have the following permissions: + + - To return the IAM policies: ``storage.buckets.getIamPolicy`` + - To return the bucket IP filtering rules: + ``storage.buckets.getIpFilter`` .. code-block:: python # This snippet has been automatically generated and should be regarded as a @@ -456,7 +495,8 @@ async def sample_get_bucket(): Args: request (Optional[Union[google.cloud._storage_v2.types.GetBucketRequest, dict]]): - The request object. Request message for GetBucket. + The request object. Request message for + [GetBucket][google.storage.v2.Storage.GetBucket]. name (:class:`str`): Required. Name of a bucket. This corresponds to the ``name`` field @@ -542,6 +582,17 @@ async def create_bucket( ) -> storage.Bucket: r"""Creates a new bucket. + **IAM Permissions**: + + Requires ``storage.buckets.create`` IAM permission on the + bucket.
Additionally, to enable specific bucket features, the + authenticated user must have the following permissions: + + - To enable object retention using the ``enableObjectRetention`` + query parameter: ``storage.buckets.enableObjectRetention`` + - To set the bucket IP filtering rules: + ``storage.buckets.setIpFilter`` + .. code-block:: python # This snippet has been automatically generated and should be regarded as a @@ -571,10 +622,11 @@ async def sample_create_bucket(): Args: request (Optional[Union[google.cloud._storage_v2.types.CreateBucketRequest, dict]]): - The request object. Request message for CreateBucket. + The request object. Request message for + [CreateBucket][google.storage.v2.Storage.CreateBucket]. parent (:class:`str`): - Required. The project to which this bucket will belong. - This field must either be empty or ``projects/_``. The + Required. The project to which this bucket belongs. This + field must either be empty or ``projects/_``. The project ID that owns this bucket should be specified in the ``bucket.project`` field. @@ -584,8 +636,8 @@ async def sample_create_bucket(): bucket (:class:`google.cloud._storage_v2.types.Bucket`): Optional. Properties of the new bucket being inserted. The name of the bucket is specified in the ``bucket_id`` - field. Populating ``bucket.name`` field will result in - an error. The project of the bucket must be specified in + field. Populating ``bucket.name`` field results in an + error. The project of the bucket must be specified in the ``bucket.project`` field. This field must be in ``projects/{projectIdentifier}`` format, {projectIdentifier} can be the project ID or project @@ -596,10 +648,10 @@ async def sample_create_bucket(): on the ``request`` instance; if ``request`` is provided, this should not be set. bucket_id (:class:`str`): - Required. The ID to use for this bucket, which will - become the final component of the bucket's resource - name. For example, the value ``foo`` might result in a - bucket with the name ``projects/123456/buckets/foo``. + Required. The ID to use for this bucket, which becomes + the final component of the bucket's resource name. For + example, the value ``foo`` might result in a bucket with + the name ``projects/123456/buckets/foo``. This corresponds to the ``bucket_id`` field on the ``request`` instance; if ``request`` is provided, this @@ -689,7 +741,18 @@ async def list_buckets( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListBucketsAsyncPager: - r"""Retrieves a list of buckets for a given project. + r"""Retrieves a list of buckets for a given project, ordered + lexicographically by name. + + **IAM Permissions**: + + Requires ``storage.buckets.list`` IAM permission on the bucket. + Additionally, to enable specific bucket features, the + authenticated user must have the following permissions: + + - To list the IAM policies: ``storage.buckets.getIamPolicy`` + - To list the bucket IP filtering rules: + ``storage.buckets.getIpFilter`` .. code-block:: python @@ -720,7 +783,8 @@ async def sample_list_buckets(): Args: request (Optional[Union[google.cloud._storage_v2.types.ListBucketsRequest, dict]]): - The request object. Request message for ListBuckets. + The request object. Request message for + [ListBuckets][google.storage.v2.Storage.ListBuckets]. parent (:class:`str`): Required. The project whose buckets we are listing. 
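A minimal usage sketch of the pager behavior documented above, assuming only the generated surface shown in this diff (``StorageAsyncClient``, ``ListBucketsRequest``) plus the ``projects/{project}`` parent format described in the class docstring; the helper name and project ID are illustrative:

.. code-block:: python

    import asyncio

    from google.cloud import _storage_v2

    async def list_bucket_names(project_id: str) -> None:
        client = _storage_v2.StorageAsyncClient()
        request = _storage_v2.ListBucketsRequest(parent=f"projects/{project_id}")
        # Awaiting the call returns a ListBucketsAsyncPager; iterating it
        # yields Bucket messages and resolves additional pages automatically.
        page_result = await client.list_buckets(request=request)
        async for bucket in page_result:
            print(bucket.name)

    # asyncio.run(list_bucket_names("my-project"))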
@@ -738,11 +802,11 @@ async def sample_list_buckets(): Returns: google.cloud._storage_v2.services.storage.pagers.ListBucketsAsyncPager: - The result of a call to - Buckets.ListBuckets - Iterating over this object will yield - results and resolve additional pages - automatically. + Response message for + [ListBuckets][google.storage.v2.Storage.ListBuckets]. + + Iterating over this object will yield results and + resolve additional pages automatically. """ # Create or coerce a protobuf request object. @@ -820,7 +884,25 @@ async def lock_bucket_retention_policy( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> storage.Bucket: - r"""Locks retention policy on a bucket. + r"""Permanently locks the retention policy that is currently applied + to the specified bucket. + + Caution: Locking a bucket is an irreversible action. Once you + lock a bucket: + + - You cannot remove the retention policy from the bucket. + - You cannot decrease the retention period for the policy. + + Once locked, you must delete the entire bucket in order to + remove the bucket's retention policy. However, before you can + delete the bucket, you must delete all the objects in the + bucket, which is only possible if all the objects have reached + the retention period set by the retention policy. + + **IAM Permissions**: + + Requires ``storage.buckets.update`` IAM permission on the + bucket. .. code-block:: python @@ -852,7 +934,7 @@ async def sample_lock_bucket_retention_policy(): Args: request (Optional[Union[google.cloud._storage_v2.types.LockBucketRetentionPolicyRequest, dict]]): The request object. Request message for - LockBucketRetentionPolicyRequest. + [LockBucketRetentionPolicy][google.storage.v2.Storage.LockBucketRetentionPolicy]. bucket (:class:`str`): Required. Name of a bucket. This corresponds to the ``bucket`` field @@ -934,12 +1016,18 @@ async def get_iam_policy( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Gets the IAM policy for a specified bucket. The ``resource`` - field in the request should be ``projects/_/buckets/{bucket}`` - for a bucket, or + r"""Gets the IAM policy for a specified bucket or managed folder. + The ``resource`` field in the request should be + ``projects/_/buckets/{bucket}`` for a bucket, or ``projects/_/buckets/{bucket}/managedFolders/{managedFolder}`` for a managed folder. + **IAM Permissions**: + + Requires ``storage.buckets.getIamPolicy`` on the bucket or + ``storage.managedFolders.getIamPolicy`` IAM permission on the + managed folder. + .. code-block:: python # This snippet has been automatically generated and should be regarded as a @@ -1089,9 +1177,9 @@ async def set_iam_policy( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Updates an IAM policy for the specified bucket. The ``resource`` - field in the request should be ``projects/_/buckets/{bucket}`` - for a bucket, or + r"""Updates an IAM policy for the specified bucket or managed + folder. The ``resource`` field in the request should be + ``projects/_/buckets/{bucket}`` for a bucket, or ``projects/_/buckets/{bucket}/managedFolders/{managedFolder}`` for a managed folder. 
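A hedged sketch of the resource naming that the two IAM methods above describe: the ``resource`` path alone selects either a bucket or a managed folder. The request type comes from ``google.iam.v1`` (already imported by this client); the helper and argument names are illustrative:

.. code-block:: python

    from typing import Optional

    from google.cloud import _storage_v2
    from google.iam.v1 import iam_policy_pb2

    async def fetch_policy(bucket: str, managed_folder: Optional[str] = None):
        client = _storage_v2.StorageAsyncClient()
        # "_" is the global alias for the project that owns the bucket.
        resource = f"projects/_/buckets/{bucket}"
        if managed_folder:
            resource += f"/managedFolders/{managed_folder}"
        request = iam_policy_pb2.GetIamPolicyRequest(resource=resource)
        return await client.get_iam_policy(request=request)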
@@ -1393,8 +1481,20 @@ async def update_bucket( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> storage.Bucket: - r"""Updates a bucket. Equivalent to JSON API's - storage.buckets.patch method. + r"""Updates a bucket. Changes to the bucket are readable immediately + after writing, but configuration changes might take time to + propagate. This method supports ``patch`` semantics. + + **IAM Permissions**: + + Requires ``storage.buckets.update`` IAM permission on the + bucket. Additionally, to enable specific bucket features, the + authenticated user must have the following permissions: + + - To set bucket IP filtering rules: + ``storage.buckets.setIpFilter`` + - To update public access prevention policies or access control + lists (ACLs): ``storage.buckets.setIamPolicy`` .. code-block:: python @@ -1423,10 +1523,12 @@ async def sample_update_bucket(): Args: request (Optional[Union[google.cloud._storage_v2.types.UpdateBucketRequest, dict]]): - The request object. Request for UpdateBucket method. + The request object. Request for + [UpdateBucket][google.storage.v2.Storage.UpdateBucket] + method. bucket (:class:`google.cloud._storage_v2.types.Bucket`): Required. The bucket to update. The bucket's ``name`` - field will be used to identify the bucket. + field is used to identify the bucket. This corresponds to the ``bucket`` field on the ``request`` instance; if ``request`` is provided, this @@ -1438,7 +1540,7 @@ async def sample_update_bucket(): "update" function, specify a single field with the value ``*``. Note: not recommended. If a new field is introduced at a later time, an older client updating - with the ``*`` may accidentally reset the new field's + with the ``*`` might accidentally reset the new field's value. Not specifying any fields is an error. @@ -1523,8 +1625,19 @@ async def compose_object( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> storage.Object: - r"""Concatenates a list of existing objects into a new - object in the same bucket. + r"""Concatenates a list of existing objects into a new object in the + same bucket. The existing source objects are unaffected by this + operation. + + **IAM Permissions**: + + Requires the ``storage.objects.create`` and + ``storage.objects.get`` IAM permissions to use this method. If + the new composite object overwrites an existing object, the + authenticated user must also have the ``storage.objects.delete`` + permission. If the request body includes the retention property, + the authenticated user must also have the + ``storage.objects.setRetention`` IAM permission. .. code-block:: python @@ -1553,7 +1666,8 @@ async def sample_compose_object(): Args: request (Optional[Union[google.cloud._storage_v2.types.ComposeObjectRequest, dict]]): - The request object. Request message for ComposeObject. + The request object. Request message for + [ComposeObject][google.storage.v2.Storage.ComposeObject]. retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1617,15 +1731,13 @@ async def delete_object( ) -> None: r"""Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation - parameter is used, or if `soft - delete `__ is - not enabled for the bucket. 
When this API is used to delete an - object from a bucket that has soft delete policy enabled, the - object becomes soft deleted, and the ``softDeleteTime`` and - ``hardDeleteTime`` properties are set on the object. This API - cannot be used to permanently delete soft-deleted objects. - Soft-deleted objects are permanently deleted according to their - ``hardDeleteTime``. + parameter is used, or if soft delete is not enabled for the + bucket. When this API is used to delete an object from a bucket + that has soft delete policy enabled, the object becomes soft + deleted, and the ``softDeleteTime`` and ``hardDeleteTime`` + properties are set on the object. This API cannot be used to + permanently delete soft-deleted objects. Soft-deleted objects + are permanently deleted according to their ``hardDeleteTime``. You can use the [``RestoreObject``][google.storage.v2.Storage.RestoreObject] API @@ -1634,9 +1746,8 @@ async def delete_object( **IAM Permissions**: - Requires ``storage.objects.delete`` `IAM - permission `__ - on the bucket. + Requires ``storage.objects.delete`` IAM permission on the + bucket. .. code-block:: python @@ -1664,8 +1775,8 @@ async def sample_delete_object(): Args: request (Optional[Union[google.cloud._storage_v2.types.DeleteObjectRequest, dict]]): - The request object. Message for deleting an object. ``bucket`` and - ``object`` **must** be set. + The request object. Request message for deleting an + object. bucket (:class:`str`): Required. Name of the bucket in which the object resides. @@ -1765,7 +1876,44 @@ async def restore_object( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> storage.Object: - r"""Restores a soft-deleted object. + r"""Restores a soft-deleted object. When a soft-deleted object is + restored, a new copy of that object is created in the same + bucket and inherits the same metadata as the soft-deleted + object. The inherited metadata is the metadata that existed when + the original object became soft deleted, with the following + exceptions: + + - The ``createTime`` of the new object is set to the time at + which the soft-deleted object was restored. + - The ``softDeleteTime`` and ``hardDeleteTime`` values are + cleared. + - A new generation is assigned and the metageneration is reset + to 1. + - If the soft-deleted object was in a bucket that had Autoclass + enabled, the new object is restored to Standard storage. + - The restored object inherits the bucket's default object ACL, + unless ``copySourceAcl`` is ``true``. + + If a live object using the same name already exists in the + bucket and becomes overwritten, the live object becomes a + noncurrent object if Object Versioning is enabled on the bucket. + If Object Versioning is not enabled, the live object becomes + soft deleted. + + **IAM Permissions**: + + Requires the following IAM permissions to use this method: + + - ``storage.objects.restore`` + - ``storage.objects.create`` + - ``storage.objects.delete`` (only required if overwriting an + existing object) + - ``storage.objects.getIamPolicy`` (only required if + ``projection`` is ``full`` and the relevant bucket has uniform + bucket-level access disabled) + - ``storage.objects.setIamPolicy`` (only required if + ``copySourceAcl`` is ``true`` and the relevant bucket has + uniform bucket-level access disabled) .. 
code-block:: python @@ -1797,8 +1945,10 @@ async def sample_restore_object(): Args: request (Optional[Union[google.cloud._storage_v2.types.RestoreObjectRequest, dict]]): - The request object. Message for restoring an object. ``bucket``, ``object``, - and ``generation`` **must** be set. + The request object. Request message for + [RestoreObject][google.storage.v2.Storage.RestoreObject]. + ``bucket``, ``object``, and ``generation`` **must** be + set. bucket (:class:`str`): Required. Name of the bucket in which the object resides. @@ -1903,11 +2053,11 @@ async def cancel_resumable_write( r"""Cancels an in-progress resumable upload. Any attempts to write to the resumable upload after - cancelling the upload will fail. + cancelling the upload fail. - The behavior for currently in progress write operations - is not guaranteed - they could either complete before - the cancellation or fail if the cancellation completes + The behavior for any in-progress write operations is not + guaranteed; they could either complete before the + cancellation or fail if the cancellation completes first. .. code-block:: python @@ -1938,8 +2088,8 @@ async def sample_cancel_resumable_write(): Args: request (Optional[Union[google.cloud._storage_v2.types.CancelResumableWriteRequest, dict]]): - The request object. Message for canceling an in-progress resumable upload. - ``upload_id`` **must** be set. + The request object. Request message for + [CancelResumableWrite][google.storage.v2.Storage.CancelResumableWrite]. upload_id (:class:`str`): Required. The upload_id of the resumable upload to cancel. This should be copied from the ``upload_id`` @@ -1959,7 +2109,7 @@ async def sample_cancel_resumable_write(): Returns: google.cloud._storage_v2.types.CancelResumableWriteResponse: Empty response message for canceling - an in-progress resumable upload, will be + an in-progress resumable upload, is extended as needed. """ @@ -2035,10 +2185,9 @@ async def get_object( **IAM Permissions**: - Requires ``storage.objects.get`` `IAM - permission `__ - on the bucket. To return object ACLs, the authenticated user - must also have the ``storage.objects.getIamPolicy`` permission. + Requires ``storage.objects.get`` IAM permission on the bucket. + To return object ACLs, the authenticated user must also have the + ``storage.objects.getIamPolicy`` permission. .. code-block:: python @@ -2069,7 +2218,8 @@ async def sample_get_object(): Args: request (Optional[Union[google.cloud._storage_v2.types.GetObjectRequest, dict]]): - The request object. Request message for GetObject. + The request object. Request message for + [GetObject][google.storage.v2.Storage.GetObject]. bucket (:class:`str`): Required. Name of the bucket in which the object resides. @@ -2177,9 +2327,7 @@ def read_object( **IAM Permissions**: - Requires ``storage.objects.get`` `IAM - permission `__ - on the bucket. + Requires ``storage.objects.get`` IAM permission on the bucket. .. code-block:: python @@ -2211,7 +2359,8 @@ async def sample_read_object(): Args: request (Optional[Union[google.cloud._storage_v2.types.ReadObjectRequest, dict]]): - The request object. Request message for ReadObject. + The request object. Request message for + [ReadObject][google.storage.v2.Storage.ReadObject]. bucket (:class:`str`): Required. The name of the bucket containing the object to read. @@ -2245,7 +2394,9 @@ async def sample_read_object(): Returns: AsyncIterable[google.cloud._storage_v2.types.ReadObjectResponse]: - Response message for ReadObject. 
+ Response message for + [ReadObject][google.storage.v2.Storage.ReadObject]. + """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -2316,25 +2467,17 @@ def bidi_read_object( ) -> Awaitable[AsyncIterable[storage.BidiReadObjectResponse]]: r"""Reads an object's data. - This is a bi-directional API with the added support for reading - multiple ranges within one stream both within and across - multiple messages. If the server encountered an error for any of - the inputs, the stream will be closed with the relevant error - code. Because the API allows for multiple outstanding requests, - when the stream is closed the error response will contain a - BidiReadObjectRangesError proto in the error extension - describing the error for each outstanding read_id. + This bi-directional API reads data from an object, allowing you + to request multiple data ranges within a single stream, even + across several messages. If an error occurs with any request, + the stream closes with a relevant error code. Since you can have + multiple outstanding requests, the error response includes a + ``BidiReadObjectRangesError`` field detailing the specific error + for each pending ``read_id``. **IAM Permissions**: - Requires ``storage.objects.get`` - - `IAM - permission `__ - on the bucket. - - This API is currently in preview and is not yet available for - general use. + Requires ``storage.objects.get`` IAM permission on the bucket. .. code-block:: python @@ -2374,7 +2517,8 @@ def request_generator(): Args: requests (AsyncIterator[`google.cloud._storage_v2.types.BidiReadObjectRequest`]): - The request object AsyncIterator. Request message for BidiReadObject. + The request object AsyncIterator. Request message for + [BidiReadObject][google.storage.v2.Storage.BidiReadObject]. retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2385,7 +2529,9 @@ def request_generator(): Returns: AsyncIterable[google.cloud._storage_v2.types.BidiReadObjectResponse]: - Response message for BidiReadObject. + Response message for + [BidiReadObject][google.storage.v2.Storage.BidiReadObject]. + """ # Wrap the RPC method; this adds retry and timeout information, @@ -2425,8 +2571,13 @@ async def update_object( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> storage.Object: - r"""Updates an object's metadata. - Equivalent to JSON API's storage.objects.patch. + r"""Updates an object's metadata. Equivalent to JSON API's + ``storage.objects.patch`` method. + + **IAM Permissions**: + + Requires ``storage.objects.update`` IAM permission on the + bucket. .. code-block:: python @@ -2455,7 +2606,8 @@ async def sample_update_object(): Args: request (Optional[Union[google.cloud._storage_v2.types.UpdateObjectRequest, dict]]): - The request object. Request message for UpdateObject. + The request object. Request message for + [UpdateObject][google.storage.v2.Storage.UpdateObject]. object_ (:class:`google.cloud._storage_v2.types.Object`): Required. The object to update. The object's bucket and name fields are @@ -2476,7 +2628,7 @@ async def sample_update_object(): "update" function, specify a single field with the value ``*``. Note: not recommended. 
If a new field is introduced at a later time, an older client updating - with the ``*`` may accidentally reset the new field's + with the ``*`` might accidentally reset the new field's value. Not specifying any fields is an error. @@ -2581,32 +2733,32 @@ async def write_object( - Check the result Status of the stream, to determine if writing can be resumed on this stream or must be restarted from scratch (by calling ``StartResumableWrite()``). The resumable - errors are DEADLINE_EXCEEDED, INTERNAL, and UNAVAILABLE. For - each case, the client should use binary exponential backoff - before retrying. Additionally, writes can be resumed after - RESOURCE_EXHAUSTED errors, but only after taking appropriate - measures, which may include reducing aggregate send rate - across clients and/or requesting a quota increase for your - project. + errors are ``DEADLINE_EXCEEDED``, ``INTERNAL``, and + ``UNAVAILABLE``. For each case, the client should use binary + exponential backoff before retrying. Additionally, writes can + be resumed after ``RESOURCE_EXHAUSTED`` errors, but only after + taking appropriate measures, which might include reducing + aggregate send rate across clients and/or requesting a quota + increase for your project. - If the call to ``WriteObject`` returns ``ABORTED``, that indicates concurrent attempts to update the resumable write, caused either by multiple racing clients or by a single client where the previous request was timed out on the client side but nonetheless reached the server. In this case the client - should take steps to prevent further concurrent writes (e.g., - increase the timeouts, stop using more than one process to - perform the upload, etc.), and then should follow the steps - below for resuming the upload. + should take steps to prevent further concurrent writes. For + example, increase the timeouts and stop using more than one + process to perform the upload. Follow the steps below for + resuming the upload. - For resumable errors, the client should call ``QueryWriteStatus()`` and then continue writing from the - returned ``persisted_size``. This may be less than the amount - of data the client previously sent. Note also that it is - acceptable to send data starting at an offset earlier than the - returned ``persisted_size``; in this case, the service will - skip data at offsets that were already persisted (without + returned ``persisted_size``. This might be less than the + amount of data the client previously sent. Note also that it + is acceptable to send data starting at an offset earlier than + the returned ``persisted_size``; in this case, the service + skips data at offsets that were already persisted (without checking that it matches the previously written data), and write only the data starting from the persisted offset. Even - though the data isn't written, it may still incur a + though the data isn't written, it might still incur a performance cost over resuming at the correct write offset. This behavior can make client-side handling simpler in some cases. @@ -2614,27 +2766,26 @@ async def write_object( message, unless the object is being finished with ``finish_write`` set to ``true``. - The service will not view the object as complete until the + The service does not view the object as complete until the client has sent a ``WriteObjectRequest`` with ``finish_write`` set to ``true``. Sending any requests on a stream after sending - a request with ``finish_write`` set to ``true`` will cause an - error. 
The client **should** check the response it receives to - determine how much data the service was able to commit and - whether the service views the object as complete. + a request with ``finish_write`` set to ``true`` causes an error. + The client must check the response it receives to determine how + much data the service is able to commit and whether the service + views the object as complete. - Attempting to resume an already finalized object will result in - an OK status, with a ``WriteObjectResponse`` containing the + Attempting to resume an already finalized object results in an + ``OK`` status, with a ``WriteObjectResponse`` containing the finalized object's metadata. - Alternatively, the BidiWriteObject operation may be used to + Alternatively, you can use the ``BidiWriteObject`` operation to write an object with controls over flushing and the ability to determine the current persisted size. **IAM Permissions**: - Requires ``storage.objects.create`` `IAM - permission `__ - on the bucket. + Requires ``storage.objects.create`` IAM permission on the + bucket. .. code-block:: python @@ -2675,7 +2826,8 @@ def request_generator(): Args: requests (AsyncIterator[`google.cloud._storage_v2.types.WriteObjectRequest`]): - The request object AsyncIterator. Request message for WriteObject. + The request object AsyncIterator. Request message for + [WriteObject][google.storage.v2.Storage.WriteObject]. retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2686,7 +2838,9 @@ Returns: google.cloud._storage_v2.types.WriteObjectResponse: - Response message for WriteObject. + Response message for + [WriteObject][google.storage.v2.Storage.WriteObject]. + """ # Wrap the RPC method; this adds retry and timeout information, @@ -2719,20 +2873,20 @@ def bidi_write_object( ) -> Awaitable[AsyncIterable[storage.BidiWriteObjectResponse]]: r"""Stores a new object and metadata. - This is similar to the WriteObject call with the added support - for manual flushing of persisted state, and the ability to - determine current persisted size without closing the stream. - - The client may specify one or both of the ``state_lookup`` and - ``flush`` fields in each BidiWriteObjectRequest. If ``flush`` is - specified, the data written so far will be persisted to storage. - If ``state_lookup`` is specified, the service will respond with - a BidiWriteObjectResponse that contains the persisted size. If - both ``flush`` and ``state_lookup`` are specified, the flush - will always occur before a ``state_lookup``, so that both may be - set in the same request and the returned state will be the state - of the object post-flush. When the stream is closed, a - BidiWriteObjectResponse will always be sent to the client, + This is similar to the ``WriteObject`` call with the added + support for manual flushing of persisted state, and the ability + to determine current persisted size without closing the stream. + + The client might specify one or both of the ``state_lookup`` and + ``flush`` fields in each ``BidiWriteObjectRequest``. If + ``flush`` is specified, the data written so far is persisted to + storage. If ``state_lookup`` is specified, the service responds + with a ``BidiWriteObjectResponse`` that contains the persisted + size.
If both ``flush`` and ``state_lookup`` are specified, the + flush always occurs before a ``state_lookup``, so that both + might be set in the same request and the returned state is the + state of the object post-flush. When the stream is closed, a + ``BidiWriteObjectResponse`` is always sent to the client, regardless of the value of ``state_lookup``. .. code-block:: python @@ -2775,7 +2929,8 @@ def request_generator(): Args: requests (AsyncIterator[`google.cloud._storage_v2.types.BidiWriteObjectRequest`]): - The request object AsyncIterator. Request message for BidiWriteObject. + The request object AsyncIterator. Request message for + [BidiWriteObject][google.storage.v2.Storage.BidiWriteObject]. retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2822,11 +2977,10 @@ async def list_objects( **IAM Permissions**: - The authenticated user requires ``storage.objects.list`` `IAM - permission `__ - to use this method. To return object ACLs, the authenticated - user must also have the ``storage.objects.getIamPolicy`` - permission. + The authenticated user requires ``storage.objects.list`` IAM + permission to use this method. To return object ACLs, the + authenticated user must also have the + ``storage.objects.getIamPolicy`` permission. .. code-block:: python @@ -2857,7 +3011,8 @@ async def sample_list_objects(): Args: request (Optional[Union[google.cloud._storage_v2.types.ListObjectsRequest, dict]]): - The request object. Request message for ListObjects. + The request object. Request message for + [ListObjects][google.storage.v2.Storage.ListObjects]. parent (:class:`str`): Required. Name of the bucket in which to look for objects. @@ -2990,17 +3145,20 @@ async def sample_rewrite_object(): Args: request (Optional[Union[google.cloud._storage_v2.types.RewriteObjectRequest, dict]]): - The request object. Request message for RewriteObject. If the source object - is encrypted using a Customer-Supplied Encryption Key - the key information must be provided in the - copy_source_encryption_algorithm, - copy_source_encryption_key_bytes, and - copy_source_encryption_key_sha256_bytes fields. If the - destination object should be encrypted the keying + The request object. Request message for + [RewriteObject][google.storage.v2.Storage.RewriteObject]. + If the source object is encrypted using a + Customer-Supplied Encryption Key the key information + must be provided in the + ``copy_source_encryption_algorithm``, + ``copy_source_encryption_key_bytes``, and + ``copy_source_encryption_key_sha256_bytes`` fields. If + the destination object should be encrypted the keying information should be provided in the - encryption_algorithm, encryption_key_bytes, and - encryption_key_sha256_bytes fields of the - common_object_request_params.customer_encryption field. + ``encryption_algorithm``, ``encryption_key_bytes``, and + ``encryption_key_sha256_bytes`` fields of the + ``common_object_request_params.customer_encryption`` + field. retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -3063,18 +3221,16 @@ async def start_resumable_write( metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> storage.StartResumableWriteResponse: r"""Starts a resumable write operation. This method is part of the - `Resumable - upload `__ - feature. 
This allows you to upload large objects in multiple - chunks, which is more resilient to network interruptions than a - single upload. The validity duration of the write operation, and - the consequences of it becoming invalid, are service-dependent. + Resumable upload feature. This allows you to upload large + objects in multiple chunks, which is more resilient to network + interruptions than a single upload. The validity duration of the + write operation, and the consequences of it becoming invalid, + are service-dependent. **IAM Permissions**: - Requires ``storage.objects.create`` `IAM - permission `__ - on the bucket. + Requires ``storage.objects.create`` IAM permission on the + bucket. .. code-block:: python @@ -3103,7 +3259,8 @@ async def sample_start_resumable_write(): Args: request (Optional[Union[google.cloud._storage_v2.types.StartResumableWriteRequest, dict]]): - The request object. Request message StartResumableWrite. + The request object. Request message for + [StartResumableWrite][google.storage.v2.Storage.StartResumableWrite]. retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -3114,7 +3271,9 @@ async def sample_start_resumable_write(): Returns: google.cloud._storage_v2.types.StartResumableWriteResponse: - Response object for StartResumableWrite. + Response object for + [StartResumableWrite][google.storage.v2.Storage.StartResumableWrite]. + """ # Create or coerce a protobuf request object. # - Use the request object if provided (there's no risk of modifying the input as @@ -3166,11 +3325,10 @@ async def query_write_status( metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> storage.QueryWriteStatusResponse: r"""Determines the ``persisted_size`` of an object that is being - written. This method is part of the `resumable - upload `__ - feature. The returned value is the size of the object that has - been persisted so far. The value can be used as the - ``write_offset`` for the next ``Write()`` call. + written. This method is part of the resumable upload feature. + The returned value is the size of the object that has been + persisted so far. The value can be used as the ``write_offset`` + for the next ``Write()`` call. If the object does not exist, meaning if it was deleted, or the first ``Write()`` has not yet reached the service, this method @@ -3212,7 +3370,8 @@ async def sample_query_write_status(): Args: request (Optional[Union[google.cloud._storage_v2.types.QueryWriteStatusRequest, dict]]): - The request object. Request object for ``QueryWriteStatus``. + The request object. Request object for + [QueryWriteStatus][google.storage.v2.Storage.QueryWriteStatus]. upload_id (:class:`str`): Required. The name of the resume token for the object whose write status @@ -3231,7 +3390,9 @@ async def sample_query_write_status(): Returns: google.cloud._storage_v2.types.QueryWriteStatusResponse: - Response object for QueryWriteStatus. + Response object for + [QueryWriteStatus][google.storage.v2.Storage.QueryWriteStatus]. + """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -3301,8 +3462,20 @@ async def move_object( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> storage.Object: - r"""Moves the source object to the destination object in - the same bucket. + r"""Moves the source object to the destination object in the same + bucket. 
This operation works by renaming the object. The move + itself is an atomic transaction, ensuring all steps either + complete successfully or no changes are made. + + **IAM Permissions**: + + Requires the following IAM permissions to use this method: + + - ``storage.objects.move`` + - ``storage.objects.create`` + - ``storage.objects.delete`` (only required if overwriting an + existing object) .. code-block:: python @@ -3334,7 +3507,8 @@ async def sample_move_object(): Args: request (Optional[Union[google.cloud._storage_v2.types.MoveObjectRequest, dict]]): - The request object. Request message for MoveObject. + The request object. Request message for + [MoveObject][google.storage.v2.Storage.MoveObject]. bucket (:class:`str`): Required. Name of the bucket in which the object resides. diff --git a/google/cloud/_storage_v2/services/storage/client.py b/google/cloud/_storage_v2/services/storage/client.py index 82596dcd2..16c76a01f 100644 --- a/google/cloud/_storage_v2/services/storage/client.py +++ b/google/cloud/_storage_v2/services/storage/client.py @@ -115,8 +115,8 @@ class StorageClient(metaclass=StorageClientMeta): The Cloud Storage gRPC API allows applications to read and write data through the abstractions of buckets and objects. For a - description of these abstractions please see - https://cloud.google.com/storage/docs. + description of these abstractions please see `Cloud Storage + documentation <https://cloud.google.com/storage/docs>`__. Resources are named as follows: @@ -125,17 +125,24 @@ class StorageClient(metaclass=StorageClientMeta): ``projects/my-string-id``. - Buckets are named using string names of the form: - ``projects/{project}/buckets/{bucket}`` For globally unique - buckets, ``_`` may be substituted for the project. + ``projects/{project}/buckets/{bucket}``. For globally unique + buckets, ``_`` might be substituted for the project. - Objects are uniquely identified by their name along with the name of the bucket they belong to, as separate strings in this API. For example: - ReadObjectRequest { bucket: 'projects/\_/buckets/my-bucket' - object: 'my-object' } Note that object names can contain ``/`` - characters, which are treated as any other character (no special - directory semantics). + :: + + ReadObjectRequest { + bucket: 'projects/_/buckets/my-bucket' + object: 'my-object' + } + + Note that object names can contain ``/`` characters, which are + treated as any other character (no special directory semantics). """ @staticmethod @@ -763,7 +770,28 @@ def delete_bucket( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Permanently deletes an empty bucket. + r"""Permanently deletes an empty bucket. The request fails if there + are any live or noncurrent objects in the bucket, but the + request succeeds if the bucket only contains soft-deleted + objects or incomplete uploads, such as ongoing XML API multipart + uploads. Does not permanently delete soft-deleted objects. + + When this API is used to delete a bucket containing an object + that has a soft delete policy enabled, the object becomes soft + deleted, and the ``softDeleteTime`` and ``hardDeleteTime`` + properties are set on the object. + + Objects and multipart uploads that were in the bucket at the + time of deletion are also retained for the specified retention + duration. When a soft-deleted bucket reaches the end of its + retention duration, it is permanently deleted.
The + ``hardDeleteTime`` of the bucket always equals or exceeds the + expiration time of the last soft-deleted object in the bucket. + + **IAM Permissions**: + + Requires ``storage.buckets.delete`` IAM permission on the + bucket. .. code-block:: python @@ -790,7 +818,8 @@ def sample_delete_bucket(): Args: request (Union[google.cloud._storage_v2.types.DeleteBucketRequest, dict]): - The request object. Request message for DeleteBucket. + The request object. Request message for + [DeleteBucket][google.storage.v2.Storage.DeleteBucket]. name (str): Required. Name of a bucket to delete. This corresponds to the ``name`` field @@ -864,6 +893,16 @@ def get_bucket( ) -> storage.Bucket: r"""Returns metadata for the specified bucket. + **IAM Permissions**: + + Requires ``storage.buckets.get`` IAM permission on the bucket. + Additionally, to return specific bucket metadata, the + authenticated user must have the following permissions: + + - To return the IAM policies: ``storage.buckets.getIamPolicy`` + - To return the bucket IP filtering rules: + ``storage.buckets.getIpFilter`` + .. code-block:: python # This snippet has been automatically generated and should be regarded as a @@ -892,7 +931,8 @@ def sample_get_bucket(): Args: request (Union[google.cloud._storage_v2.types.GetBucketRequest, dict]): - The request object. Request message for GetBucket. + The request object. Request message for + [GetBucket][google.storage.v2.Storage.GetBucket]. name (str): Required. Name of a bucket. This corresponds to the ``name`` field @@ -975,6 +1015,17 @@ def create_bucket( ) -> storage.Bucket: r"""Creates a new bucket. + **IAM Permissions**: + + Requires ``storage.buckets.create`` IAM permission on the + bucket. Additionally, to enable specific bucket features, the + authenticated user must have the following permissions: + + - To enable object retention using the ``enableObjectRetention`` + query parameter: ``storage.buckets.enableObjectRetention`` + - To set the bucket IP filtering rules: + ``storage.buckets.setIpFilter`` + .. code-block:: python # This snippet has been automatically generated and should be regarded as a @@ -1004,10 +1055,11 @@ def sample_create_bucket(): Args: request (Union[google.cloud._storage_v2.types.CreateBucketRequest, dict]): - The request object. Request message for CreateBucket. + The request object. Request message for + [CreateBucket][google.storage.v2.Storage.CreateBucket]. parent (str): - Required. The project to which this bucket will belong. - This field must either be empty or ``projects/_``. The + Required. The project to which this bucket belongs. This + field must either be empty or ``projects/_``. The project ID that owns this bucket should be specified in the ``bucket.project`` field. @@ -1017,8 +1069,8 @@ def sample_create_bucket(): bucket (google.cloud._storage_v2.types.Bucket): Optional. Properties of the new bucket being inserted. The name of the bucket is specified in the ``bucket_id`` - field. Populating ``bucket.name`` field will result in - an error. The project of the bucket must be specified in + field. Populating ``bucket.name`` field results in an + error. The project of the bucket must be specified in the ``bucket.project`` field. This field must be in ``projects/{projectIdentifier}`` format, {projectIdentifier} can be the project ID or project @@ -1029,10 +1081,10 @@ def sample_create_bucket(): on the ``request`` instance; if ``request`` is provided, this should not be set. bucket_id (str): - Required. 
The ID to use for this bucket, which will - become the final component of the bucket's resource - name. For example, the value ``foo`` might result in a - bucket with the name ``projects/123456/buckets/foo``. + Required. The ID to use for this bucket, which becomes + the final component of the bucket's resource name. For + example, the value ``foo`` might result in a bucket with + the name ``projects/123456/buckets/foo``. This corresponds to the ``bucket_id`` field on the ``request`` instance; if ``request`` is provided, this @@ -1119,7 +1171,18 @@ def list_buckets( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListBucketsPager: - r"""Retrieves a list of buckets for a given project. + r"""Retrieves a list of buckets for a given project, ordered + lexicographically by name. + + **IAM Permissions**: + + Requires ``storage.buckets.list`` IAM permission on the bucket. + Additionally, to enable specific bucket features, the + authenticated user must have the following permissions: + + - To list the IAM policies: ``storage.buckets.getIamPolicy`` + - To list the bucket IP filtering rules: + ``storage.buckets.getIpFilter`` .. code-block:: python @@ -1150,7 +1213,8 @@ def sample_list_buckets(): Args: request (Union[google.cloud._storage_v2.types.ListBucketsRequest, dict]): - The request object. Request message for ListBuckets. + The request object. Request message for + [ListBuckets][google.storage.v2.Storage.ListBuckets]. parent (str): Required. The project whose buckets we are listing. @@ -1168,11 +1232,11 @@ def sample_list_buckets(): Returns: google.cloud._storage_v2.services.storage.pagers.ListBucketsPager: - The result of a call to - Buckets.ListBuckets - Iterating over this object will yield - results and resolve additional pages - automatically. + Response message for + [ListBuckets][google.storage.v2.Storage.ListBuckets]. + + Iterating over this object will yield results and + resolve additional pages automatically. """ # Create or coerce a protobuf request object. @@ -1247,7 +1311,25 @@ def lock_bucket_retention_policy( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> storage.Bucket: - r"""Locks retention policy on a bucket. + r"""Permanently locks the retention policy that is currently applied + to the specified bucket. + + Caution: Locking a bucket is an irreversible action. Once you + lock a bucket: + + - You cannot remove the retention policy from the bucket. + - You cannot decrease the retention period for the policy. + + Once locked, you must delete the entire bucket in order to + remove the bucket's retention policy. However, before you can + delete the bucket, you must delete all the objects in the + bucket, which is only possible if all the objects have reached + the retention period set by the retention policy. + + **IAM Permissions**: + + Requires ``storage.buckets.update`` IAM permission on the + bucket. .. code-block:: python @@ -1279,7 +1361,7 @@ def sample_lock_bucket_retention_policy(): Args: request (Union[google.cloud._storage_v2.types.LockBucketRetentionPolicyRequest, dict]): The request object. Request message for - LockBucketRetentionPolicyRequest. + [LockBucketRetentionPolicy][google.storage.v2.Storage.LockBucketRetentionPolicy]. bucket (str): Required. Name of a bucket. 
This corresponds to the ``bucket`` field @@ -1360,12 +1442,18 @@ def get_iam_policy( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Gets the IAM policy for a specified bucket. The ``resource`` - field in the request should be ``projects/_/buckets/{bucket}`` - for a bucket, or + r"""Gets the IAM policy for a specified bucket or managed folder. + The ``resource`` field in the request should be + ``projects/_/buckets/{bucket}`` for a bucket, or ``projects/_/buckets/{bucket}/managedFolders/{managedFolder}`` for a managed folder. + **IAM Permissions**: + + Requires ``storage.buckets.getIamPolicy`` on the bucket or + ``storage.managedFolders.getIamPolicy`` IAM permission on the + managed folder. + .. code-block:: python # This snippet has been automatically generated and should be regarded as a @@ -1516,9 +1604,9 @@ def set_iam_policy( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Updates an IAM policy for the specified bucket. The ``resource`` - field in the request should be ``projects/_/buckets/{bucket}`` - for a bucket, or + r"""Updates an IAM policy for the specified bucket or managed + folder. The ``resource`` field in the request should be + ``projects/_/buckets/{bucket}`` for a bucket, or ``projects/_/buckets/{bucket}/managedFolders/{managedFolder}`` for a managed folder. @@ -1822,8 +1910,20 @@ def update_bucket( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> storage.Bucket: - r"""Updates a bucket. Equivalent to JSON API's - storage.buckets.patch method. + r"""Updates a bucket. Changes to the bucket are readable immediately + after writing, but configuration changes might take time to + propagate. This method supports ``patch`` semantics. + + **IAM Permissions**: + + Requires ``storage.buckets.update`` IAM permission on the + bucket. Additionally, to enable specific bucket features, the + authenticated user must have the following permissions: + + - To set bucket IP filtering rules: + ``storage.buckets.setIpFilter`` + - To update public access prevention policies or access control + lists (ACLs): ``storage.buckets.setIamPolicy`` .. code-block:: python @@ -1852,10 +1952,12 @@ def sample_update_bucket(): Args: request (Union[google.cloud._storage_v2.types.UpdateBucketRequest, dict]): - The request object. Request for UpdateBucket method. + The request object. Request for + [UpdateBucket][google.storage.v2.Storage.UpdateBucket] + method. bucket (google.cloud._storage_v2.types.Bucket): Required. The bucket to update. The bucket's ``name`` - field will be used to identify the bucket. + field is used to identify the bucket. This corresponds to the ``bucket`` field on the ``request`` instance; if ``request`` is provided, this @@ -1867,7 +1969,7 @@ def sample_update_bucket(): "update" function, specify a single field with the value ``*``. Note: not recommended. If a new field is introduced at a later time, an older client updating - with the ``*`` may accidentally reset the new field's + with the ``*`` might accidentally reset the new field's value. Not specifying any fields is an error. 
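A short sketch of the ``update_mask`` caveat above, assuming the standard ``labels`` field on ``Bucket``; naming only the changed paths avoids the ``*`` reset hazard that the docstring warns about (helper and label values are illustrative):

.. code-block:: python

    from google.cloud import _storage_v2
    from google.protobuf import field_mask_pb2

    def set_bucket_label(bucket_name: str) -> _storage_v2.Bucket:
        client = _storage_v2.StorageClient()
        bucket = _storage_v2.Bucket(name=f"projects/_/buckets/{bucket_name}")
        bucket.labels["env"] = "prod"
        # Patch semantics: only the fields listed in update_mask change.
        request = _storage_v2.UpdateBucketRequest(
            bucket=bucket,
            update_mask=field_mask_pb2.FieldMask(paths=["labels"]),
        )
        return client.update_bucket(request=request)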
@@ -1949,8 +2051,19 @@ def compose_object( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> storage.Object: - r"""Concatenates a list of existing objects into a new - object in the same bucket. + r"""Concatenates a list of existing objects into a new object in the + same bucket. The existing source objects are unaffected by this + operation. + + **IAM Permissions**: + + Requires the ``storage.objects.create`` and + ``storage.objects.get`` IAM permissions to use this method. If + the new composite object overwrites an existing object, the + authenticated user must also have the ``storage.objects.delete`` + permission. If the request body includes the retention property, + the authenticated user must also have the + ``storage.objects.setRetention`` IAM permission. .. code-block:: python @@ -1979,7 +2092,8 @@ def sample_compose_object(): Args: request (Union[google.cloud._storage_v2.types.ComposeObjectRequest, dict]): - The request object. Request message for ComposeObject. + The request object. Request message for + [ComposeObject][google.storage.v2.Storage.ComposeObject]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2041,15 +2155,13 @@ def delete_object( ) -> None: r"""Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation - parameter is used, or if `soft - delete `__ is - not enabled for the bucket. When this API is used to delete an - object from a bucket that has soft delete policy enabled, the - object becomes soft deleted, and the ``softDeleteTime`` and - ``hardDeleteTime`` properties are set on the object. This API - cannot be used to permanently delete soft-deleted objects. - Soft-deleted objects are permanently deleted according to their - ``hardDeleteTime``. + parameter is used, or if soft delete is not enabled for the + bucket. When this API is used to delete an object from a bucket + that has soft delete policy enabled, the object becomes soft + deleted, and the ``softDeleteTime`` and ``hardDeleteTime`` + properties are set on the object. This API cannot be used to + permanently delete soft-deleted objects. Soft-deleted objects + are permanently deleted according to their ``hardDeleteTime``. You can use the [``RestoreObject``][google.storage.v2.Storage.RestoreObject] API @@ -2058,9 +2170,8 @@ def delete_object( **IAM Permissions**: - Requires ``storage.objects.delete`` `IAM - permission `__ - on the bucket. + Requires ``storage.objects.delete`` IAM permission on the + bucket. .. code-block:: python @@ -2088,8 +2199,8 @@ def sample_delete_object(): Args: request (Union[google.cloud._storage_v2.types.DeleteObjectRequest, dict]): - The request object. Message for deleting an object. ``bucket`` and - ``object`` **must** be set. + The request object. Request message for deleting an + object. bucket (str): Required. Name of the bucket in which the object resides. @@ -2186,7 +2297,44 @@ def restore_object( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> storage.Object: - r"""Restores a soft-deleted object. + r"""Restores a soft-deleted object. When a soft-deleted object is + restored, a new copy of that object is created in the same + bucket and inherits the same metadata as the soft-deleted + object. 
The inherited metadata is the metadata that existed when + the original object became soft deleted, with the following + exceptions: + + - The ``createTime`` of the new object is set to the time at + which the soft-deleted object was restored. + - The ``softDeleteTime`` and ``hardDeleteTime`` values are + cleared. + - A new generation is assigned and the metageneration is reset + to 1. + - If the soft-deleted object was in a bucket that had Autoclass + enabled, the new object is restored to Standard storage. + - The restored object inherits the bucket's default object ACL, + unless ``copySourceAcl`` is ``true``. + + If a live object using the same name already exists in the + bucket and becomes overwritten, the live object becomes a + noncurrent object if Object Versioning is enabled on the bucket. + If Object Versioning is not enabled, the live object becomes + soft deleted. + + **IAM Permissions**: + + Requires the following IAM permissions to use this method: + + - ``storage.objects.restore`` + - ``storage.objects.create`` + - ``storage.objects.delete`` (only required if overwriting an + existing object) + - ``storage.objects.getIamPolicy`` (only required if + ``projection`` is ``full`` and the relevant bucket has uniform + bucket-level access disabled) + - ``storage.objects.setIamPolicy`` (only required if + ``copySourceAcl`` is ``true`` and the relevant bucket has + uniform bucket-level access disabled) .. code-block:: python @@ -2218,8 +2366,10 @@ def sample_restore_object(): Args: request (Union[google.cloud._storage_v2.types.RestoreObjectRequest, dict]): - The request object. Message for restoring an object. ``bucket``, ``object``, - and ``generation`` **must** be set. + The request object. Request message for + [RestoreObject][google.storage.v2.Storage.RestoreObject]. + ``bucket``, ``object``, and ``generation`` **must** be + set. bucket (str): Required. Name of the bucket in which the object resides. @@ -2321,11 +2471,11 @@ def cancel_resumable_write( r"""Cancels an in-progress resumable upload. Any attempts to write to the resumable upload after - cancelling the upload will fail. + cancelling the upload fail. - The behavior for currently in progress write operations - is not guaranteed - they could either complete before - the cancellation or fail if the cancellation completes + The behavior for any in-progress write operations is not + guaranteed; they could either complete before the + cancellation or fail if the cancellation completes first. .. code-block:: python @@ -2356,8 +2506,8 @@ def sample_cancel_resumable_write(): Args: request (Union[google.cloud._storage_v2.types.CancelResumableWriteRequest, dict]): - The request object. Message for canceling an in-progress resumable upload. - ``upload_id`` **must** be set. + The request object. Request message for + [CancelResumableWrite][google.storage.v2.Storage.CancelResumableWrite]. upload_id (str): Required. The upload_id of the resumable upload to cancel. This should be copied from the ``upload_id`` @@ -2377,7 +2527,7 @@ def sample_cancel_resumable_write(): Returns: google.cloud._storage_v2.types.CancelResumableWriteResponse: Empty response message for canceling - an in-progress resumable upload, will be + an in-progress resumable upload, is extended as needed. """ @@ -2450,10 +2600,9 @@ def get_object( **IAM Permissions**: - Requires ``storage.objects.get`` `IAM - permission `__ - on the bucket. To return object ACLs, the authenticated user - must also have the ``storage.objects.getIamPolicy`` permission. 
+ Requires ``storage.objects.get`` IAM permission on the bucket. + To return object ACLs, the authenticated user must also have the + ``storage.objects.getIamPolicy`` permission. .. code-block:: python @@ -2484,7 +2633,8 @@ def sample_get_object(): Args: request (Union[google.cloud._storage_v2.types.GetObjectRequest, dict]): - The request object. Request message for GetObject. + The request object. Request message for + [GetObject][google.storage.v2.Storage.GetObject]. bucket (str): Required. Name of the bucket in which the object resides. @@ -2589,9 +2739,7 @@ def read_object( **IAM Permissions**: - Requires ``storage.objects.get`` `IAM - permission `__ - on the bucket. + Requires ``storage.objects.get`` IAM permission on the bucket. .. code-block:: python @@ -2623,7 +2771,8 @@ def sample_read_object(): Args: request (Union[google.cloud._storage_v2.types.ReadObjectRequest, dict]): - The request object. Request message for ReadObject. + The request object. Request message for + [ReadObject][google.storage.v2.Storage.ReadObject]. bucket (str): Required. The name of the bucket containing the object to read. @@ -2657,7 +2806,9 @@ def sample_read_object(): Returns: Iterable[google.cloud._storage_v2.types.ReadObjectResponse]: - Response message for ReadObject. + Response message for + [ReadObject][google.storage.v2.Storage.ReadObject]. + """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -2725,25 +2876,17 @@ def bidi_read_object( ) -> Iterable[storage.BidiReadObjectResponse]: r"""Reads an object's data. - This is a bi-directional API with the added support for reading - multiple ranges within one stream both within and across - multiple messages. If the server encountered an error for any of - the inputs, the stream will be closed with the relevant error - code. Because the API allows for multiple outstanding requests, - when the stream is closed the error response will contain a - BidiReadObjectRangesError proto in the error extension - describing the error for each outstanding read_id. + This bi-directional API reads data from an object, allowing you + to request multiple data ranges within a single stream, even + across several messages. If an error occurs with any request, + the stream closes with a relevant error code. Since you can have + multiple outstanding requests, the error response includes a + ``BidiReadObjectRangesError`` field detailing the specific error + for each pending ``read_id``. **IAM Permissions**: - Requires ``storage.objects.get`` - - `IAM - permission `__ - on the bucket. - - This API is currently in preview and is not yet available for - general use. + Requires ``storage.objects.get`` IAM permission on the bucket. .. code-block:: python @@ -2783,7 +2926,8 @@ def request_generator(): Args: requests (Iterator[google.cloud._storage_v2.types.BidiReadObjectRequest]): - The request object iterator. Request message for BidiReadObject. + The request object iterator. Request message for + [BidiReadObject][google.storage.v2.Storage.BidiReadObject]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2794,7 +2938,9 @@ def request_generator(): Returns: Iterable[google.cloud._storage_v2.types.BidiReadObjectResponse]: - Response message for BidiReadObject. + Response message for + [BidiReadObject][google.storage.v2.Storage.BidiReadObject]. 
+ """ # Wrap the RPC method; this adds retry and timeout information, @@ -2832,8 +2978,13 @@ def update_object( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> storage.Object: - r"""Updates an object's metadata. - Equivalent to JSON API's storage.objects.patch. + r"""Updates an object's metadata. Equivalent to JSON API's + ``storage.objects.patch`` method. + + **IAM Permissions**: + + Requires ``storage.objects.update`` IAM permission on the + bucket. .. code-block:: python @@ -2862,7 +3013,8 @@ def sample_update_object(): Args: request (Union[google.cloud._storage_v2.types.UpdateObjectRequest, dict]): - The request object. Request message for UpdateObject. + The request object. Request message for + [UpdateObject][google.storage.v2.Storage.UpdateObject]. object_ (google.cloud._storage_v2.types.Object): Required. The object to update. The object's bucket and name fields are @@ -2883,7 +3035,7 @@ def sample_update_object(): "update" function, specify a single field with the value ``*``. Note: not recommended. If a new field is introduced at a later time, an older client updating - with the ``*`` may accidentally reset the new field's + with the ``*`` might accidentally reset the new field's value. Not specifying any fields is an error. @@ -2985,32 +3137,32 @@ def write_object( - Check the result Status of the stream, to determine if writing can be resumed on this stream or must be restarted from scratch (by calling ``StartResumableWrite()``). The resumable - errors are DEADLINE_EXCEEDED, INTERNAL, and UNAVAILABLE. For - each case, the client should use binary exponential backoff - before retrying. Additionally, writes can be resumed after - RESOURCE_EXHAUSTED errors, but only after taking appropriate - measures, which may include reducing aggregate send rate - across clients and/or requesting a quota increase for your - project. + errors are ``DEADLINE_EXCEEDED``, ``INTERNAL``, and + ``UNAVAILABLE``. For each case, the client should use binary + exponential backoff before retrying. Additionally, writes can + be resumed after ``RESOURCE_EXHAUSTED`` errors, but only after + taking appropriate measures, which might include reducing + aggregate send rate across clients and/or requesting a quota + increase for your project. - If the call to ``WriteObject`` returns ``ABORTED``, that indicates concurrent attempts to update the resumable write, caused either by multiple racing clients or by a single client where the previous request was timed out on the client side but nonetheless reached the server. In this case the client - should take steps to prevent further concurrent writes (e.g., - increase the timeouts, stop using more than one process to - perform the upload, etc.), and then should follow the steps - below for resuming the upload. + should take steps to prevent further concurrent writes. For + example, increase the timeouts and stop using more than one + process to perform the upload. Follow the steps below for + resuming the upload. - For resumable errors, the client should call ``QueryWriteStatus()`` and then continue writing from the - returned ``persisted_size``. This may be less than the amount - of data the client previously sent. Note also that it is - acceptable to send data starting at an offset earlier than the - returned ``persisted_size``; in this case, the service will - skip data at offsets that were already persisted (without + returned ``persisted_size``. 
This might be less than the + amount of data the client previously sent. Note also that it + is acceptable to send data starting at an offset earlier than + the returned ``persisted_size``; in this case, the service + skips data at offsets that were already persisted (without checking that it matches the previously written data), and writes only the data starting from the persisted offset. Even - though the data isn't written, it may still incur a + though the data isn't written, it might still incur a performance cost over resuming at the correct write offset. This behavior can make client-side handling simpler in some cases. @@ -3018,27 +3170,26 @@ def write_object( message, unless the object is being finished with ``finish_write`` set to ``true``. - The service will not view the object as complete until the + The service does not view the object as complete until the client has sent a ``WriteObjectRequest`` with ``finish_write`` set to ``true``. Sending any requests on a stream after sending - a request with ``finish_write`` set to ``true`` will cause an - error. The client **should** check the response it receives to - determine how much data the service was able to commit and - whether the service views the object as complete. + a request with ``finish_write`` set to ``true`` causes an error. + The client must check the response it receives to determine how + much data the service is able to commit and whether the service + views the object as complete. - Attempting to resume an already finalized object will result in - an OK status, with a ``WriteObjectResponse`` containing the + Attempting to resume an already finalized object results in an + ``OK`` status, with a ``WriteObjectResponse`` containing the finalized object's metadata. - Alternatively, the BidiWriteObject operation may be used to + Alternatively, you can use the ``BidiWriteObject`` operation to write an object with controls over flushing and the ability to determine the current persisted size. **IAM Permissions**: - Requires ``storage.objects.create`` `IAM - permission `__ - on the bucket. + Requires ``storage.objects.create`` IAM permission on the + bucket. .. code-block:: python @@ -3079,7 +3230,8 @@ def request_generator(): Args: requests (Iterator[google.cloud._storage_v2.types.WriteObjectRequest]): - The request object iterator. Request message for WriteObject. + The request object iterator. Request message for + [WriteObject][google.storage.v2.Storage.WriteObject]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -3090,7 +3242,9 @@ Returns: google.cloud._storage_v2.types.WriteObjectResponse: - Response message for WriteObject. + Response message for + [WriteObject][google.storage.v2.Storage.WriteObject]. + """ # Wrap the RPC method; this adds retry and timeout information, @@ -3121,20 +3275,20 @@ def bidi_write_object( ) -> Iterable[storage.BidiWriteObjectResponse]: r"""Stores a new object and metadata. - This is similar to the WriteObject call with the added support - for manual flushing of persisted state, and the ability to - determine current persisted size without closing the stream. - - The client may specify one or both of the ``state_lookup`` and - ``flush`` fields in each BidiWriteObjectRequest. If ``flush`` is - specified, the data written so far will be persisted to storage.
- If ``state_lookup`` is specified, the service will respond with - a BidiWriteObjectResponse that contains the persisted size. If - both ``flush`` and ``state_lookup`` are specified, the flush - will always occur before a ``state_lookup``, so that both may be - set in the same request and the returned state will be the state - of the object post-flush. When the stream is closed, a - BidiWriteObjectResponse will always be sent to the client, + This is similar to the ``WriteObject`` call with the added + support for manual flushing of persisted state, and the ability + to determine current persisted size without closing the stream. + + The client might specify one or both of the ``state_lookup`` and + ``flush`` fields in each ``BidiWriteObjectRequest``. If + ``flush`` is specified, the data written so far is persisted to + storage. If ``state_lookup`` is specified, the service responds + with a ``BidiWriteObjectResponse`` that contains the persisted + size. If both ``flush`` and ``state_lookup`` are specified, the + flush always occurs before a ``state_lookup``, so that both + might be set in the same request and the returned state is the + state of the object post-flush. When the stream is closed, a + ``BidiWriteObjectResponse`` is always sent to the client, regardless of the value of ``state_lookup``. .. code-block:: python @@ -3177,7 +3331,8 @@ def request_generator(): Args: requests (Iterator[google.cloud._storage_v2.types.BidiWriteObjectRequest]): - The request object iterator. Request message for BidiWriteObject. + The request object iterator. Request message for + [BidiWriteObject][google.storage.v2.Storage.BidiWriteObject]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -3222,11 +3377,10 @@ def list_objects( **IAM Permissions**: - The authenticated user requires ``storage.objects.list`` `IAM - permission `__ - to use this method. To return object ACLs, the authenticated - user must also have the ``storage.objects.getIamPolicy`` - permission. + The authenticated user requires ``storage.objects.list`` IAM + permission to use this method. To return object ACLs, the + authenticated user must also have the + ``storage.objects.getIamPolicy`` permission. .. code-block:: python @@ -3257,7 +3411,8 @@ def sample_list_objects(): Args: request (Union[google.cloud._storage_v2.types.ListObjectsRequest, dict]): - The request object. Request message for ListObjects. + The request object. Request message for + [ListObjects][google.storage.v2.Storage.ListObjects]. parent (str): Required. Name of the bucket in which to look for objects. @@ -3387,17 +3542,20 @@ def sample_rewrite_object(): Args: request (Union[google.cloud._storage_v2.types.RewriteObjectRequest, dict]): - The request object. Request message for RewriteObject. If the source object - is encrypted using a Customer-Supplied Encryption Key - the key information must be provided in the - copy_source_encryption_algorithm, - copy_source_encryption_key_bytes, and - copy_source_encryption_key_sha256_bytes fields. If the - destination object should be encrypted the keying + The request object. Request message for + [RewriteObject][google.storage.v2.Storage.RewriteObject]. + If the source object is encrypted using a + Customer-Supplied Encryption Key the key information + must be provided in the + ``copy_source_encryption_algorithm``, + ``copy_source_encryption_key_bytes``, and + ``copy_source_encryption_key_sha256_bytes`` fields. 
If + the destination object should be encrypted the keying information should be provided in the - encryption_algorithm, encryption_key_bytes, and - encryption_key_sha256_bytes fields of the - common_object_request_params.customer_encryption field. + ``encryption_algorithm``, ``encryption_key_bytes``, and + ``encryption_key_sha256_bytes`` fields of the + ``common_object_request_params.customer_encryption`` + field. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -3458,18 +3616,16 @@ def start_resumable_write( metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> storage.StartResumableWriteResponse: r"""Starts a resumable write operation. This method is part of the - `Resumable - upload `__ - feature. This allows you to upload large objects in multiple - chunks, which is more resilient to network interruptions than a - single upload. The validity duration of the write operation, and - the consequences of it becoming invalid, are service-dependent. + Resumable upload feature. This allows you to upload large + objects in multiple chunks, which is more resilient to network + interruptions than a single upload. The validity duration of the + write operation, and the consequences of it becoming invalid, + are service-dependent. **IAM Permissions**: - Requires ``storage.objects.create`` `IAM - permission `__ - on the bucket. + Requires ``storage.objects.create`` IAM permission on the + bucket. .. code-block:: python @@ -3498,7 +3654,8 @@ def sample_start_resumable_write(): Args: request (Union[google.cloud._storage_v2.types.StartResumableWriteRequest, dict]): - The request object. Request message StartResumableWrite. + The request object. Request message for + [StartResumableWrite][google.storage.v2.Storage.StartResumableWrite]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -3509,7 +3666,9 @@ def sample_start_resumable_write(): Returns: google.cloud._storage_v2.types.StartResumableWriteResponse: - Response object for StartResumableWrite. + Response object for + [StartResumableWrite][google.storage.v2.Storage.StartResumableWrite]. + """ # Create or coerce a protobuf request object. # - Use the request object if provided (there's no risk of modifying the input as @@ -3559,11 +3718,10 @@ def query_write_status( metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> storage.QueryWriteStatusResponse: r"""Determines the ``persisted_size`` of an object that is being - written. This method is part of the `resumable - upload `__ - feature. The returned value is the size of the object that has - been persisted so far. The value can be used as the - ``write_offset`` for the next ``Write()`` call. + written. This method is part of the resumable upload feature. + The returned value is the size of the object that has been + persisted so far. The value can be used as the ``write_offset`` + for the next ``Write()`` call. If the object does not exist, meaning if it was deleted, or the first ``Write()`` has not yet reached the service, this method @@ -3605,7 +3763,8 @@ def sample_query_write_status(): Args: request (Union[google.cloud._storage_v2.types.QueryWriteStatusRequest, dict]): - The request object. Request object for ``QueryWriteStatus``. + The request object. Request object for + [QueryWriteStatus][google.storage.v2.Storage.QueryWriteStatus]. upload_id (str): Required. 
The name of the resume token for the object whose write status @@ -3624,7 +3783,9 @@ def sample_query_write_status(): Returns: google.cloud._storage_v2.types.QueryWriteStatusResponse: - Response object for QueryWriteStatus. + Response object for + [QueryWriteStatus][google.storage.v2.Storage.QueryWriteStatus]. + """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -3691,8 +3852,20 @@ def move_object( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> storage.Object: - r"""Moves the source object to the destination object in - the same bucket. + r"""Moves the source object to the destination object in the same + bucket. This operation moves a source object to a destination + object in the same bucket by renaming the object. The move + itself is an atomic transaction, ensuring all steps either + complete successfully or no changes are made. + + **IAM Permissions**: + + Requires the following IAM permissions to use this method: + + - ``storage.objects.move`` + - ``storage.objects.create`` + - ``storage.objects.delete`` (only required if overwriting an + existing object) .. code-block:: python @@ -3724,7 +3897,8 @@ def sample_move_object(): Args: request (Union[google.cloud._storage_v2.types.MoveObjectRequest, dict]): - The request object. Request message for MoveObject. + The request object. Request message for + [MoveObject][google.storage.v2.Storage.MoveObject]. bucket (str): Required. Name of the bucket in which the object resides. diff --git a/google/cloud/_storage_v2/services/storage/transports/grpc.py b/google/cloud/_storage_v2/services/storage/transports/grpc.py index 73f88c164..fae2d7949 100644 --- a/google/cloud/_storage_v2/services/storage/transports/grpc.py +++ b/google/cloud/_storage_v2/services/storage/transports/grpc.py @@ -120,8 +120,8 @@ class StorageGrpcTransport(StorageTransport): The Cloud Storage gRPC API allows applications to read and write data through the abstractions of buckets and objects. For a - description of these abstractions please see - https://cloud.google.com/storage/docs. + description of these abstractions please see `Cloud Storage + documentation `__. Resources are named as follows: @@ -130,17 +130,24 @@ class StorageGrpcTransport(StorageTransport): ``projects/my-string-id``. - Buckets are named using string names of the form: - ``projects/{project}/buckets/{bucket}`` For globally unique - buckets, ``_`` may be substituted for the project. + ``projects/{project}/buckets/{bucket}``. For globally unique + buckets, ``_`` might be substituted for the project. - Objects are uniquely identified by their name along with the name of the bucket they belong to, as separate strings in this API. For example: - ReadObjectRequest { bucket: 'projects/\_/buckets/my-bucket' - object: 'my-object' } Note that object names can contain ``/`` - characters, which are treated as any other character (no special - directory semantics). + :: + + ``` + ReadObjectRequest { + bucket: 'projects/_/buckets/my-bucket' + object: 'my-object' + } + ``` + + Note that object names can contain ``/`` characters, which are + treated as any other character (no special directory semantics). 
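The same request expressed with the generated Python types, as a sketch; the names are placeholders, and the proto field ``object`` surfaces as ``object_`` in Python:

.. code-block:: python

    from google.cloud import _storage_v2 as storage_v2

    # "_" substitutes for the project in the globally unique bucket
    # namespace, exactly as in the proto example above.
    request = storage_v2.ReadObjectRequest(
        bucket="projects/_/buckets/my-bucket",
        object_="my-object",
    )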
This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation @@ -355,7 +362,28 @@ def grpc_channel(self) -> grpc.Channel: def delete_bucket(self) -> Callable[[storage.DeleteBucketRequest], empty_pb2.Empty]: r"""Return a callable for the delete bucket method over gRPC. - Permanently deletes an empty bucket. + Permanently deletes an empty bucket. The request fails if there + are any live or noncurrent objects in the bucket, but the + request succeeds if the bucket only contains soft-deleted + objects or incomplete uploads, such as ongoing XML API multipart + uploads. Does not permanently delete soft-deleted objects. + + When this API is used to delete a bucket containing an object + that has a soft delete policy enabled, the object becomes soft + deleted, and the ``softDeleteTime`` and ``hardDeleteTime`` + properties are set on the object. + + Objects and multipart uploads that were in the bucket at the + time of deletion are also retained for the specified retention + duration. When a soft-deleted bucket reaches the end of its + retention duration, it is permanently deleted. The + ``hardDeleteTime`` of the bucket always equals or exceeds the + expiration time of the last soft-deleted object in the bucket. + + **IAM Permissions**: + + Requires ``storage.buckets.delete`` IAM permission on the + bucket. Returns: Callable[[~.DeleteBucketRequest], @@ -381,6 +409,16 @@ def get_bucket(self) -> Callable[[storage.GetBucketRequest], storage.Bucket]: Returns metadata for the specified bucket. + **IAM Permissions**: + + Requires ``storage.buckets.get`` IAM permission on the bucket. + Additionally, to return specific bucket metadata, the + authenticated user must have the following permissions: + + - To return the IAM policies: ``storage.buckets.getIamPolicy`` + - To return the bucket IP filtering rules: + ``storage.buckets.getIpFilter`` + Returns: Callable[[~.GetBucketRequest], ~.Bucket]: @@ -405,6 +443,17 @@ def create_bucket(self) -> Callable[[storage.CreateBucketRequest], storage.Bucke Creates a new bucket. + **IAM Permissions**: + + Requires ``storage.buckets.create`` IAM permission on the + bucket. Additionally, to enable specific bucket features, the + authenticated user must have the following permissions: + + - To enable object retention using the ``enableObjectRetention`` + query parameter: ``storage.buckets.enableObjectRetention`` + - To set the bucket IP filtering rules: + ``storage.buckets.setIpFilter`` + Returns: Callable[[~.CreateBucketRequest], ~.Bucket]: @@ -429,7 +478,18 @@ def list_buckets( ) -> Callable[[storage.ListBucketsRequest], storage.ListBucketsResponse]: r"""Return a callable for the list buckets method over gRPC. - Retrieves a list of buckets for a given project. + Retrieves a list of buckets for a given project, ordered + lexicographically by name. + + **IAM Permissions**: + + Requires ``storage.buckets.list`` IAM permission on the bucket. + Additionally, to enable specific bucket features, the + authenticated user must have the following permissions: + + - To list the IAM policies: ``storage.buckets.getIamPolicy`` + - To list the bucket IP filtering rules: + ``storage.buckets.getIpFilter`` Returns: Callable[[~.ListBucketsRequest], @@ -455,7 +515,25 @@ def lock_bucket_retention_policy( ) -> Callable[[storage.LockBucketRetentionPolicyRequest], storage.Bucket]: r"""Return a callable for the lock bucket retention policy method over gRPC. - Locks retention policy on a bucket. 
+ Permanently locks the retention policy that is currently applied + to the specified bucket. + + Caution: Locking a bucket is an irreversible action. Once you + lock a bucket: + + - You cannot remove the retention policy from the bucket. + - You cannot decrease the retention period for the policy. + + Once locked, you must delete the entire bucket in order to + remove the bucket's retention policy. However, before you can + delete the bucket, you must delete all the objects in the + bucket, which is only possible if all the objects have reached + the retention period set by the retention policy. + + **IAM Permissions**: + + Requires ``storage.buckets.update`` IAM permission on the + bucket. Returns: Callable[[~.LockBucketRetentionPolicyRequest], @@ -483,12 +561,18 @@ def get_iam_policy( ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: r"""Return a callable for the get iam policy method over gRPC. - Gets the IAM policy for a specified bucket. The ``resource`` - field in the request should be ``projects/_/buckets/{bucket}`` - for a bucket, or + Gets the IAM policy for a specified bucket or managed folder. + The ``resource`` field in the request should be + ``projects/_/buckets/{bucket}`` for a bucket, or ``projects/_/buckets/{bucket}/managedFolders/{managedFolder}`` for a managed folder. + **IAM Permissions**: + + Requires ``storage.buckets.getIamPolicy`` on the bucket or + ``storage.managedFolders.getIamPolicy`` IAM permission on the + managed folder. + Returns: Callable[[~.GetIamPolicyRequest], ~.Policy]: @@ -513,9 +597,9 @@ def set_iam_policy( ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: r"""Return a callable for the set iam policy method over gRPC. - Updates an IAM policy for the specified bucket. The ``resource`` - field in the request should be ``projects/_/buckets/{bucket}`` - for a bucket, or + Updates an IAM policy for the specified bucket or managed + folder. The ``resource`` field in the request should be + ``projects/_/buckets/{bucket}`` for a bucket, or ``projects/_/buckets/{bucket}/managedFolders/{managedFolder}`` for a managed folder. @@ -577,8 +661,20 @@ def test_iam_permissions( def update_bucket(self) -> Callable[[storage.UpdateBucketRequest], storage.Bucket]: r"""Return a callable for the update bucket method over gRPC. - Updates a bucket. Equivalent to JSON API's - storage.buckets.patch method. + Updates a bucket. Changes to the bucket are readable immediately + after writing, but configuration changes might take time to + propagate. This method supports ``patch`` semantics. + + **IAM Permissions**: + + Requires ``storage.buckets.update`` IAM permission on the + bucket. Additionally, to enable specific bucket features, the + authenticated user must have the following permissions: + + - To set bucket IP filtering rules: + ``storage.buckets.setIpFilter`` + - To update public access prevention policies or access control + lists (ACLs): ``storage.buckets.setIamPolicy`` Returns: Callable[[~.UpdateBucketRequest], @@ -604,8 +700,19 @@ def compose_object( ) -> Callable[[storage.ComposeObjectRequest], storage.Object]: r"""Return a callable for the compose object method over gRPC. - Concatenates a list of existing objects into a new - object in the same bucket. + Concatenates a list of existing objects into a new object in the + same bucket. The existing source objects are unaffected by this + operation. 
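For instance, stitching two existing objects into one composite object, as a sketch; the bucket and object names are placeholders, and the field names follow the ``ComposeObjectRequest`` message:

.. code-block:: python

    from google.cloud import _storage_v2 as storage_v2

    client = storage_v2.StorageClient()
    request = storage_v2.ComposeObjectRequest(
        destination=storage_v2.Object(
            bucket="projects/_/buckets/my-bucket",
            name="composite-object",
        ),
        # Source objects are named relative to the destination bucket.
        source_objects=[
            storage_v2.ComposeObjectRequest.SourceObject(name="part-1"),
            storage_v2.ComposeObjectRequest.SourceObject(name="part-2"),
        ],
    )
    composite = client.compose_object(request=request)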
+ + **IAM Permissions**: + + Requires the ``storage.objects.create`` and + ``storage.objects.get`` IAM permissions to use this method. If + the new composite object overwrites an existing object, the + authenticated user must also have the ``storage.objects.delete`` + permission. If the request body includes the retention property, + the authenticated user must also have the + ``storage.objects.setRetention`` IAM permission. Returns: Callable[[~.ComposeObjectRequest], @@ -631,15 +738,13 @@ def delete_object(self) -> Callable[[storage.DeleteObjectRequest], empty_pb2.Emp Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation - parameter is used, or if `soft - delete `__ is - not enabled for the bucket. When this API is used to delete an - object from a bucket that has soft delete policy enabled, the - object becomes soft deleted, and the ``softDeleteTime`` and - ``hardDeleteTime`` properties are set on the object. This API - cannot be used to permanently delete soft-deleted objects. - Soft-deleted objects are permanently deleted according to their - ``hardDeleteTime``. + parameter is used, or if soft delete is not enabled for the + bucket. When this API is used to delete an object from a bucket + that has soft delete policy enabled, the object becomes soft + deleted, and the ``softDeleteTime`` and ``hardDeleteTime`` + properties are set on the object. This API cannot be used to + permanently delete soft-deleted objects. Soft-deleted objects + are permanently deleted according to their ``hardDeleteTime``. You can use the [``RestoreObject``][google.storage.v2.Storage.RestoreObject] API @@ -648,9 +753,8 @@ def delete_object(self) -> Callable[[storage.DeleteObjectRequest], empty_pb2.Emp **IAM Permissions**: - Requires ``storage.objects.delete`` `IAM - permission `__ - on the bucket. + Requires ``storage.objects.delete`` IAM permission on the + bucket. Returns: Callable[[~.DeleteObjectRequest], @@ -676,7 +780,44 @@ def restore_object( ) -> Callable[[storage.RestoreObjectRequest], storage.Object]: r"""Return a callable for the restore object method over gRPC. - Restores a soft-deleted object. + Restores a soft-deleted object. When a soft-deleted object is + restored, a new copy of that object is created in the same + bucket and inherits the same metadata as the soft-deleted + object. The inherited metadata is the metadata that existed when + the original object became soft deleted, with the following + exceptions: + + - The ``createTime`` of the new object is set to the time at + which the soft-deleted object was restored. + - The ``softDeleteTime`` and ``hardDeleteTime`` values are + cleared. + - A new generation is assigned and the metageneration is reset + to 1. + - If the soft-deleted object was in a bucket that had Autoclass + enabled, the new object is restored to Standard storage. + - The restored object inherits the bucket's default object ACL, + unless ``copySourceAcl`` is ``true``. + + If a live object using the same name already exists in the + bucket and becomes overwritten, the live object becomes a + noncurrent object if Object Versioning is enabled on the bucket. + If Object Versioning is not enabled, the live object becomes + soft deleted. 
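For example, restoring a specific soft-deleted generation, as a sketch; the names and the generation number are placeholders:

.. code-block:: python

    from google.cloud import _storage_v2 as storage_v2

    client = storage_v2.StorageClient()
    request = storage_v2.RestoreObjectRequest(
        bucket="projects/_/buckets/my-bucket",
        object_="my-object",
        # Generation of the soft-deleted copy to restore.
        generation=123456789,
    )
    restored = client.restore_object(request=request)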
+ + **IAM Permissions**: + + Requires the following IAM permissions to use this method: + + - ``storage.objects.restore`` + - ``storage.objects.create`` + - ``storage.objects.delete`` (only required if overwriting an + existing object) + - ``storage.objects.getIamPolicy`` (only required if + ``projection`` is ``full`` and the relevant bucket has uniform + bucket-level access disabled) + - ``storage.objects.setIamPolicy`` (only required if + ``copySourceAcl`` is ``true`` and the relevant bucket has + uniform bucket-level access disabled) Returns: Callable[[~.RestoreObjectRequest], @@ -707,11 +848,11 @@ def cancel_resumable_write( Cancels an in-progress resumable upload. Any attempts to write to the resumable upload after - cancelling the upload will fail. + cancelling the upload fail. - The behavior for currently in progress write operations - is not guaranteed - they could either complete before - the cancellation or fail if the cancellation completes + The behavior for any in-progress write operations is not + guaranteed; they could either complete before the + cancellation or fail if the cancellation completes first. Returns: @@ -740,10 +881,9 @@ def get_object(self) -> Callable[[storage.GetObjectRequest], storage.Object]: **IAM Permissions**: - Requires ``storage.objects.get`` `IAM - permission `__ - on the bucket. To return object ACLs, the authenticated user - must also have the ``storage.objects.getIamPolicy`` permission. + Requires ``storage.objects.get`` IAM permission on the bucket. + To return object ACLs, the authenticated user must also have the + ``storage.objects.getIamPolicy`` permission. Returns: Callable[[~.GetObjectRequest], @@ -773,9 +913,7 @@ def read_object( **IAM Permissions**: - Requires ``storage.objects.get`` `IAM - permission `__ - on the bucket. + Requires ``storage.objects.get`` IAM permission on the bucket. Returns: Callable[[~.ReadObjectRequest], @@ -803,25 +941,17 @@ def bidi_read_object( Reads an object's data. - This is a bi-directional API with the added support for reading - multiple ranges within one stream both within and across - multiple messages. If the server encountered an error for any of - the inputs, the stream will be closed with the relevant error - code. Because the API allows for multiple outstanding requests, - when the stream is closed the error response will contain a - BidiReadObjectRangesError proto in the error extension - describing the error for each outstanding read_id. + This bi-directional API reads data from an object, allowing you + to request multiple data ranges within a single stream, even + across several messages. If an error occurs with any request, + the stream closes with a relevant error code. Since you can have + multiple outstanding requests, the error response includes a + ``BidiReadObjectRangesError`` field detailing the specific error + for each pending ``read_id``. **IAM Permissions**: - Requires ``storage.objects.get`` - - `IAM - permission `__ - on the bucket. - - This API is currently in preview and is not yet available for - general use. + Requires ``storage.objects.get`` IAM permission on the bucket. Returns: Callable[[~.BidiReadObjectRequest], @@ -845,8 +975,13 @@ def bidi_read_object( def update_object(self) -> Callable[[storage.UpdateObjectRequest], storage.Object]: r"""Return a callable for the update object method over gRPC. - Updates an object's metadata. - Equivalent to JSON API's storage.objects.patch. + Updates an object's metadata. Equivalent to JSON API's + ``storage.objects.patch`` method. 
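For example, patching a single metadata field, as a sketch; as with ``update_bucket``, the mask lists exactly the fields being changed:

.. code-block:: python

    from google.cloud import _storage_v2 as storage_v2
    from google.protobuf import field_mask_pb2

    client = storage_v2.StorageClient()
    request = storage_v2.UpdateObjectRequest(
        object_=storage_v2.Object(
            bucket="projects/_/buckets/my-bucket",
            name="my-object",
            content_type="text/plain",
        ),
        update_mask=field_mask_pb2.FieldMask(paths=["content_type"]),
    )
    updated = client.update_object(request=request)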
+ + **IAM Permissions**: + + Requires ``storage.objects.update`` IAM permission on the + bucket. Returns: Callable[[~.UpdateObjectRequest], @@ -892,32 +1027,32 @@ def write_object( - Check the result Status of the stream, to determine if writing can be resumed on this stream or must be restarted from scratch (by calling ``StartResumableWrite()``). The resumable - errors are DEADLINE_EXCEEDED, INTERNAL, and UNAVAILABLE. For - each case, the client should use binary exponential backoff - before retrying. Additionally, writes can be resumed after - RESOURCE_EXHAUSTED errors, but only after taking appropriate - measures, which may include reducing aggregate send rate - across clients and/or requesting a quota increase for your - project. + errors are ``DEADLINE_EXCEEDED``, ``INTERNAL``, and + ``UNAVAILABLE``. For each case, the client should use binary + exponential backoff before retrying. Additionally, writes can + be resumed after ``RESOURCE_EXHAUSTED`` errors, but only after + taking appropriate measures, which might include reducing + aggregate send rate across clients and/or requesting a quota + increase for your project. - If the call to ``WriteObject`` returns ``ABORTED``, that indicates concurrent attempts to update the resumable write, caused either by multiple racing clients or by a single client where the previous request was timed out on the client side but nonetheless reached the server. In this case the client - should take steps to prevent further concurrent writes (e.g., - increase the timeouts, stop using more than one process to - perform the upload, etc.), and then should follow the steps - below for resuming the upload. + should take steps to prevent further concurrent writes. For + example, increase the timeouts and stop using more than one + process to perform the upload. Follow the steps below for + resuming the upload. - For resumable errors, the client should call ``QueryWriteStatus()`` and then continue writing from the - returned ``persisted_size``. This may be less than the amount - of data the client previously sent. Note also that it is - acceptable to send data starting at an offset earlier than the - returned ``persisted_size``; in this case, the service will - skip data at offsets that were already persisted (without + returned ``persisted_size``. This might be less than the + amount of data the client previously sent. Note also that it + is acceptable to send data starting at an offset earlier than + the returned ``persisted_size``; in this case, the service + skips data at offsets that were already persisted (without checking that it matches the previously written data), and writes only the data starting from the persisted offset. Even - though the data isn't written, it may still incur a + though the data isn't written, it might still incur a performance cost over resuming at the correct write offset. This behavior can make client-side handling simpler in some cases. @@ -925,27 +1060,26 @@ def write_object( message, unless the object is being finished with ``finish_write`` set to ``true``. - The service will not view the object as complete until the + The service does not view the object as complete until the client has sent a ``WriteObjectRequest`` with ``finish_write`` set to ``true``. Sending any requests on a stream after sending - a request with ``finish_write`` set to ``true`` will cause an - error.
The client **should** check the response it receives to - determine how much data the service was able to commit and - whether the service views the object as complete. + a request with ``finish_write`` set to ``true`` causes an error. + The client must check the response it receives to determine how + much data the service is able to commit and whether the service + views the object as complete. - Attempting to resume an already finalized object will result in - an OK status, with a ``WriteObjectResponse`` containing the + Attempting to resume an already finalized object results in an + ``OK`` status, with a ``WriteObjectResponse`` containing the finalized object's metadata. - Alternatively, the BidiWriteObject operation may be used to + Alternatively, you can use the ``BidiWriteObject`` operation to write an object with controls over flushing and the ability to determine the current persisted size. **IAM Permissions**: - Requires ``storage.objects.create`` `IAM - permission `__ - on the bucket. + Requires ``storage.objects.create`` IAM permission on the + bucket. Returns: Callable[[~.WriteObjectRequest], @@ -973,20 +1107,20 @@ def bidi_write_object( Stores a new object and metadata. - This is similar to the WriteObject call with the added support - for manual flushing of persisted state, and the ability to - determine current persisted size without closing the stream. - - The client may specify one or both of the ``state_lookup`` and - ``flush`` fields in each BidiWriteObjectRequest. If ``flush`` is - specified, the data written so far will be persisted to storage. - If ``state_lookup`` is specified, the service will respond with - a BidiWriteObjectResponse that contains the persisted size. If - both ``flush`` and ``state_lookup`` are specified, the flush - will always occur before a ``state_lookup``, so that both may be - set in the same request and the returned state will be the state - of the object post-flush. When the stream is closed, a - BidiWriteObjectResponse will always be sent to the client, + This is similar to the ``WriteObject`` call with the added + support for manual flushing of persisted state, and the ability + to determine current persisted size without closing the stream. + + The client might specify one or both of the ``state_lookup`` and + ``flush`` fields in each ``BidiWriteObjectRequest``. If + ``flush`` is specified, the data written so far is persisted to + storage. If ``state_lookup`` is specified, the service responds + with a ``BidiWriteObjectResponse`` that contains the persisted + size. If both ``flush`` and ``state_lookup`` are specified, the + flush always occurs before a ``state_lookup``, so that both + might be set in the same request and the returned state is the + state of the object post-flush. When the stream is closed, a + ``BidiWriteObjectResponse`` is always sent to the client, regardless of the value of ``state_lookup``. Returns: @@ -1017,11 +1151,10 @@ def list_objects( **IAM Permissions**: - The authenticated user requires ``storage.objects.list`` `IAM - permission `__ - to use this method. To return object ACLs, the authenticated - user must also have the ``storage.objects.getIamPolicy`` - permission. + The authenticated user requires ``storage.objects.list`` IAM + permission to use this method. To return object ACLs, the + authenticated user must also have the + ``storage.objects.getIamPolicy`` permission.
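For example, iterating over a bucket's objects through the pager returned by the client, as a sketch; the bucket name is a placeholder:

.. code-block:: python

    from google.cloud import _storage_v2 as storage_v2

    client = storage_v2.StorageClient()
    request = storage_v2.ListObjectsRequest(
        parent="projects/_/buckets/my-bucket",
    )
    # The pager fetches additional pages transparently as needed.
    for obj in client.list_objects(request=request):
        print(obj.name)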
Returns: Callable[[~.ListObjectsRequest], @@ -1077,18 +1210,16 @@ def start_resumable_write( r"""Return a callable for the start resumable write method over gRPC. Starts a resumable write operation. This method is part of the - `Resumable - upload `__ - feature. This allows you to upload large objects in multiple - chunks, which is more resilient to network interruptions than a - single upload. The validity duration of the write operation, and - the consequences of it becoming invalid, are service-dependent. + Resumable upload feature. This allows you to upload large + objects in multiple chunks, which is more resilient to network + interruptions than a single upload. The validity duration of the + write operation, and the consequences of it becoming invalid, + are service-dependent. **IAM Permissions**: - Requires ``storage.objects.create`` `IAM - permission `__ - on the bucket. + Requires ``storage.objects.create`` IAM permission on the + bucket. Returns: Callable[[~.StartResumableWriteRequest], @@ -1115,11 +1246,10 @@ def query_write_status( r"""Return a callable for the query write status method over gRPC. Determines the ``persisted_size`` of an object that is being - written. This method is part of the `resumable - upload `__ - feature. The returned value is the size of the object that has - been persisted so far. The value can be used as the - ``write_offset`` for the next ``Write()`` call. + written. This method is part of the resumable upload feature. + The returned value is the size of the object that has been + persisted so far. The value can be used as the ``write_offset`` + for the next ``Write()`` call. If the object does not exist, meaning if it was deleted, or the first ``Write()`` has not yet reached the service, this method @@ -1155,8 +1285,20 @@ def query_write_status( def move_object(self) -> Callable[[storage.MoveObjectRequest], storage.Object]: r"""Return a callable for the move object method over gRPC. - Moves the source object to the destination object in - the same bucket. + Moves the source object to the destination object in the same + bucket. This operation moves a source object to a destination + object in the same bucket by renaming the object. The move + itself is an atomic transaction, ensuring all steps either + complete successfully or no changes are made. + + **IAM Permissions**: + + Requires the following IAM permissions to use this method: + + - ``storage.objects.move`` + - ``storage.objects.create`` + - ``storage.objects.delete`` (only required if overwriting an + existing object) Returns: Callable[[~.MoveObjectRequest], diff --git a/google/cloud/_storage_v2/services/storage/transports/grpc_asyncio.py b/google/cloud/_storage_v2/services/storage/transports/grpc_asyncio.py index f0c9e5721..be54eb3b0 100644 --- a/google/cloud/_storage_v2/services/storage/transports/grpc_asyncio.py +++ b/google/cloud/_storage_v2/services/storage/transports/grpc_asyncio.py @@ -126,8 +126,8 @@ class StorageGrpcAsyncIOTransport(StorageTransport): The Cloud Storage gRPC API allows applications to read and write data through the abstractions of buckets and objects. For a - description of these abstractions please see - https://cloud.google.com/storage/docs. + description of these abstractions please see `Cloud Storage + documentation `__. Resources are named as follows: @@ -136,17 +136,24 @@ class StorageGrpcAsyncIOTransport(StorageTransport): ``projects/my-string-id``. 
- Buckets are named using string names of the form: - ``projects/{project}/buckets/{bucket}`` For globally unique - buckets, ``_`` may be substituted for the project. + ``projects/{project}/buckets/{bucket}``. For globally unique + buckets, ``_`` might be substituted for the project. - Objects are uniquely identified by their name along with the name of the bucket they belong to, as separate strings in this API. For example: - ReadObjectRequest { bucket: 'projects/\_/buckets/my-bucket' - object: 'my-object' } Note that object names can contain ``/`` - characters, which are treated as any other character (no special - directory semantics). + :: + + ``` + ReadObjectRequest { + bucket: 'projects/_/buckets/my-bucket' + object: 'my-object' + } + ``` + + Note that object names can contain ``/`` characters, which are + treated as any other character (no special directory semantics). This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation @@ -365,7 +372,28 @@ def delete_bucket( ) -> Callable[[storage.DeleteBucketRequest], Awaitable[empty_pb2.Empty]]: r"""Return a callable for the delete bucket method over gRPC. - Permanently deletes an empty bucket. + Permanently deletes an empty bucket. The request fails if there + are any live or noncurrent objects in the bucket, but the + request succeeds if the bucket only contains soft-deleted + objects or incomplete uploads, such as ongoing XML API multipart + uploads. Does not permanently delete soft-deleted objects. + + When this API is used to delete a bucket containing an object + that has a soft delete policy enabled, the object becomes soft + deleted, and the ``softDeleteTime`` and ``hardDeleteTime`` + properties are set on the object. + + Objects and multipart uploads that were in the bucket at the + time of deletion are also retained for the specified retention + duration. When a soft-deleted bucket reaches the end of its + retention duration, it is permanently deleted. The + ``hardDeleteTime`` of the bucket always equals or exceeds the + expiration time of the last soft-deleted object in the bucket. + + **IAM Permissions**: + + Requires ``storage.buckets.delete`` IAM permission on the + bucket. Returns: Callable[[~.DeleteBucketRequest], @@ -393,6 +421,16 @@ def get_bucket( Returns metadata for the specified bucket. + **IAM Permissions**: + + Requires ``storage.buckets.get`` IAM permission on the bucket. + Additionally, to return specific bucket metadata, the + authenticated user must have the following permissions: + + - To return the IAM policies: ``storage.buckets.getIamPolicy`` + - To return the bucket IP filtering rules: + ``storage.buckets.getIpFilter`` + Returns: Callable[[~.GetBucketRequest], Awaitable[~.Bucket]]: @@ -419,6 +457,17 @@ def create_bucket( Creates a new bucket. + **IAM Permissions**: + + Requires ``storage.buckets.create`` IAM permission on the + bucket. Additionally, to enable specific bucket features, the + authenticated user must have the following permissions: + + - To enable object retention using the ``enableObjectRetention`` + query parameter: ``storage.buckets.enableObjectRetention`` + - To set the bucket IP filtering rules: + ``storage.buckets.setIpFilter`` + Returns: Callable[[~.CreateBucketRequest], Awaitable[~.Bucket]]: @@ -443,7 +492,18 @@ def list_buckets( ) -> Callable[[storage.ListBucketsRequest], Awaitable[storage.ListBucketsResponse]]: r"""Return a callable for the list buckets method over gRPC. 
- Retrieves a list of buckets for a given project. + Retrieves a list of buckets for a given project, ordered + lexicographically by name. + + **IAM Permissions**: + + Requires ``storage.buckets.list`` IAM permission on the bucket. + Additionally, to enable specific bucket features, the + authenticated user must have the following permissions: + + - To list the IAM policies: ``storage.buckets.getIamPolicy`` + - To list the bucket IP filtering rules: + ``storage.buckets.getIpFilter`` Returns: Callable[[~.ListBucketsRequest], @@ -471,7 +531,25 @@ def lock_bucket_retention_policy( ]: r"""Return a callable for the lock bucket retention policy method over gRPC. - Locks retention policy on a bucket. + Permanently locks the retention policy that is currently applied + to the specified bucket. + + Caution: Locking a bucket is an irreversible action. Once you + lock a bucket: + + - You cannot remove the retention policy from the bucket. + - You cannot decrease the retention period for the policy. + + Once locked, you must delete the entire bucket in order to + remove the bucket's retention policy. However, before you can + delete the bucket, you must delete all the objects in the + bucket, which is only possible if all the objects have reached + the retention period set by the retention policy. + + **IAM Permissions**: + + Requires ``storage.buckets.update`` IAM permission on the + bucket. Returns: Callable[[~.LockBucketRetentionPolicyRequest], @@ -499,12 +577,18 @@ def get_iam_policy( ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], Awaitable[policy_pb2.Policy]]: r"""Return a callable for the get iam policy method over gRPC. - Gets the IAM policy for a specified bucket. The ``resource`` - field in the request should be ``projects/_/buckets/{bucket}`` - for a bucket, or + Gets the IAM policy for a specified bucket or managed folder. + The ``resource`` field in the request should be + ``projects/_/buckets/{bucket}`` for a bucket, or ``projects/_/buckets/{bucket}/managedFolders/{managedFolder}`` for a managed folder. + **IAM Permissions**: + + Requires ``storage.buckets.getIamPolicy`` on the bucket or + ``storage.managedFolders.getIamPolicy`` IAM permission on the + managed folder. + Returns: Callable[[~.GetIamPolicyRequest], Awaitable[~.Policy]]: @@ -529,9 +613,9 @@ def set_iam_policy( ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], Awaitable[policy_pb2.Policy]]: r"""Return a callable for the set iam policy method over gRPC. - Updates an IAM policy for the specified bucket. The ``resource`` - field in the request should be ``projects/_/buckets/{bucket}`` - for a bucket, or + Updates an IAM policy for the specified bucket or managed + folder. The ``resource`` field in the request should be + ``projects/_/buckets/{bucket}`` for a bucket, or ``projects/_/buckets/{bucket}/managedFolders/{managedFolder}`` for a managed folder. @@ -595,8 +679,20 @@ def update_bucket( ) -> Callable[[storage.UpdateBucketRequest], Awaitable[storage.Bucket]]: r"""Return a callable for the update bucket method over gRPC. - Updates a bucket. Equivalent to JSON API's - storage.buckets.patch method. + Updates a bucket. Changes to the bucket are readable immediately + after writing, but configuration changes might take time to + propagate. This method supports ``patch`` semantics. + + **IAM Permissions**: + + Requires ``storage.buckets.update`` IAM permission on the + bucket. 
Additionally, to enable specific bucket features, the + authenticated user must have the following permissions: + + - To set bucket IP filtering rules: + ``storage.buckets.setIpFilter`` + - To update public access prevention policies or access control + lists (ACLs): ``storage.buckets.setIamPolicy`` Returns: Callable[[~.UpdateBucketRequest], @@ -622,8 +718,19 @@ def compose_object( ) -> Callable[[storage.ComposeObjectRequest], Awaitable[storage.Object]]: r"""Return a callable for the compose object method over gRPC. - Concatenates a list of existing objects into a new - object in the same bucket. + Concatenates a list of existing objects into a new object in the + same bucket. The existing source objects are unaffected by this + operation. + + **IAM Permissions**: + + Requires the ``storage.objects.create`` and + ``storage.objects.get`` IAM permissions to use this method. If + the new composite object overwrites an existing object, the + authenticated user must also have the ``storage.objects.delete`` + permission. If the request body includes the retention property, + the authenticated user must also have the + ``storage.objects.setRetention`` IAM permission. Returns: Callable[[~.ComposeObjectRequest], @@ -651,15 +758,13 @@ def delete_object( Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation - parameter is used, or if `soft - delete `__ is - not enabled for the bucket. When this API is used to delete an - object from a bucket that has soft delete policy enabled, the - object becomes soft deleted, and the ``softDeleteTime`` and - ``hardDeleteTime`` properties are set on the object. This API - cannot be used to permanently delete soft-deleted objects. - Soft-deleted objects are permanently deleted according to their - ``hardDeleteTime``. + parameter is used, or if soft delete is not enabled for the + bucket. When this API is used to delete an object from a bucket + that has soft delete policy enabled, the object becomes soft + deleted, and the ``softDeleteTime`` and ``hardDeleteTime`` + properties are set on the object. This API cannot be used to + permanently delete soft-deleted objects. Soft-deleted objects + are permanently deleted according to their ``hardDeleteTime``. You can use the [``RestoreObject``][google.storage.v2.Storage.RestoreObject] API @@ -668,9 +773,8 @@ def delete_object( **IAM Permissions**: - Requires ``storage.objects.delete`` `IAM - permission `__ - on the bucket. + Requires ``storage.objects.delete`` IAM permission on the + bucket. Returns: Callable[[~.DeleteObjectRequest], @@ -696,7 +800,44 @@ def restore_object( ) -> Callable[[storage.RestoreObjectRequest], Awaitable[storage.Object]]: r"""Return a callable for the restore object method over gRPC. - Restores a soft-deleted object. + Restores a soft-deleted object. When a soft-deleted object is + restored, a new copy of that object is created in the same + bucket and inherits the same metadata as the soft-deleted + object. The inherited metadata is the metadata that existed when + the original object became soft deleted, with the following + exceptions: + + - The ``createTime`` of the new object is set to the time at + which the soft-deleted object was restored. + - The ``softDeleteTime`` and ``hardDeleteTime`` values are + cleared. + - A new generation is assigned and the metageneration is reset + to 1. + - If the soft-deleted object was in a bucket that had Autoclass + enabled, the new object is restored to Standard storage. 
+ - The restored object inherits the bucket's default object ACL, + unless ``copySourceAcl`` is ``true``. + + If a live object using the same name already exists in the + bucket and is overwritten, the live object becomes a + noncurrent object if Object Versioning is enabled on the bucket. + If Object Versioning is not enabled, the live object becomes + soft deleted. + + **IAM Permissions**: + + Requires the following IAM permissions to use this method: + + - ``storage.objects.restore`` + - ``storage.objects.create`` + - ``storage.objects.delete`` (only required if overwriting an + existing object) + - ``storage.objects.getIamPolicy`` (only required if + ``projection`` is ``full`` and the relevant bucket has uniform + bucket-level access disabled) + - ``storage.objects.setIamPolicy`` (only required if + ``copySourceAcl`` is ``true`` and the relevant bucket has + uniform bucket-level access disabled) Returns: Callable[[~.RestoreObjectRequest], @@ -728,11 +869,11 @@ def cancel_resumable_write( Cancels an in-progress resumable upload. Any attempts to write to the resumable upload after - cancelling the upload will fail. + cancelling the upload fail. - The behavior for currently in progress write operations - is not guaranteed - they could either complete before - the cancellation or fail if the cancellation completes + The behavior for any in-progress write operations is not + guaranteed; they could either complete before the + cancellation or fail if the cancellation completes first. Returns: @@ -763,10 +904,9 @@ def get_object( **IAM Permissions**: - Requires ``storage.objects.get`` `IAM - permission `__ - on the bucket. To return object ACLs, the authenticated user - must also have the ``storage.objects.getIamPolicy`` permission. + Requires ``storage.objects.get`` IAM permission on the bucket. + To return object ACLs, the authenticated user must also have the + ``storage.objects.getIamPolicy`` permission. Returns: Callable[[~.GetObjectRequest], @@ -796,9 +936,7 @@ def read_object( **IAM Permissions**: - Requires ``storage.objects.get`` `IAM - permission `__ - on the bucket. + Requires ``storage.objects.get`` IAM permission on the bucket. Returns: Callable[[~.ReadObjectRequest], @@ -828,25 +966,17 @@ def bidi_read_object( Reads an object's data. - This is a bi-directional API with the added support for reading - multiple ranges within one stream both within and across - multiple messages. If the server encountered an error for any of - the inputs, the stream will be closed with the relevant error - code. Because the API allows for multiple outstanding requests, - when the stream is closed the error response will contain a - BidiReadObjectRangesError proto in the error extension - describing the error for each outstanding read_id. + This bi-directional API reads data from an object, allowing you + to request multiple data ranges within a single stream, even + across several messages. If an error occurs with any request, + the stream closes with a relevant error code. Since you can have + multiple outstanding requests, the error response includes a + ``BidiReadObjectRangesError`` field detailing the specific error + for each pending ``read_id``. **IAM Permissions**: - Requires ``storage.objects.get`` - - `IAM - permission `__ - on the bucket. - - This API is currently in preview and is not yet available for - general use. + Requires ``storage.objects.get`` IAM permission on the bucket.
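Editorial aside (not part of the generated diff): the ranged-read protocol described above is easiest to see with the request types this change documents. A minimal sketch, assuming the classes defined in google/cloud/_storage_v2/types/storage.py; the bucket name, object name, and ranges are hypothetical, and wiring the message into an actual bidi_read_object streaming call is not shown:

```python
# Sketch only: building the first message of a BidiReadObject stream
# with two outstanding ranges. Type names match the classes defined in
# google/cloud/_storage_v2/types/storage.py; the bucket, object, and
# range values are hypothetical.
from google.cloud._storage_v2.types.storage import (
    BidiReadObjectRequest,
    BidiReadObjectSpec,
    ReadRange,
)

first_message = BidiReadObjectRequest(
    # Only the first message on the stream carries the spec.
    read_object_spec=BidiReadObjectSpec(
        bucket="projects/_/buckets/my-bucket",
        object_="my-object",
    ),
    read_ranges=[
        # Distinct read_id values let responses be mapped back to the
        # range that requested them; ordering across ranges is not
        # guaranteed.
        ReadRange(read_offset=0, read_length=64 * 1024, read_id=1),
        # Negative offset with read_length=0 reads to the resource end:
        # here, the last 4 KiB of the object.
        ReadRange(read_offset=-4096, read_length=0, read_id=2),
    ],
)
```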
Returns: Callable[[~.BidiReadObjectRequest], Awaitable[~.BidiReadObjectResponse]]: @@ -872,8 +1002,13 @@ def update_object( ) -> Callable[[storage.UpdateObjectRequest], Awaitable[storage.Object]]: r"""Return a callable for the update object method over gRPC. - Updates an object's metadata. - Equivalent to JSON API's storage.objects.patch. + Updates an object's metadata. Equivalent to JSON API's + ``storage.objects.patch`` method. + + **IAM Permissions**: + + Requires ``storage.objects.update`` IAM permission on the + bucket. Returns: Callable[[~.UpdateObjectRequest], @@ -919,32 +1054,32 @@ def write_object( - Check the result Status of the stream, to determine if writing can be resumed on this stream or must be restarted from scratch (by calling ``StartResumableWrite()``). The resumable - errors are DEADLINE_EXCEEDED, INTERNAL, and UNAVAILABLE. For - each case, the client should use binary exponential backoff - before retrying. Additionally, writes can be resumed after - RESOURCE_EXHAUSTED errors, but only after taking appropriate - measures, which may include reducing aggregate send rate - across clients and/or requesting a quota increase for your - project. + errors are ``DEADLINE_EXCEEDED``, ``INTERNAL``, and + ``UNAVAILABLE``. For each case, the client should use binary + exponential backoff before retrying. Additionally, writes can + be resumed after ``RESOURCE_EXHAUSTED`` errors, but only after + taking appropriate measures, which might include reducing + aggregate send rate across clients and/or requesting a quota + increase for your project. - If the call to ``WriteObject`` returns ``ABORTED``, that indicates concurrent attempts to update the resumable write, caused either by multiple racing clients or by a single client where the previous request was timed out on the client side but nonetheless reached the server. In this case the client - should take steps to prevent further concurrent writes (e.g., - increase the timeouts, stop using more than one process to - perform the upload, etc.), and then should follow the steps - below for resuming the upload. + should take steps to prevent further concurrent writes. For + example, increase the timeouts and stop using more than one + process to perform the upload. Follow the steps below for + resuming the upload. - For resumable errors, the client should call ``QueryWriteStatus()`` and then continue writing from the - returned ``persisted_size``. This may be less than the amount - of data the client previously sent. Note also that it is - acceptable to send data starting at an offset earlier than the - returned ``persisted_size``; in this case, the service will - skip data at offsets that were already persisted (without + returned ``persisted_size``. This might be less than the + amount of data the client previously sent. Note also that it + is acceptable to send data starting at an offset earlier than + the returned ``persisted_size``; in this case, the service + skips data at offsets that were already persisted (without - checking that it matches the previously written data), and - write only the data starting from the persisted offset. Even + checking that it matches the previously written data), and + writes only the data starting from the persisted offset. Even - though the data isn't written, it may still incur a + though the data isn't written, it might still incur a performance cost over resuming at the correct write offset. This behavior can make client-side handling simpler in some cases. @@ -952,27 +1087,26 @@ message, unless the object is being finished with ``finish_write`` set to ``true``.
- The service will not view the object as complete until the + The service does not view the object as complete until the client has sent a ``WriteObjectRequest`` with ``finish_write`` set to ``true``. Sending any requests on a stream after sending - a request with ``finish_write`` set to ``true`` will cause an - error. The client **should** check the response it receives to - determine how much data the service was able to commit and - whether the service views the object as complete. + a request with ``finish_write`` set to ``true`` causes an error. + The client must check the response it receives to determine how + much data the service was able to commit and whether the service + views the object as complete. - Attempting to resume an already finalized object will result in - an OK status, with a ``WriteObjectResponse`` containing the + Attempting to resume an already finalized object results in an + ``OK`` status, with a ``WriteObjectResponse`` containing the finalized object's metadata. - Alternatively, the BidiWriteObject operation may be used to + Alternatively, you can use the ``BidiWriteObject`` operation to - write an object with controls over flushing and the ability to - fetch the ability to determine the current persisted size. + write an object with controls over flushing and the ability to + determine the current persisted size. **IAM Permissions**: - Requires ``storage.objects.create`` `IAM - permission `__ - on the bucket. + Requires ``storage.objects.create`` IAM permission on the + bucket. Returns: Callable[[~.WriteObjectRequest], @@ -1002,20 +1136,20 @@ def bidi_write_object( Stores a new object and metadata. - This is similar to the WriteObject call with the added support - for manual flushing of persisted state, and the ability to - determine current persisted size without closing the stream. - - The client may specify one or both of the ``state_lookup`` and - ``flush`` fields in each BidiWriteObjectRequest. If ``flush`` is - specified, the data written so far will be persisted to storage. - If ``state_lookup`` is specified, the service will respond with - a BidiWriteObjectResponse that contains the persisted size. If - both ``flush`` and ``state_lookup`` are specified, the flush - will always occur before a ``state_lookup``, so that both may be - set in the same request and the returned state will be the state - of the object post-flush. When the stream is closed, a - BidiWriteObjectResponse will always be sent to the client, + This is similar to the ``WriteObject`` call with the added + support for manual flushing of persisted state, and the ability + to determine current persisted size without closing the stream. + + The client might specify one or both of the ``state_lookup`` and + ``flush`` fields in each ``BidiWriteObjectRequest``. If + ``flush`` is specified, the data written so far is persisted to + storage. If ``state_lookup`` is specified, the service responds + with a ``BidiWriteObjectResponse`` that contains the persisted + size. If both ``flush`` and ``state_lookup`` are specified, the + flush always occurs before a ``state_lookup``, so that both + might be set in the same request and the returned state is the + state of the object post-flush. When the stream is closed, a + ``BidiWriteObjectResponse`` is always sent to the client, regardless of the value of ``state_lookup``. Returns: @@ -1046,11 +1180,10 @@ def list_objects( **IAM Permissions**: - The authenticated user requires ``storage.objects.list`` `IAM - permission `__ - to use this method. To return object ACLs, the authenticated - user must also have the ``storage.objects.getIamPolicy`` - permission.
+ The authenticated user requires ``storage.objects.list`` IAM + permission to use this method. To return object ACLs, the + authenticated user must also have the + ``storage.objects.getIamPolicy`` permission. Returns: Callable[[~.ListObjectsRequest], @@ -1107,18 +1240,16 @@ def start_resumable_write( r"""Return a callable for the start resumable write method over gRPC. Starts a resumable write operation. This method is part of the - `Resumable - upload `__ - feature. This allows you to upload large objects in multiple - chunks, which is more resilient to network interruptions than a - single upload. The validity duration of the write operation, and - the consequences of it becoming invalid, are service-dependent. + Resumable upload feature. This allows you to upload large + objects in multiple chunks, which is more resilient to network + interruptions than a single upload. The validity duration of the + write operation, and the consequences of it becoming invalid, + are service-dependent. **IAM Permissions**: - Requires ``storage.objects.create`` `IAM - permission `__ - on the bucket. + Requires ``storage.objects.create`` IAM permission on the + bucket. Returns: Callable[[~.StartResumableWriteRequest], @@ -1147,11 +1278,10 @@ def query_write_status( r"""Return a callable for the query write status method over gRPC. Determines the ``persisted_size`` of an object that is being - written. This method is part of the `resumable - upload `__ - feature. The returned value is the size of the object that has - been persisted so far. The value can be used as the - ``write_offset`` for the next ``Write()`` call. + written. This method is part of the resumable upload feature. + The returned value is the size of the object that has been + persisted so far. The value can be used as the ``write_offset`` + for the next ``Write()`` call. If the object does not exist, meaning if it was deleted, or the first ``Write()`` has not yet reached the service, this method @@ -1189,8 +1319,20 @@ def move_object( ) -> Callable[[storage.MoveObjectRequest], Awaitable[storage.Object]]: r"""Return a callable for the move object method over gRPC. - Moves the source object to the destination object in - the same bucket. + Moves the source object to the destination object in the same + bucket by renaming the object. The move itself is an atomic + transaction, ensuring all steps either complete successfully or + no changes are made. + + **IAM Permissions**: + + Requires the following IAM permissions to use this method: + + - ``storage.objects.move`` + - ``storage.objects.create`` + - ``storage.objects.delete`` (only required if overwriting an + existing object) Returns: Callable[[~.MoveObjectRequest], diff --git a/google/cloud/_storage_v2/types/storage.py b/google/cloud/_storage_v2/types/storage.py index 6e4e01855..8602610be 100644 --- a/google/cloud/_storage_v2/types/storage.py +++ b/google/cloud/_storage_v2/types/storage.py @@ -90,7 +90,9 @@ class DeleteBucketRequest(proto.Message): - r"""Request message for DeleteBucket. + r"""Request message for + [DeleteBucket][google.storage.v2.Storage.DeleteBucket]. + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -126,7 +128,9 @@ class DeleteBucketRequest(proto.Message): class GetBucketRequest(proto.Message): - r"""Request message for GetBucket. + r"""Request message for + [GetBucket][google.storage.v2.Storage.GetBucket]. + ..
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -134,21 +138,20 @@ class GetBucketRequest(proto.Message): name (str): Required. Name of a bucket. if_metageneration_match (int): - If set, and if the bucket's current - metageneration does not match the specified - value, the request will return an error. + If set, only gets the bucket metadata if its + metageneration matches this value. This field is a member of `oneof`_ ``_if_metageneration_match``. if_metageneration_not_match (int): If set, and if the bucket's current metageneration matches the specified value, the - request will return an error. + request returns an error. This field is a member of `oneof`_ ``_if_metageneration_not_match``. read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. A "\*" field may be - used to indicate all fields. If no mask is specified, will - default to all fields. + Mask specifying which fields to read. A ``*`` field can be + used to indicate all fields. If no mask is specified, it + defaults to all fields. This field is a member of `oneof`_ ``_read_mask``. """ @@ -176,40 +179,39 @@ class GetBucketRequest(proto.Message): class CreateBucketRequest(proto.Message): - r"""Request message for CreateBucket. + r"""Request message for + [CreateBucket][google.storage.v2.Storage.CreateBucket]. Attributes: parent (str): - Required. The project to which this bucket will belong. This + Required. The project to which this bucket belongs. This field must either be empty or ``projects/_``. The project ID that owns this bucket should be specified in the ``bucket.project`` field. bucket (google.cloud._storage_v2.types.Bucket): Optional. Properties of the new bucket being inserted. The name of the bucket is specified in the ``bucket_id`` field. - Populating ``bucket.name`` field will result in an error. - The project of the bucket must be specified in the + Populating the ``bucket.name`` field results in an error. + The project of the bucket must be specified in the ``bucket.project`` field. This field must be in ``projects/{projectIdentifier}`` format, {projectIdentifier} can be the project ID or project number. The ``parent`` field must be either empty or ``projects/_``. bucket_id (str): - Required. The ID to use for this bucket, which will become - the final component of the bucket's resource name. For - example, the value ``foo`` might result in a bucket with the - name ``projects/123456/buckets/foo``. + Required. The ID to use for this bucket, which becomes the + final component of the bucket's resource name. For example, + the value ``foo`` might result in a bucket with the name + ``projects/123456/buckets/foo``. predefined_acl (str): - Optional. Apply a predefined set of access - controls to this bucket. Valid values are - "authenticatedRead", "private", - "projectPrivate", "publicRead", or - "publicReadWrite". + Optional. Apply a predefined set of access controls to this + bucket. Valid values are ``authenticatedRead``, ``private``, + ``projectPrivate``, ``publicRead``, or ``publicReadWrite``. predefined_default_object_acl (str): - Optional. Apply a predefined set of default - object access controls to this bucket. Valid - values are "authenticatedRead", - "bucketOwnerFullControl", "bucketOwnerRead", - "private", "projectPrivate", or "publicRead". + Optional. Apply a predefined set of default object access + controls to this bucket.
Valid values are + ``authenticatedRead``, ``bucketOwnerFullControl``, + ``bucketOwnerRead``, ``private``, ``projectPrivate``, or + ``publicRead``. enable_object_retention (bool): Optional. If true, enable object retention on the bucket. @@ -243,7 +245,9 @@ class ListBucketsRequest(proto.Message): - r"""Request message for ListBuckets. + r"""Request message for + [ListBuckets][google.storage.v2.Storage.ListBuckets]. + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -253,10 +257,10 @@ class ListBucketsRequest(proto.Message): listing. page_size (int): Optional. Maximum number of buckets to return in a single - response. The service will use this parameter or 1,000 - items, whichever is smaller. If "acl" is present in the - read_mask, the service will use this parameter of 200 items, - whichever is smaller. + response. The service uses this parameter or ``1,000`` + items, whichever is smaller. If ``acl`` is present in the + ``read_mask``, the service uses this parameter or ``200`` + items, whichever is smaller. page_token (str): Optional. A previously-returned page token representing part of the larger set of results @@ -266,12 +270,15 @@ class ListBucketsRequest(proto.Message): names begin with this prefix. read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read from each result. If no - mask is specified, will default to all fields except - items.owner, items.acl, and items.default_object_acl. - - - may be used to mean "all fields". + mask is specified, it defaults to all fields except + ``items.owner``, ``items.acl``, and + ``items.default_object_acl``. ``*`` can be used to mean + "all fields". This field is a member of `oneof`_ ``_read_mask``. + return_partial_success (bool): + Optional. Allows listing of buckets, even if + there are buckets that are unreachable. """ parent: str = proto.Field( @@ -296,10 +303,15 @@ class ListBucketsRequest(proto.Message): optional=True, message=field_mask_pb2.FieldMask, ) + return_partial_success: bool = proto.Field( + proto.BOOL, + number=9, + ) class ListBucketsResponse(proto.Message): - r"""The result of a call to Buckets.ListBuckets + r"""Response message for + [ListBuckets][google.storage.v2.Storage.ListBuckets]. Attributes: buckets (MutableSequence[google.cloud._storage_v2.types.Bucket]): @@ -309,6 +321,13 @@ class ListBucketsResponse(proto.Message): large result sets. Provide this value in a subsequent request to return the next page of results. + unreachable (MutableSequence[str]): + Unreachable resources. This field can only be present if the + caller specified ``return_partial_success`` as ``true`` in + the request to receive indications of temporarily missing + resources. For example: ``unreachable = + ["projects/*/buckets/bucket1", "projects/*/buckets/bucket2", + "projects/_/buckets/bucket3"]`` """ @property @@ -324,10 +343,15 @@ def raw_page(self): proto.STRING, number=2, ) + unreachable: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=3, + ) class LockBucketRetentionPolicyRequest(proto.Message): - r"""Request message for LockBucketRetentionPolicyRequest. + r"""Request message for + [LockBucketRetentionPolicy][google.storage.v2.Storage.LockBucketRetentionPolicy]. Attributes: bucket (str): @@ -349,43 +373,43 @@ class LockBucketRetentionPolicyRequest(proto.Message): class UpdateBucketRequest(proto.Message): - r"""Request for UpdateBucket method.
+ r"""Request for [UpdateBucket][google.storage.v2.Storage.UpdateBucket] + method. + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: bucket (google.cloud._storage_v2.types.Bucket): Required. The bucket to update. The bucket's ``name`` field - will be used to identify the bucket. + is used to identify the bucket. if_metageneration_match (int): - If set, will only modify the bucket if its - metageneration matches this value. + If set, the request modifies the bucket if + its metageneration matches this value. This field is a member of `oneof`_ ``_if_metageneration_match``. if_metageneration_not_match (int): - If set, will only modify the bucket if its - metageneration does not match this value. + If set, the request modifies the bucket if + its metageneration doesn't match this value. This field is a member of `oneof`_ ``_if_metageneration_not_match``. predefined_acl (str): - Optional. Apply a predefined set of access - controls to this bucket. Valid values are - "authenticatedRead", "private", - "projectPrivate", "publicRead", or - "publicReadWrite". + Optional. Apply a predefined set of access controls to this + bucket. Valid values are ``authenticatedRead``, ``private``, + ``projectPrivate``, ``publicRead``, or ``publicReadWrite``. predefined_default_object_acl (str): - Optional. Apply a predefined set of default - object access controls to this bucket. Valid - values are "authenticatedRead", - "bucketOwnerFullControl", "bucketOwnerRead", - "private", "projectPrivate", or "publicRead". + Optional. Apply a predefined set of default object access + controls to this bucket. Valid values are + ``authenticatedRead``, ``bucketOwnerFullControl``, + ``bucketOwnerRead``, ``private``, ``projectPrivate``, or + ``publicRead``. update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. List of fields to be updated. To specify ALL fields, equivalent to the JSON API's "update" function, specify a single field with the value ``*``. Note: not recommended. If a new field is introduced at a later - time, an older client updating with the ``*`` may + time, an older client updating with the ``*`` might accidentally reset the new field's value. Not specifying any fields is an error. @@ -422,7 +446,9 @@ class UpdateBucketRequest(proto.Message): class ComposeObjectRequest(proto.Message): - r"""Request message for ComposeObject. + r"""Request message for + [ComposeObject][google.storage.v2.Storage.ComposeObject]. + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -430,14 +456,13 @@ class ComposeObjectRequest(proto.Message): destination (google.cloud._storage_v2.types.Object): Required. Properties of the resulting object. source_objects (MutableSequence[google.cloud._storage_v2.types.ComposeObjectRequest.SourceObject]): - Optional. The list of source objects that - will be concatenated into a single object. + Optional. The list of source objects that is + concatenated into a single object. destination_predefined_acl (str): - Optional. Apply a predefined set of access - controls to the destination object. Valid values - are "authenticatedRead", - "bucketOwnerFullControl", "bucketOwnerRead", - "private", "projectPrivate", or "publicRead". + Optional. Apply a predefined set of access controls to the + destination object. Valid values are ``authenticatedRead``, + ``bucketOwnerFullControl``, ``bucketOwnerRead``, + ``private``, ``projectPrivate``, or ``publicRead``. 
if_generation_match (int): Makes the operation conditional on whether the object's current generation matches the given value. @@ -455,15 +480,15 @@ class ComposeObjectRequest(proto.Message): kms_key (str): Optional. Resource name of the Cloud KMS key, of the form ``projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key``, - that will be used to encrypt the object. Overrides the - object metadata's ``kms_key_name`` value, if any. + that is used to encrypt the object. Overrides the object + metadata's ``kms_key_name`` value, if any. common_object_request_params (google.cloud._storage_v2.types.CommonObjectRequestParams): Optional. A set of parameters common to Storage API requests concerning an object. object_checksums (google.cloud._storage_v2.types.ObjectChecksums): Optional. The checksums of the complete - object. This will be validated against the - combined checksums of the component objects. + object. This is validated against the combined + checksums of the component objects. """ class SourceObject(proto.Message): @@ -492,7 +517,7 @@ class ObjectPreconditions(proto.Message): generation of the source object that would be used matches this value. If this value and a generation are both specified, they must be the - same value or the call will fail. + same value or the call fails. This field is a member of `oneof`_ ``_if_generation_match``. """ @@ -558,9 +583,7 @@ class ObjectPreconditions(proto.Message): class DeleteObjectRequest(proto.Message): - r"""Message for deleting an object. ``bucket`` and ``object`` **must** - be set. - + r"""Request message for deleting an object. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -650,8 +673,9 @@ class DeleteObjectRequest(proto.Message): class RestoreObjectRequest(proto.Message): - r"""Message for restoring an object. ``bucket``, ``object``, and - ``generation`` **must** be set. + r"""Request message for + [RestoreObject][google.storage.v2.Storage.RestoreObject]. + ``bucket``, ``object``, and ``generation`` **must** be set. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -704,9 +728,9 @@ class RestoreObjectRequest(proto.Message): This field is a member of `oneof`_ ``_if_metageneration_not_match``. copy_source_acl (bool): If false or unset, the bucket's default - object ACL will be used. If true, copy the - source object's access controls. Return an error - if bucket has UBLA enabled. + object ACL is used. If true, copy the source + object's access controls. An error is returned + if the bucket has UBLA enabled. This field is a member of `oneof`_ ``_copy_source_acl``. common_object_request_params (google.cloud._storage_v2.types.CommonObjectRequestParams): @@ -763,8 +787,8 @@ class RestoreObjectRequest(proto.Message): class CancelResumableWriteRequest(proto.Message): - r"""Message for canceling an in-progress resumable upload. ``upload_id`` - **must** be set. + r"""Request message for + [CancelResumableWrite][google.storage.v2.Storage.CancelResumableWrite]. Attributes: upload_id (str): @@ -781,13 +805,15 @@ class CancelResumableWriteRequest(proto.Message): class CancelResumableWriteResponse(proto.Message): r"""Empty response message for canceling an in-progress resumable - upload, will be extended as needed. + upload; it might be extended as needed. """ class ReadObjectRequest(proto.Message): - r"""Request message for ReadObject. + r"""Request message for + [ReadObject][google.storage.v2.Storage.ReadObject]. + ..
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -805,18 +831,19 @@ class ReadObjectRequest(proto.Message): Optional. The offset for the first byte to return in the read, relative to the start of the object. - A negative ``read_offset`` value will be interpreted as the + A negative ``read_offset`` value is interpreted as the number of bytes back from the end of the object to be - returned. For example, if an object's length is 15 bytes, a - ReadObjectRequest with ``read_offset`` = -5 and - ``read_limit`` = 3 would return bytes 10 through 12 of the - object. Requesting a negative offset with magnitude larger - than the size of the object will return the entire object. + returned. For example, if an object's length is ``15`` + bytes, a ``ReadObjectRequest`` with ``read_offset`` = ``-5`` + and ``read_limit`` = ``3`` would return bytes ``10`` through + ``12`` of the object. Requesting a negative offset with + magnitude larger than the size of the object returns the + entire object. read_limit (int): Optional. The maximum number of ``data`` bytes the server is allowed to return in the sum of all ``Object`` messages. A ``read_limit`` of zero indicates that there is no limit, and - a negative ``read_limit`` will cause an error. + a negative ``read_limit`` causes an error. If the stream returns fewer bytes than allowed by the ``read_limit`` and no error occurred, the stream includes @@ -855,12 +882,11 @@ class ReadObjectRequest(proto.Message): Optional. A set of parameters common to Storage API requests concerning an object. read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. The checksummed_data - field and its children will always be present. If no mask is - specified, will default to all fields except metadata.owner - and metadata.acl. - - - may be used to mean "all fields". + Mask specifying which fields to read. The + ``checksummed_data`` field and its children are always + present. If no mask is specified, it defaults to all fields + except ``metadata.owner`` and ``metadata.acl``. ``*`` can + be used to mean "all fields". This field is a member of `oneof`_ ``_read_mask``. """ @@ -919,7 +945,9 @@ class ReadObjectRequest(proto.Message): class GetObjectRequest(proto.Message): - r"""Request message for GetObject. + r"""Request message for + [GetObject][google.storage.v2.Storage.GetObject]. + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -972,19 +1000,19 @@ class GetObjectRequest(proto.Message): Storage API requests concerning an object. read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. If no mask is - specified, will default to all fields except metadata.acl - and metadata.owner. - - - may be used to mean "all fields". + specified, it defaults to all fields except + ``metadata.acl`` and ``metadata.owner``. ``*`` can be + used to mean "all fields". This field is a member of `oneof`_ ``_read_mask``. restore_token (str): Optional. Restore token used to differentiate soft-deleted objects with the same name and generation. Only applicable - for hierarchical namespace buckets and if soft_deleted is - set to true. This parameter is optional, and is only + for hierarchical namespace buckets and if ``soft_deleted`` + is set to ``true``. This parameter is optional, and is only required in the rare case when there are multiple - soft-deleted objects with the same name and generation.
+ soft-deleted objects with the same ``name`` and + ``generation``. """ bucket: str = proto.Field( @@ -1042,11 +1070,12 @@ class GetObjectRequest(proto.Message): class ReadObjectResponse(proto.Message): - r"""Response message for ReadObject. + r"""Response message for + [ReadObject][google.storage.v2.Storage.ReadObject]. Attributes: checksummed_data (google.cloud._storage_v2.types.ChecksummedData): - A portion of the data for the object. The service **may** + A portion of the data for the object. The service might leave ``data`` empty for any given ``ReadResponse``. This enables the service to inform the client that the request is still live while it is running an operation to generate more data. object_checksums (google.cloud._storage_v2.types.ObjectChecksums): The checksums of the complete object. If the object is downloaded in full, the client should compute one of these checksums over the downloaded object and compare it against the value provided here. content_range (google.cloud._storage_v2.types.ContentRange): - If read_offset and or read_limit was specified on the - ReadObjectRequest, ContentRange will be populated on the - first ReadObjectResponse message of the read stream. + If ``read_offset`` and/or ``read_limit`` is specified on the + ``ReadObjectRequest``, ``ContentRange`` is populated on the + first ``ReadObjectResponse`` message of the read stream. metadata (google.cloud._storage_v2.types.Object): Metadata of the object whose media is being returned. Only populated in the first response @@ -1137,15 +1166,13 @@ class BidiReadObjectSpec(proto.Message): Optional. A set of parameters common to Storage API requests concerning an object. read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. The checksummed_data - field and its children will always be present. If no mask is - specified, will default to all fields except metadata.owner - and metadata.acl. - - - may be used to mean "all fields". As per - https://google.aip.dev/161, this field is deprecated. As - an alternative, grpc metadata can be used: - https://cloud.google.com/apis/docs/system-parameters#definitions + Mask specifying which fields to read. The + ``checksummed_data`` field and its children are always + present. If no mask is specified, it defaults to all fields + except ``metadata.owner`` and ``metadata.acl``. ``*`` can + be used to mean "all fields". As per + https://google.aip.dev/161, this field is deprecated. As an + alternative, gRPC metadata can be used. This field is a member of `oneof`_ ``_read_mask``. read_handle (google.cloud._storage_v2.types.BidiReadHandle): @@ -1220,24 +1247,23 @@ class BidiReadObjectSpec(proto.Message): class BidiReadObjectRequest(proto.Message): - r"""Request message for BidiReadObject. + r"""Request message for + [BidiReadObject][google.storage.v2.Storage.BidiReadObject]. Attributes: read_object_spec (google.cloud._storage_v2.types.BidiReadObjectSpec): Optional. The first message of each stream should set this field. If this is not the first - message, an error will be returned. Describes - the object to read. + message, an error is returned. Describes the + object to read. read_ranges (MutableSequence[google.cloud._storage_v2.types.ReadRange]): - Optional. Provides a list of 0 or more (up to - 100) ranges to read. If a single range is large - enough to require multiple responses, they are - guaranteed to be delivered in increasing offset - order. There are no ordering guarantees across - ranges. When no ranges are provided, the - response message will not include - ObjectRangeData. For full object downloads, the - offset and size can be set to 0. + Optional.
Provides a list of 0 or more (up to 100) ranges to + read. If a single range is large enough to require multiple + responses, they are delivered in increasing offset order. + There are no ordering guarantees across ranges. When no + ranges are provided, the response message doesn't include + ``ObjectRangeData``. For full object downloads, the offset + and size can be set to ``0``. """ read_object_spec: "BidiReadObjectSpec" = proto.Field( @@ -1253,29 +1279,29 @@ class BidiReadObjectRequest(proto.Message): class BidiReadObjectResponse(proto.Message): - r"""Response message for BidiReadObject. + r"""Response message for + [BidiReadObject][google.storage.v2.Storage.BidiReadObject]. Attributes: object_data_ranges (MutableSequence[google.cloud._storage_v2.types.ObjectRangeData]): - A portion of the object's data. The service **may** leave - data empty for any given ReadResponse. This enables the + A portion of the object's data. The service might leave data + empty for any given ``ReadResponse``. This enables the service to inform the client that the request is still live while it is running an operation to generate more data. The - service **may** pipeline multiple responses belonging to - different read requests. Each ObjectRangeData entry will - have a read_id set to the same value as the corresponding - source read request. + service might pipeline multiple responses belonging to + different read requests. Each ``ObjectRangeData`` entry has + a ``read_id`` that is set to the same value as the + corresponding source read request. metadata (google.cloud._storage_v2.types.Object): Metadata of the object whose media is being returned. Only populated in the first response in the stream and not populated when the stream is opened with a read handle. read_handle (google.cloud._storage_v2.types.BidiReadHandle): - This field will be periodically refreshed, - however it may not be set in every response. It - allows the client to more efficiently open - subsequent bidirectional streams to the same - object. + This field is periodically refreshed; however, + it might not be set in every response. It allows + the client to more efficiently open subsequent + bidirectional streams to the same object. """ object_data_ranges: MutableSequence["ObjectRangeData"] = proto.RepeatedField( @@ -1297,7 +1323,7 @@ class BidiReadObjectResponse(proto.Message): class BidiReadObjectRedirectedError(proto.Message): r"""Error proto containing details for a redirected read. This - error may be attached as details for an ABORTED response to + error might be attached as details for an ABORTED response to BidiReadObject. Attributes: read_handle (google.cloud._storage_v2.types.BidiReadHandle): The read handle for the redirected read. If - set, the client may use this in the + set, the client can use this in the BidiReadObjectSpec when retrying the read stream. routing_token (str): @@ -1332,7 +1358,7 @@ class BidiReadObjectRedirectedError(proto.Message): class BidiWriteObjectRedirectedError(proto.Message): r"""Error proto containing details for a redirected write. This - error may be attached as details for an ABORTED response to + error might be attached as details for an ABORTED response to BidiWriteObject. @@ -1349,14 +1375,14 @@ class BidiWriteObjectRedirectedError(proto.Message): write_handle (google.cloud._storage_v2.types.BidiWriteHandle): Opaque value describing a previous write.
If set, the client must use this in an AppendObjectSpec first_message when - retrying the write stream. If not set, clients may retry the - original request. + retrying the write stream. If not set, clients can retry + the original request. This field is a member of `oneof`_ ``_write_handle``. generation (int): The generation of the object that triggered the redirect. - This will be set iff write_handle is set. If set, the client - must use this in an AppendObjectSpec first_message when + This is set iff ``write_handle`` is set. If set, the client + must use this in an ``AppendObjectSpec`` ``first_message`` + when retrying the write stream. This field is a member of `oneof`_ ``_generation``. @@ -1420,7 +1446,7 @@ class ReadRangeError(proto.Message): class ReadRange(proto.Message): - r"""Describes a range of bytes to read in a BidiReadObjectRanges + r"""Describes a range of bytes to read in a ``BidiReadObjectRanges`` request. Attributes: read_offset (int): Required. The offset for the first byte to return in the read, relative to the start of the object. - A negative read_offset value will be interpreted as the - number of bytes back from the end of the object to be - returned. For example, if an object's length is 15 bytes, a - ReadObjectRequest with read_offset = -5 and read_length = 3 - would return bytes 10 through 12 of the object. Requesting a - negative offset with magnitude larger than the size of the - object will return the entire object. A read_offset larger - than the size of the object will result in an OutOfRange - error. + A negative ``read_offset`` value is interpreted as the number + of bytes back from the end of the object to be returned. For + example, if an object's length is 15 bytes, a + ``ReadObjectRequest`` with ``read_offset`` = -5 and + ``read_length`` = 3 would return bytes 10 through 12 of the + object. Requesting a negative offset with magnitude larger + than the size of the object returns the entire object. A + ``read_offset`` larger than the size of the object results + in an ``OutOfRange`` error. read_length (int): Optional. The maximum number of data bytes the server is allowed to return across all response messages with the same - read_id. A read_length of zero indicates to read until the - resource end, and a negative read_length will cause an - error. If the stream returns fewer bytes than allowed by the - read_length and no error occurred, the stream includes all - data from the read_offset to the resource end. + ``read_id``. A ``read_length`` of zero indicates to read + until the resource end, and a negative ``read_length`` + causes an error. If the stream returns fewer bytes than + allowed by the ``read_length`` and no error occurred, the + stream includes all data from the ``read_offset`` to the + resource end. read_id (int): Required. Read identifier provided by the client. When the - client issues more than one outstanding ReadRange on the + client issues more than one outstanding ``ReadRange`` on the same stream, responses can be mapped back to their corresponding requests using this value. Clients must ensure that all outstanding requests have different read_id values. - The server may close the stream with an error if this + The server might close the stream with an error if this condition is not met. @@ -1476,12 +1503,12 @@ class ObjectRangeData(proto.Message): checksummed_data (google.cloud._storage_v2.types.ChecksummedData): A portion of the data for the object.
read_range (google.cloud._storage_v2.types.ReadRange): - The ReadRange describes the content being returned with - read_id set to the corresponding ReadObjectRequest in the - stream. Multiple ObjectRangeData messages may have the same - read_id but increasing offsets. ReadObjectResponse messages - with the same read_id are guaranteed to be delivered in - increasing offset order. + The ``ReadRange`` describes the content being returned with + ``read_id`` set to the corresponding ``ReadObjectRequest`` + in the stream. Multiple ``ObjectRangeData`` messages might + have the same ``read_id`` but increasing offsets. + ``ReadObjectResponse`` messages with the same ``read_id`` + are guaranteed to be delivered in increasing offset order. range_end (bool): If set, indicates there are no more bytes to read for the given ReadRange. @@ -1504,9 +1531,9 @@ class ObjectRangeData(proto.Message): class BidiReadHandle(proto.Message): - r"""BidiReadHandle contains a handle from a previous - BiDiReadObject invocation. The client can use this instead of - BidiReadObjectSpec as an optimized way of opening subsequent + r"""``BidiReadHandle`` contains a handle from a previous + ``BiDiReadObject`` invocation. The client can use this instead of + ``BidiReadObjectSpec`` as an optimized way of opening subsequent bidirectional streams to the same object. Attributes: handle (bytes): @@ -1522,10 +1549,10 @@ class BidiReadHandle(proto.Message): class BidiWriteHandle(proto.Message): - r"""BidiWriteHandle contains a handle from a previous - BidiWriteObject invocation. The client can use this as an - optimized way of opening subsequent bidirectional streams to the - same object. + r"""``BidiWriteHandle`` contains a handle from a previous + ``BidiWriteObject`` invocation. The client can use this as an + optimized way of opening subsequent bidirectional streams to + the same object. Attributes: handle (bytes): @@ -1551,26 +1578,23 @@ class WriteObjectSpec(proto.Message): resource (google.cloud._storage_v2.types.Object): Required. Destination object, including its name and its metadata. predefined_acl (str): - Optional. Apply a predefined set of access - controls to this object. Valid values are - "authenticatedRead", "bucketOwnerFullControl", - "bucketOwnerRead", "private", "projectPrivate", - or "publicRead". + Optional. Apply a predefined set of access controls to this + object. Valid values are ``authenticatedRead``, + ``bucketOwnerFullControl``, ``bucketOwnerRead``, + ``private``, ``projectPrivate``, or ``publicRead``. if_generation_match (int): - Makes the operation conditional on whether - the object's current generation matches the - given value. Setting to 0 makes the operation - succeed only if there are no live versions of - the object. + Makes the operation conditional on whether the object's + current generation matches the given value. Setting to ``0`` + makes the operation succeed only if there are no live + versions of the object. This field is a member of `oneof`_ ``_if_generation_match``. if_generation_not_match (int): - Makes the operation conditional on whether - the object's live generation does not match the - given value. If no live object exists, the - precondition fails. Setting to 0 makes the - operation succeed only if there is a live - version of the object. + Makes the operation conditional on whether the object's live + generation does not match the given value. If no live object + exists, the precondition fails. Setting to ``0`` makes the + operation succeed only if there is a live version of the + object.
This field is a member of `oneof`_ ``_if_generation_not_match``. if_metageneration_match (int): @@ -1588,7 +1612,7 @@ class WriteObjectSpec(proto.Message): object_size (int): The expected final object size being uploaded. If this value is set, closing the stream after writing fewer or more than - ``object_size`` bytes will result in an OUT_OF_RANGE error. + ``object_size`` bytes results in an ``OUT_OF_RANGE`` error. This situation is considered a client error, and if such an error occurs you must start the upload over from scratch, This field is a member of `oneof`_ ``_object_size``. appendable (bool): - If true, the object will be created in - appendable mode. This field may only be set when - using BidiWriteObject. + If ``true``, the object is created in appendable mode. This + field can only be set when using ``BidiWriteObject``. This field is a member of `oneof`_ ``_appendable``. """ @@ -1645,7 +1668,8 @@ class WriteObjectSpec(proto.Message): class WriteObjectRequest(proto.Message): - r"""Request message for WriteObject. + r"""Request message for + [WriteObject][google.storage.v2.Storage.WriteObject]. This message has `oneof`_ fields (mutually exclusive fields). For each oneof, at most one member field can be set at the same time. @@ -1680,28 +1704,28 @@ class WriteObjectRequest(proto.Message): the sum of the first ``write_offset`` and the sizes of all ``data`` chunks sent previously on this stream. - An incorrect value will cause an error. + An incorrect value causes an error. checksummed_data (google.cloud._storage_v2.types.ChecksummedData): The data to insert. If a crc32c checksum is provided that doesn't match the checksum - computed by the service, the request will fail. + computed by the service, the request fails. This field is a member of `oneof`_ ``data``. object_checksums (google.cloud._storage_v2.types.ObjectChecksums): Optional. Checksums for the complete object. If the checksums computed by the service don't match the specified - checksums the call will fail. May only be provided in the - first or last request (either with first_message, or - finish_write set). + checksums the call fails. This field can only be provided + in the first or last request (either with ``first_message`` + or ``finish_write`` set). finish_write (bool): Optional. If ``true``, this indicates that the write is complete. Sending any ``WriteObjectRequest``\ s subsequent - to one in which ``finish_write`` is ``true`` will cause an - error. For a non-resumable write (where the upload_id was - not set in the first message), it is an error not to set + to one in which ``finish_write`` is ``true`` causes an + error. For a non-resumable write (where the ``upload_id`` + was not set in the first message), it is an error not to set this field in the final message of the stream. common_object_request_params (google.cloud._storage_v2.types.CommonObjectRequestParams): - Optional. A set of parameters common to + Optional. A set of parameters common to Cloud Storage API requests concerning an object. """ @@ -1743,7 +1767,8 @@ class WriteObjectRequest(proto.Message): class WriteObjectResponse(proto.Message): - r"""Response message for WriteObject. + r"""Response message for + [WriteObject][google.storage.v2.Storage.WriteObject]. This message has `oneof`_ fields (mutually exclusive fields). For each oneof, at most one member field can be set at the same time.
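Editorial aside (not part of the generated diff): the write_offset and finish_write rules documented above pin down the shape of a WriteObject request stream. A minimal sketch, assuming the types defined in google/cloud/_storage_v2/types/storage.py; the bucket, object name, and payload are hypothetical, and the surrounding gRPC streaming call is not shown:

```python
# Sketch only: a two-chunk, non-resumable WriteObject request sequence.
# Types are from google/cloud/_storage_v2/types/storage.py; the bucket,
# object name, and payload bytes are hypothetical.
from google.cloud._storage_v2.types.storage import (
    ChecksummedData,
    WriteObjectRequest,
    WriteObjectSpec,
)

chunk1, chunk2 = b"hello, ", b"world"

requests = [
    # The first message carries the spec (the first_message oneof);
    # write_offset starts at 0.
    WriteObjectRequest(
        write_object_spec=WriteObjectSpec(
            resource={"bucket": "projects/_/buckets/my-bucket", "name": "my-object"},
            if_generation_match=0,  # succeed only if no live version exists
        ),
        write_offset=0,
        checksummed_data=ChecksummedData(content=chunk1),
    ),
    # Each later write_offset equals the total bytes sent so far;
    # finish_write=True marks the final message of the stream.
    WriteObjectRequest(
        write_offset=len(chunk1),
        checksummed_data=ChecksummedData(content=chunk2),
        finish_write=True,
    ),
]
```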
@@ -1814,9 +1839,9 @@ class AppendObjectSpec(proto.Message): This field is a member of `oneof`_ ``_if_metageneration_not_match``. routing_token (str): - An optional routing token that influences - request routing for the stream. Must be provided - if a BidiWriteObjectRedirectedError is returned. + An optional routing token that influences request routing + for the stream. Must be provided if a + ``BidiWriteObjectRedirectedError`` is returned. This field is a member of `oneof`_ ``_routing_token``. write_handle (google.cloud._storage_v2.types.BidiWriteHandle): @@ -1866,7 +1891,8 @@ class AppendObjectSpec(proto.Message): class BidiWriteObjectRequest(proto.Message): - r"""Request message for BidiWriteObject. + r"""Request message for + [BidiWriteObject][google.storage.v2.Storage.BidiWriteObject]. This message has `oneof`_ fields (mutually exclusive fields). For each oneof, at most one member field can be set at the same time. @@ -1898,52 +1924,52 @@ class BidiWriteObjectRequest(proto.Message): In the first ``WriteObjectRequest`` of a ``WriteObject()`` action, it indicates the initial offset for the ``Write()`` - call. The value **must** be equal to the ``persisted_size`` - that a call to ``QueryWriteStatus()`` would return (0 if - this is the first write to the object). + call. The value must be equal to the ``persisted_size`` that + a call to ``QueryWriteStatus()`` would return (0 if this is + the first write to the object). - On subsequent calls, this value **must** be no larger than - the sum of the first ``write_offset`` and the sizes of all + On subsequent calls, this value must be no larger than the + sum of the first ``write_offset`` and the sizes of all ``data`` chunks sent previously on this stream. - An invalid value will cause an error. + An invalid value causes an error. checksummed_data (google.cloud._storage_v2.types.ChecksummedData): The data to insert. If a crc32c checksum is provided that doesn't match the checksum - computed by the service, the request will fail. + computed by the service, the request fails. This field is a member of `oneof`_ ``data``. object_checksums (google.cloud._storage_v2.types.ObjectChecksums): Optional. Checksums for the complete object. If the checksums computed by the service don't match the specified - checksums the call will fail. May only be provided in the + checksums the call fails. Can only be provided in the first request or the last request (with ``finish_write`` set). state_lookup (bool): - Optional. For each BidiWriteObjectRequest where state_lookup - is ``true`` or the client closes the stream, the service - will send a BidiWriteObjectResponse containing the current - persisted size. The persisted size sent in responses covers - all the bytes the server has persisted thus far and can be - used to decide what data is safe for the client to drop. + Optional. For each ``BidiWriteObjectRequest`` where + ``state_lookup`` is ``true`` or the client closes the + stream, the service sends a ``BidiWriteObjectResponse`` + containing the current persisted size. The persisted size + sent in responses covers all the bytes the server has + persisted thus far and can be used to decide what data is + safe for the client to drop.
Note that the object's current + size reported by the ``BidiWriteObjectResponse`` might lag + behind the number of bytes written by the client. This field + is ignored if ``finish_write`` is set to true. flush (bool): Optional. Persists data written on the stream, up to and including the current message, to permanent storage. This - option should be used sparingly as it may reduce - performance. Ongoing writes will periodically be persisted - on the server even when ``flush`` is not set. This field is + option should be used sparingly as it might reduce + performance. Ongoing writes are periodically persisted on + the server even when ``flush`` is not set. This field is ignored if ``finish_write`` is set to true since there's no need to checkpoint or flush if this message completes the write. finish_write (bool): Optional. If ``true``, this indicates that the write is complete. Sending any ``WriteObjectRequest``\ s subsequent - to one in which ``finish_write`` is ``true`` will cause an - error. For a non-resumable write (where the upload_id was - not set in the first message), it is an error not to set + to one in which ``finish_write`` is ``true`` causes an + error. For a non-resumable write (where the ``upload_id`` + was not set in the first message), it is an error not to set this field in the final message of the stream. common_object_request_params (google.cloud._storage_v2.types.CommonObjectRequestParams): Optional. A set of parameters common to @@ -2025,11 +2051,10 @@ class BidiWriteObjectResponse(proto.Message): This field is a member of `oneof`_ ``write_status``. write_handle (google.cloud._storage_v2.types.BidiWriteHandle): - An optional write handle that will - periodically be present in response messages. - Clients should save it for later use in - establishing a new stream if a connection is - interrupted. + An optional write handle that is returned + periodically in response messages. Clients + should save it for later use in establishing a + new stream if a connection is interrupted. This field is a member of `oneof`_ ``_write_handle``. """ @@ -2054,7 +2079,9 @@ class BidiWriteObjectResponse(proto.Message): class ListObjectsRequest(proto.Message): - r"""Request message for ListObjects. + r"""Request message for + [ListObjects][google.storage.v2.Storage.ListObjects]. + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -2065,68 +2092,69 @@ class ListObjectsRequest(proto.Message): page_size (int): Optional. Maximum number of ``items`` plus ``prefixes`` to return in a single page of responses. As duplicate - ``prefixes`` are omitted, fewer total results may be - returned than requested. The service will use this parameter - or 1,000 items, whichever is smaller. + ``prefixes`` are omitted, fewer total results might be + returned than requested. The service uses this parameter or + 1,000 items, whichever is smaller. page_token (str): Optional. A previously-returned page token representing part of the larger set of results to view. delimiter (str): Optional. If set, returns results in a directory-like mode. - ``items`` will contain only objects whose names, aside from - the ``prefix``, do not contain ``delimiter``. Objects whose - names, aside from the ``prefix``, contain ``delimiter`` will - have their name, truncated after the ``delimiter``, returned - in ``prefixes``. Duplicate ``prefixes`` are omitted. + ``items`` contains only objects whose names, aside from the + ``prefix``, do not contain ``delimiter``. 
Objects whose + names, aside from the ``prefix``, contain ``delimiter`` have + their name, truncated after the ``delimiter``, returned in + ``prefixes``. Duplicate ``prefixes`` are omitted. include_trailing_delimiter (bool): Optional. If true, objects that end in exactly one instance - of ``delimiter`` will have their metadata included in - ``items`` in addition to ``prefixes``. + of ``delimiter`` have their metadata included in ``items`` + in addition to ``prefixes``. prefix (str): Optional. Filter results to objects whose names begin with this prefix. versions (bool): Optional. If ``true``, lists all versions of an object as - distinct results. For more information, see `Object - Versioning `__. + distinct results. read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read from each result. If no - mask is specified, will default to all fields except - items.acl and items.owner. - - - may be used to mean "all fields". + mask is specified, it defaults to all fields except + ``items.acl`` and ``items.owner``. ``*`` can be used to + mean "all fields". This field is a member of `oneof`_ ``_read_mask``. lexicographic_start (str): Optional. Filter results to objects whose names are - lexicographically equal to or after lexicographic_start. If - lexicographic_end is also set, the objects listed have names - between lexicographic_start (inclusive) and - lexicographic_end (exclusive). + lexicographically equal to or after ``lexicographic_start``. + If ``lexicographic_end`` is also set, the objects listed + have names between ``lexicographic_start`` (inclusive) and + ``lexicographic_end`` (exclusive). lexicographic_end (str): Optional. Filter results to objects whose names are - lexicographically before lexicographic_end. If - lexicographic_start is also set, the objects listed have - names between lexicographic_start (inclusive) and - lexicographic_end (exclusive). + lexicographically before ``lexicographic_end``. If + ``lexicographic_start`` is also set, the objects listed have + names between ``lexicographic_start`` (inclusive) and + ``lexicographic_end`` (exclusive). soft_deleted (bool): Optional. If true, only list all soft-deleted versions of the object. Soft delete policy is required to set this option. include_folders_as_prefixes (bool): - Optional. If true, will also include folders and managed - folders (besides objects) in the returned ``prefixes``. - Requires ``delimiter`` to be set to '/'. + Optional. If true, includes folders and managed folders + (besides objects) in the returned ``prefixes``. Requires + ``delimiter`` to be set to '/'. match_glob (str): Optional. Filter results to objects and prefixes that match - this glob pattern. See `List Objects Using - Glob `__ + this glob pattern. See `List objects using + glob `__ for the full syntax. filter (str): - Optional. Filter the returned objects. Currently only - supported for the ``contexts`` field. If ``delimiter`` is - set, the returned ``prefixes`` are exempt from this filter. + Optional. An expression used to filter the returned objects + by the ``context`` field. For the full syntax, see `Filter + objects by contexts + syntax `__. + If a ``delimiter`` is set, the returned ``prefixes`` are + exempt from this filter. """ parent: str = proto.Field( @@ -2190,7 +2218,8 @@ class ListObjectsRequest(proto.Message): class QueryWriteStatusRequest(proto.Message): - r"""Request object for ``QueryWriteStatus``. + r"""Request object for + [QueryWriteStatus][google.storage.v2.Storage.QueryWriteStatus].
Attributes: upload_id (str): @@ -2214,7 +2243,8 @@ class QueryWriteStatusRequest(proto.Message): class QueryWriteStatusResponse(proto.Message): - r"""Response object for ``QueryWriteStatus``. + r"""Response object for + [QueryWriteStatus][google.storage.v2.Storage.QueryWriteStatus]. This message has `oneof`_ fields (mutually exclusive fields). For each oneof, at most one member field can be set at the same time. @@ -2254,15 +2284,17 @@ class QueryWriteStatusResponse(proto.Message): class RewriteObjectRequest(proto.Message): - r"""Request message for RewriteObject. If the source object is encrypted - using a Customer-Supplied Encryption Key the key information must be - provided in the copy_source_encryption_algorithm, - copy_source_encryption_key_bytes, and - copy_source_encryption_key_sha256_bytes fields. If the destination - object should be encrypted the keying information should be provided - in the encryption_algorithm, encryption_key_bytes, and - encryption_key_sha256_bytes fields of the - common_object_request_params.customer_encryption field. + r"""Request message for + [RewriteObject][google.storage.v2.Storage.RewriteObject]. If the + source object is encrypted using a Customer-Supplied Encryption Key, + the key information must be provided in the + ``copy_source_encryption_algorithm``, + ``copy_source_encryption_key_bytes``, and + ``copy_source_encryption_key_sha256_bytes`` fields. If the + destination object should be encrypted, the keying information should + be provided in the ``encryption_algorithm``, + ``encryption_key_bytes``, and ``encryption_key_sha256_bytes`` fields + of the ``common_object_request_params.customer_encryption`` field. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -2281,9 +2313,9 @@ class RewriteObjectRequest(proto.Message): containing the destination object. destination_kms_key (str): Optional. The name of the Cloud KMS key that - will be used to encrypt the destination object. - The Cloud KMS key must be located in same - location as the object. If the parameter is not + is used to encrypt the destination object. The + Cloud KMS key must be located in the same location + as the object. If the parameter is not specified, the request uses the destination bucket's default encryption key, if any, or else the Google-managed encryption key. @@ -2293,9 +2325,9 @@ class RewriteObjectRequest(proto.Message): not be populated (these values are specified in the ``destination_name``, ``destination_bucket``, and ``destination_kms_key`` fields). If ``destination`` is - present it will be used to construct the destination - object's metadata; otherwise the destination object's - metadata will be copied from the source object. + present, it is used to construct the destination object's + metadata; otherwise the destination object's metadata is + copied from the source object. source_bucket (str): Required. Name of the bucket in which to find the source object. @@ -2315,11 +2347,10 @@ class RewriteObjectRequest(proto.Message): the values provided in the first rewrite request. destination_predefined_acl (str): - Optional. Apply a predefined set of access - controls to the destination object. Valid values - are "authenticatedRead", - "bucketOwnerFullControl", "bucketOwnerRead", - "private", "projectPrivate", or "publicRead". + Optional. Apply a predefined set of access controls to the + destination object.
Valid values are ``authenticatedRead``, + ``bucketOwnerFullControl``, ``bucketOwnerRead``, + ``private``, ``projectPrivate``, or ``publicRead``. if_generation_match (int): Makes the operation conditional on whether the object's current generation matches the @@ -2374,15 +2405,15 @@ class RewriteObjectRequest(proto.Message): This field is a member of `oneof`_ ``_if_source_metageneration_not_match``. max_bytes_rewritten_per_call (int): - Optional. The maximum number of bytes that will be rewritten - per rewrite request. Most callers shouldn't need to specify - this parameter - it is primarily in place to support - testing. If specified the value must be an integral multiple - of 1 MiB (1048576). Also, this only applies to requests - where the source and destination span locations and/or - storage classes. Finally, this value must not change across - rewrite calls else you'll get an error that the - ``rewriteToken`` is invalid. + Optional. The maximum number of bytes that are rewritten per + rewrite request. Most callers shouldn't need to specify this + parameter - it is primarily in place to support testing. If + specified, the value must be an integral multiple of 1 MiB + (1048576). Also, this only applies to requests where the + source and destination span locations and/or storage + classes. Finally, this value must not change across rewrite + calls; otherwise you'll get an error that the ``rewriteToken`` is + invalid. copy_source_encryption_algorithm (str): Optional. The algorithm used to encrypt the source object, if any. Used if the source object @@ -2403,8 +2434,8 @@ class RewriteObjectRequest(proto.Message): Storage API requests concerning an object. object_checksums (google.cloud._storage_v2.types.ObjectChecksums): Optional. The checksums of the complete - object. This will be used to validate the - destination object after rewriting. + object. This is used to validate the destination + object after rewriting. """ destination_name: str = proto.Field( @@ -2564,7 +2595,9 @@ class RewriteResponse(proto.Message): class MoveObjectRequest(proto.Message): - r"""Request message for MoveObject. + r"""Request message for + [MoveObject][google.storage.v2.Storage.MoveObject]. + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -2708,7 +2741,8 @@ class MoveObjectRequest(proto.Message): class StartResumableWriteRequest(proto.Message): - r"""Request message StartResumableWrite. + r"""Request message for + [StartResumableWrite][google.storage.v2.Storage.StartResumableWrite]. Attributes: write_object_spec (google.cloud._storage_v2.types.WriteObjectSpec): @@ -2744,7 +2778,8 @@ class StartResumableWriteRequest(proto.Message): class StartResumableWriteResponse(proto.Message): - r"""Response object for ``StartResumableWrite``. + r"""Response object for + [StartResumableWrite][google.storage.v2.Storage.StartResumableWrite]. Attributes: upload_id (str): @@ -2763,7 +2798,9 @@ class StartResumableWriteResponse(proto.Message): class UpdateObjectRequest(proto.Message): - r"""Request message for UpdateObject. + r"""Request message for + [UpdateObject][google.storage.v2.Storage.UpdateObject]. + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -2817,7 +2854,7 @@ class UpdateObjectRequest(proto.Message): To specify ALL fields, equivalent to the JSON API's "update" function, specify a single field with the value ``*``. Note: not recommended.
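To make the ``update_mask`` guidance concrete, here is a hedged sketch of an update that names its fields explicitly rather than passing ``*``. The ``object_`` keyword is an assumption that follows the generator's convention of suffixing field names that collide with Python builtins; the other names come from the request and ``Object`` messages in this module:

.. code-block:: python

    from google.protobuf import field_mask_pb2

    from google.cloud import _storage_v2

    # Update only `cache_control`; all other metadata fields are left
    # untouched, so an older client cannot accidentally reset fields it
    # does not know about.
    request = _storage_v2.UpdateObjectRequest(
        object_=_storage_v2.Object(
            bucket="projects/_/buckets/my-bucket",
            name="my-object",
            cache_control="public, max-age=3600",
        ),
        update_mask=field_mask_pb2.FieldMask(paths=["cache_control"]),
    )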
If a new field is introduced at a later - time, an older client updating with the ``*`` may + time, an older client updating with the ``*`` might accidentally reset the new field's value. Not specifying any fields is an error. @@ -2887,7 +2924,7 @@ class CommonObjectRequestParams(proto.Message): raw bytes format (not base64-encoded). encryption_key_sha256_bytes (bytes): Optional. SHA256 hash of encryption key used - with the Customer-Supplied Encryption Keys + with the Customer-supplied encryption keys feature. """ @@ -2916,9 +2953,8 @@ class Values(proto.Enum): VALUES_UNSPECIFIED (0): Unused. Proto3 requires first enum to be 0. MAX_READ_CHUNK_BYTES (2097152): - The maximum size chunk that can will be - returned in a single ReadRequest. - 2 MiB. + The maximum size chunk that can be returned in a single + ``ReadRequest``. 2 MiB. MAX_WRITE_CHUNK_BYTES (2097152): The maximum size chunk that can be sent in a single WriteObjectRequest. 2 MiB. @@ -3006,31 +3042,25 @@ class Bucket(proto.Message): bucket_id (str): Output only. The user-chosen part of the bucket name. The ``{bucket}`` portion of the ``name`` field. For globally - unique buckets, this is equal to the "bucket name" of other - Cloud Storage APIs. Example: "pub". + unique buckets, this is equal to the ``bucket name`` of + other Cloud Storage APIs. Example: ``pub``. etag (str): - The etag of the bucket. - If included in the metadata of an - UpdateBucketRequest, the operation will only be - performed if the etag matches that of the - bucket. + The etag of the bucket. If included in the metadata of an + ``UpdateBucketRequest``, the operation is only performed if + the ``etag`` matches that of the bucket. project (str): - Immutable. The project which owns this - bucket, in the format of - "projects/{projectIdentifier}". - {projectIdentifier} can be the project ID or - project number. Output values will always be in - project number format. + Immutable. The project which owns this bucket, in the format + of ``projects/{projectIdentifier}``. ``{projectIdentifier}`` + can be the project ID or project number. Output values are + always in the project number format. metageneration (int): Output only. The metadata generation of this bucket. location (str): Immutable. The location of the bucket. Object data for objects in the bucket resides in physical storage within - this region. Defaults to ``US``. See the - [https://developers.google.com/storage/docs/concepts-techniques#specifyinglocations"][developer's - guide] for the authoritative list. Attempting to update this - field after the bucket is created will result in an error. + this region. Defaults to ``US``. Attempting to update this + field after the bucket is created results in an error. location_type (str): Output only. The location type of the bucket (region, dual-region, multi-region, etc). @@ -3039,37 +3069,39 @@ class Bucket(proto.Message): no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. If this value is - not specified when the bucket is created, it will default to - ``STANDARD``. For more information, see - https://developers.google.com/storage/docs/storage-classes. + not specified when the bucket is created, it defaults to + ``STANDARD``. For more information, see `Storage + classes `__. rpo (str): Optional. The recovery point objective for cross-region replication of the bucket. Applicable only for dual- and - multi-region buckets. "DEFAULT" uses default replication. 
- "ASYNC_TURBO" enables turbo replication, valid for + multi-region buckets. ``DEFAULT`` uses default replication. + ``ASYNC_TURBO`` enables turbo replication, valid for dual-region buckets only. If rpo is not specified when the - bucket is created, it defaults to "DEFAULT". For more - information, see - https://cloud.google.com/storage/docs/availability-durability#turbo-replication. + bucket is created, it defaults to ``DEFAULT``. For more + information, see `Turbo + replication `__. acl (MutableSequence[google.cloud._storage_v2.types.BucketAccessControl]): Optional. Access controls on the bucket. If - iam_config.uniform_bucket_level_access is enabled on this - bucket, requests to set, read, or modify acl is an error. + ``iam_config.uniform_bucket_level_access`` is enabled on + this bucket, requests to set, read, or modify the ACL return an + error. default_object_acl (MutableSequence[google.cloud._storage_v2.types.ObjectAccessControl]): Optional. Default access controls to apply to new objects when no ACL is provided. If - iam_config.uniform_bucket_level_access is enabled on this - bucket, requests to set, read, or modify acl is an error. + ``iam_config.uniform_bucket_level_access`` is enabled on + this bucket, requests to set, read, or modify the ACL return an + error. lifecycle (google.cloud._storage_v2.types.Bucket.Lifecycle): - Optional. The bucket's lifecycle config. See - [https://developers.google.com/storage/docs/lifecycle]Lifecycle - Management] for more information. + Optional. The bucket's lifecycle configuration. See + `Lifecycle + Management `__ + for more information. create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. The creation time of the bucket. cors (MutableSequence[google.cloud._storage_v2.types.Bucket.Cors]): Optional. The bucket's - [https://www.w3.org/TR/cors/][Cross-Origin Resource Sharing] - (CORS) config. + `CORS `__ configuration. update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. The modification time of the bucket. @@ -3079,32 +3111,33 @@ class Bucket(proto.Message): Event-based hold is a way to retain objects indefinitely until an event occurs, signified by the hold's release. After being released, such - objects will be subject to bucket-level - retention (if any). One sample use case of this - flag is for banks to hold loan documents for at - least 3 years after loan is paid in full. Here, + objects are subject to bucket-level retention + (if any). One sample use case of this flag is + for banks to hold loan documents for at least 3 + years after the loan is paid in full. Here, bucket-level retention is 3 years and the event is the loan being paid in full. In this example, - these objects will be held intact for any number - of years until the event has occurred - (event-based hold on the object is released) and - then 3 more years after that. That means - retention duration of the objects begins from - the moment event-based hold transitioned from - true to false. Objects under event-based hold - cannot be deleted, overwritten or archived until - the hold is removed. + these objects are held intact for any number of + years until the event has occurred (event-based + hold on the object is released) and then 3 more + years after that. That means retention duration + of the objects begins from the moment + event-based hold transitioned from true to + false. Objects under event-based hold cannot be + deleted, overwritten, or archived until the hold + is removed. labels (MutableMapping[str, str]): Optional. User-provided labels, in key/value pairs.
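As a quick illustration of the ``rpo``, ``storage_class``, and ``labels`` fields documented above, a ``Bucket`` message for a dual-region bucket with turbo replication might be assembled as follows (a sketch; the project number and label values are placeholders, and the surrounding ``CreateBucket`` call is omitted):

.. code-block:: python

    from google.cloud import _storage_v2

    # ASYNC_TURBO is valid for dual-region buckets only; leaving `rpo`
    # unset is equivalent to DEFAULT replication.
    bucket = _storage_v2.Bucket(
        project="projects/123456",
        storage_class="STANDARD",
        rpo="ASYNC_TURBO",
        labels={"env": "prod", "team": "storage"},
    )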
website (google.cloud._storage_v2.types.Bucket.Website): Optional. The bucket's website config, controlling how the service behaves when accessing bucket contents as a web - site. See the - [https://cloud.google.com/storage/docs/static-website][Static - Website Examples] for more information. + site. See the `Static website + examples `__ + for more information. versioning (google.cloud._storage_v2.types.Bucket.Versioning): - Optional. The bucket's versioning config. + Optional. The bucket's versioning + configuration. logging (google.cloud._storage_v2.types.Bucket.Logging): Optional. The bucket's logging config, which defines the destination bucket and name prefix @@ -3115,38 +3148,37 @@ class Bucket(proto.Message): encryption (google.cloud._storage_v2.types.Bucket.Encryption): Optional. Encryption config for a bucket. billing (google.cloud._storage_v2.types.Bucket.Billing): - Optional. The bucket's billing config. + Optional. The bucket's billing configuration. retention_policy (google.cloud._storage_v2.types.Bucket.RetentionPolicy): Optional. The bucket's retention policy. The retention policy enforces a minimum retention time for all objects contained in the bucket, based on their creation time. Any attempt to overwrite or delete objects younger than the - retention period will result in a PERMISSION_DENIED error. + retention period results in a ``PERMISSION_DENIED`` error. An unlocked retention policy can be modified or removed from the bucket via a storage.buckets.update operation. A locked retention policy cannot be removed or shortened in duration for the lifetime of the bucket. Attempting to remove or - decrease period of a locked retention policy will result in - a PERMISSION_DENIED error. + decrease the period of a locked retention policy results in a + ``PERMISSION_DENIED`` error. iam_config (google.cloud._storage_v2.types.Bucket.IamConfig): - Optional. The bucket's IAM config. + Optional. The bucket's IAM configuration. satisfies_pzs (bool): Optional. Reserved for future use. custom_placement_config (google.cloud._storage_v2.types.Bucket.CustomPlacementConfig): Optional. Configuration that, if present, specifies the data - placement for a - [https://cloud.google.com/storage/docs/locations#location-dr][configurable - dual-region]. + placement for a `configurable + dual-region `__. autoclass (google.cloud._storage_v2.types.Bucket.Autoclass): Optional. The bucket's Autoclass configuration. If there is no configuration, the - Autoclass feature will be disabled and have no - effect on the bucket. + Autoclass feature is disabled and has no effect + on the bucket. hierarchical_namespace (google.cloud._storage_v2.types.Bucket.HierarchicalNamespace): Optional. The bucket's hierarchical namespace configuration. If there is no configuration, the - hierarchical namespace feature will be disabled - and have no effect on the bucket. + hierarchical namespace feature is disabled and + has no effect on the bucket. soft_delete_policy (google.cloud._storage_v2.types.Bucket.SoftDeletePolicy): Optional. The bucket's soft delete policy. The soft delete policy prevents soft-deleted @@ -3154,7 +3186,7 @@ class Bucket(proto.Message): object_retention (google.cloud._storage_v2.types.Bucket.ObjectRetention): Optional. The bucket's object retention configuration. Must be enabled before objects in - the bucket may have retention configured. + the bucket can have retention configured. ip_filter (google.cloud._storage_v2.types.Bucket.IpFilter): Optional. The bucket's IP filter configuration.
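The retention-policy behavior described above translates into a nested message on the bucket. A sketch, assuming a ``retention_duration`` field of type ``google.protobuf.Duration`` on ``RetentionPolicy`` (the field name is an assumption; only its semantics appear in this diff):

.. code-block:: python

    from google.protobuf import duration_pb2

    from google.cloud import _storage_v2

    # A 30-day minimum retention period: deleting or overwriting objects
    # younger than this results in PERMISSION_DENIED, and the policy can
    # be shortened or removed only while it remains unlocked.
    bucket = _storage_v2.Bucket(
        retention_policy=_storage_v2.Bucket.RetentionPolicy(
            retention_duration=duration_pb2.Duration(seconds=30 * 24 * 3600),
        ),
    )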
@@ -3184,25 +3216,27 @@ class Cors(proto.Message): Attributes: origin (MutableSequence[str]): - Optional. The list of Origins eligible to receive CORS - response headers. See - [https://tools.ietf.org/html/rfc6454][RFC 6454] for more on - origins. Note: "\*" is permitted in the list of origins, and - means "any Origin". + Optional. The list of origins eligible to receive CORS + response headers. For more information about origins, see + `RFC 6454 `__. Note: + ``*`` is permitted in the list of origins, and means + ``any origin``. method (MutableSequence[str]): Optional. The list of HTTP methods on which to include CORS response headers, (``GET``, ``OPTIONS``, ``POST``, etc) - Note: "\*" is permitted in the list of methods, and means + Note: ``*`` is permitted in the list of methods, and means "any method". response_header (MutableSequence[str]): - Optional. The list of HTTP headers other than the - [https://www.w3.org/TR/cors/#simple-response-header][simple - response headers] to give permission for the user-agent to - share across domains. + Optional. The list of HTTP headers other than the `simple + response + headers `__ + to give permission for the user-agent to share across + domains. max_age_seconds (int): Optional. The value, in seconds, to return in the - [https://www.w3.org/TR/cors/#access-control-max-age-response-header][Access-Control-Max-Age - header] used in preflight responses. + `Access-Control-Max-Age + header `__ + used in preflight responses. """ origin: MutableSequence[str] = proto.RepeatedField( @@ -3230,9 +3264,8 @@ class Encryption(proto.Message): Attributes: default_kms_key (str): Optional. The name of the Cloud KMS key that - will be used to encrypt objects inserted into - this bucket, if no encryption method is - specified. + is used to encrypt objects inserted into this + bucket, if no encryption method is specified. google_managed_encryption_enforcement_config (google.cloud._storage_v2.types.Bucket.Encryption.GoogleManagedEncryptionEnforcementConfig): Optional. If omitted, then new objects with GMEK encryption-type is allowed. If set, then @@ -3273,10 +3306,11 @@ class GoogleManagedEncryptionEnforcementConfig(proto.Message): restriction_mode (str): Restriction mode for google-managed encryption for new objects within the bucket. Valid values are: - "NotRestricted", "FullyRestricted". If ``NotRestricted`` or - unset, creation of new objects with google-managed - encryption is allowed. If ``FullyRestricted``, new objects - can't be created using google-managed encryption. + ``NotRestricted`` and ``FullyRestricted``. If + ``NotRestricted`` or unset, creation of new objects with + google-managed encryption is allowed. If + ``FullyRestricted``, new objects can't be created using + google-managed encryption. This field is a member of `oneof`_ ``_restriction_mode``. effective_time (google.protobuf.timestamp_pb2.Timestamp): @@ -3309,10 +3343,11 @@ class CustomerManagedEncryptionEnforcementConfig(proto.Message): restriction_mode (str): Restriction mode for customer-managed encryption for new objects within the bucket. Valid values are: - "NotRestricted", "FullyRestricted". If ``NotRestricted`` or - unset, creation of new objects with customer-managed - encryption is allowed. If ``FullyRestricted``, new objects - can't be created using customer-managed encryption. + ``NotRestricted`` and ``FullyRestricted``. If + ``NotRestricted`` or unset, creation of new objects with + customer-managed encryption is allowed. 
If + ``FullyRestricted``, new objects can't be created using + customer-managed encryption. This field is a member of `oneof`_ ``_restriction_mode``. effective_time (google.protobuf.timestamp_pb2.Timestamp): @@ -3345,10 +3380,11 @@ class CustomerSuppliedEncryptionEnforcementConfig(proto.Message): restriction_mode (str): Restriction mode for customer-supplied encryption for new objects within the bucket. Valid values are: - "NotRestricted", "FullyRestricted". If ``NotRestricted`` or - unset, creation of new objects with customer-supplied - encryption is allowed. If ``FullyRestricted``, new objects - can't be created using customer-supplied encryption. + ``NotRestricted`` and ``FullyRestricted``. If + ``NotRestricted`` or unset, creation of new objects with + customer-supplied encryption is allowed. If + ``FullyRestricted``, new objects can't be created using + customer-supplied encryption. This field is a member of `oneof`_ ``_restriction_mode``. effective_time (google.protobuf.timestamp_pb2.Timestamp): @@ -3401,9 +3437,8 @@ class IamConfig(proto.Message): Optional. Bucket restriction options currently enforced on the bucket. public_access_prevention (str): - Optional. Whether IAM will enforce public - access prevention. Valid values are "enforced" - or "inherited". + Optional. Whether IAM enforces public access prevention. + Valid values are ``enforced`` or ``inherited``. """ class UniformBucketLevelAccess(proto.Message): @@ -3445,28 +3480,27 @@ class UniformBucketLevelAccess(proto.Message): ) class Lifecycle(proto.Message): - r"""Lifecycle properties of a bucket. - For more information, see - https://cloud.google.com/storage/docs/lifecycle. + r"""Lifecycle properties of a bucket. For more information, see `Object + Lifecycle + Management `__. Attributes: rule (MutableSequence[google.cloud._storage_v2.types.Bucket.Lifecycle.Rule]): Optional. A lifecycle management rule, which - is made of an action to take and the - condition(s) under which the action will be - taken. + is made of an action to take and the condition + under which the action is taken. """ class Rule(proto.Message): r"""A lifecycle Rule, combining an action to take on an object - and a condition which will trigger that action. + and a condition which triggers that action. Attributes: action (google.cloud._storage_v2.types.Bucket.Lifecycle.Rule.Action): Optional. The action to take. condition (google.cloud._storage_v2.types.Bucket.Lifecycle.Rule.Condition): - Optional. The condition(s) under which the - action will be taken. + Optional. The condition under which the + action is taken. """ class Action(proto.Message): @@ -3524,7 +3558,7 @@ class Condition(proto.Message): This field is a member of `oneof`_ ``_num_newer_versions``. matches_storage_class (MutableSequence[str]): Optional. Objects having any of the storage classes - specified by this condition will be matched. Values include + specified by this condition are matched. Values include ``MULTI_REGIONAL``, ``REGIONAL``, ``NEARLINE``, ``COLDLINE``, ``STANDARD``, and ``DURABLE_REDUCED_AVAILABILITY``. @@ -3545,8 +3579,8 @@ class Condition(proto.Message): condition only if these many days have been passed since it became noncurrent. The value of the field must be a nonnegative integer. If it's - zero, the object version will become eligible - for Lifecycle action as soon as it becomes + zero, the object version becomes eligible for + Lifecycle action as soon as it becomes noncurrent. This field is a member of `oneof`_ ``_days_since_noncurrent_time``. 
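The ``Condition`` fields above combine with an ``Action`` into a lifecycle ``Rule``. A minimal sketch using only fields shown in this hunk (the ``Action`` sub-message is omitted because its fields are not part of this diff):

.. code-block:: python

    from google.cloud import _storage_v2

    # All sub-conditions must hold for the rule to match: noncurrent
    # NEARLINE versions with at least three newer versions that became
    # noncurrent at least 30 days ago. A value of zero for
    # `days_since_noncurrent_time` would match as soon as a version
    # becomes noncurrent.
    condition = _storage_v2.Bucket.Lifecycle.Rule.Condition(
        num_newer_versions=3,
        days_since_noncurrent_time=30,
        matches_storage_class=["NEARLINE"],
    )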
@@ -3686,8 +3720,8 @@ class RetentionPolicy(proto.Message): Retention duration must be greater than zero and less than 100 years. Note that enforcement of retention periods less than a day is not guaranteed. Such periods should only be - used for testing purposes. Any ``nanos`` value specified - will be rounded down to the nearest second. + used for testing purposes. Any ``nanos`` value specified is + rounded down to the nearest second. """ effective_time: timestamp_pb2.Timestamp = proto.Field( @@ -3740,9 +3774,9 @@ class SoftDeletePolicy(proto.Message): ) class Versioning(proto.Message): - r"""Properties of a bucket related to versioning. - For more on Cloud Storage versioning, see - https://cloud.google.com/storage/docs/object-versioning. + r"""Properties of a bucket related to versioning. For more information + about Cloud Storage versioning, see `Object + versioning `__. Attributes: enabled (bool): @@ -3756,25 +3790,24 @@ class Versioning(proto.Message): ) class Website(proto.Message): - r"""Properties of a bucket related to accessing the contents as a - static website. For more on hosting a static website via Cloud - Storage, see - https://cloud.google.com/storage/docs/hosting-static-website. + r"""Properties of a bucket related to accessing the contents as a static + website. For details, see `hosting a static website using Cloud + Storage `__. Attributes: main_page_suffix (str): Optional. If the requested object path is missing, the - service will ensure the path has a trailing '/', append this + service ensures the path has a trailing '/', appends this suffix, and attempts to retrieve the resulting object. This allows the creation of ``index.html`` objects to represent directory pages. not_found_page (str): Optional. If the requested object path is missing, and any ``mainPageSuffix`` object is missing, if applicable, the - service will return the named object from this bucket as the - content for a - [https://tools.ietf.org/html/rfc7231#section-6.5.4][404 Not - Found] result. + service returns the named object from this bucket as the + content for a `404 Not + Found `__ + result. """ main_page_suffix: str = proto.Field( @@ -3787,10 +3820,11 @@ class Website(proto.Message): ) class CustomPlacementConfig(proto.Message): - r"""Configuration for Custom Dual Regions. It should specify precisely - two eligible regions within the same Multiregion. More information - on regions may be found - `here `__. + r"""Configuration for `configurable + dual-regions `__. + It should specify precisely two eligible regions within the same + multi-region. For details, see + `locations `__. Attributes: data_locations (MutableSequence[str]): @@ -3815,13 +3849,13 @@ class Autoclass(proto.Message): Output only. Latest instant at which the ``enabled`` field was set to true after being disabled/unconfigured or set to false after being enabled. If Autoclass is enabled when the - bucket is created, the toggle_time is set to the bucket - creation time. + bucket is created, the value of the ``toggle_time`` field is + set to the bucket ``create_time``. terminal_storage_class (str): - An object in an Autoclass bucket will - eventually cool down to the terminal storage - class if there is no access to the object. The - only valid values are NEARLINE and ARCHIVE. + An object in an Autoclass bucket eventually + cools down to the terminal storage class if + there is no access to the object. The only valid + values are NEARLINE and ARCHIVE. This field is a member of `oneof`_ ``_terminal_storage_class``.
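The ``Autoclass`` fields just described can be combined as follows (a sketch using standard proto-plus keyword construction; ``enabled`` is the field referenced by the ``toggle_time`` description above):

.. code-block:: python

    from google.cloud import _storage_v2

    # Idle objects eventually cool down to the terminal storage class;
    # NEARLINE and ARCHIVE are the only valid values.
    autoclass = _storage_v2.Bucket.Autoclass(
        enabled=True,
        terminal_storage_class="ARCHIVE",
    )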
terminal_storage_class_update_time (google.protobuf.timestamp_pb2.Timestamp): @@ -3868,7 +3902,7 @@ class IpFilter(proto.Message): filtering rules are applied to a bucket and all incoming requests to the bucket are evaluated against these rules. When set to ``Disabled``, IP filtering rules are not applied - to a bucket.". + to a bucket. This field is a member of `oneof`_ ``_mode``. public_network_source (google.cloud._storage_v2.types.Bucket.IpFilter.PublicNetworkSource): @@ -3885,13 +3919,13 @@ class IpFilter(proto.Message): orgs different than the bucket's parent org to access the bucket. When set to true, validations on the existence of the VPCs won't be performed. - If set to false, each VPC network source will be + If set to false, each VPC network source is checked to belong to the same org as the bucket as well as validated for existence. allow_all_service_agent_access (bool): Whether or not to allow all P4SA access to the bucket. When set to true, IP filter config - validation will not apply. + validation doesn't apply. This field is a member of `oneof`_ ``_allow_all_service_agent_access``. """ @@ -4168,20 +4202,18 @@ class BucketAccessControl(proto.Message): - All members of the Google Apps for Business domain ``example.com`` would be ``domain-example.com`` For project entities, ``project-{team}-{projectnumber}`` - format will be returned on response. + format is returned in the response. entity_alt (str): Output only. The alternative entity format, if exists. For - project entities, ``project-{team}-{projectid}`` format will - be returned on response. + project entities, ``project-{team}-{projectid}`` format is + returned in the response. entity_id (str): Optional. The ID for the entity, if any. etag (str): - Optional. The etag of the - BucketAccessControl. If included in the metadata - of an update or delete request message, the - operation operation will only be performed if - the etag matches that of the bucket's - BucketAccessControl. + Optional. The ``etag`` of the ``BucketAccessControl``. If + included in the metadata of an update or delete request + message, the operation is only performed if the + etag matches that of the bucket's ``BucketAccessControl``. email (str): Optional. The email address associated with the entity, if any. @@ -4272,18 +4304,18 @@ class ObjectChecksums(proto.Message): CRC32C digest of the object data. Computed by the Cloud Storage service for all written objects. If set in a WriteObjectRequest, service - will validate that the stored object matches - this checksum. + validates that the stored object matches this + checksum. This field is a member of `oneof`_ ``_crc32c``. md5_hash (bytes): Optional. 128 bit MD5 hash of the object data. For more - information about using the MD5 hash, see - [https://cloud.google.com/storage/docs/hashes-etags#json-api][Hashes - and ETags: Best Practices]. Not all objects will provide an - MD5 hash. For example, composite objects provide only crc32c - hashes. This value is equivalent to running - ``cat object.txt | openssl md5 -binary`` + information about using the MD5 hash, see `Data validation + and change + detection `__. + Not all objects provide an MD5 hash. For example, composite + objects provide only crc32c hashes.
This value is equivalent + to running ``cat object.txt | openssl md5 -binary`` """ crc32c: int = proto.Field( @@ -4344,8 +4376,8 @@ class ObjectContexts(proto.Message): class CustomerEncryption(proto.Message): - r"""Describes the Customer-Supplied Encryption Key mechanism used - to store an Object's data at rest. + r"""Describes the customer-supplied encryption key mechanism used + to store an object's data at rest. Attributes: encryption_algorithm (str): @@ -4383,10 +4415,9 @@ class Object(proto.Message): Immutable. The name of the bucket containing this object. etag (str): - Optional. The etag of the object. - If included in the metadata of an update or - delete request message, the operation will only - be performed if the etag matches that of the + Optional. The ``etag`` of the object. If included in the + metadata of an update or delete request message, the + operation is only performed if the etag matches that of the live object. generation (int): Immutable. The content generation of this @@ -4409,32 +4440,30 @@ class Object(proto.Message): Optional. Storage class of the object. size (int): Output only. Content-Length of the object data in bytes, - matching - [https://tools.ietf.org/html/rfc7230#section-3.3.2][RFC 7230 - §3.3.2]. + matching `RFC 7230 + §3.3.2 `__. content_encoding (str): - Optional. Content-Encoding of the object data, matching - [https://tools.ietf.org/html/rfc7231#section-3.1.2.2][RFC - 7231 §3.1.2.2] + Optional. Content-Encoding of the object data, matching `RFC + 7231 + §3.1.2.2 `__ content_disposition (str): Optional. Content-Disposition of the object data, matching - [https://tools.ietf.org/html/rfc6266][RFC 6266]. + `RFC 6266 `__. cache_control (str): Optional. Cache-Control directive for the object data, - matching - [https://tools.ietf.org/html/rfc7234#section-5.2"][RFC 7234 - §5.2]. If omitted, and the object is accessible to all - anonymous users, the default will be - ``public, max-age=3600``. + matching `RFC 7234 + §5.2 `__. + If omitted, and the object is accessible to all anonymous + users, the default is ``public, max-age=3600``. acl (MutableSequence[google.cloud._storage_v2.types.ObjectAccessControl]): Optional. Access controls on the object. If - iam_config.uniform_bucket_level_access is enabled on the + ``iam_config.uniform_bucket_level_access`` is enabled on the parent bucket, requests to set, read, or modify the ACL return an error. content_language (str): - Optional. Content-Language of the object data, matching - [https://tools.ietf.org/html/rfc7231#section-3.1.3.2][RFC - 7231 §3.1.3.2]. + Optional. Content-Language of the object data, matching `RFC + 7231 + §3.1.3.2 `__. delete_time (google.protobuf.timestamp_pb2.Timestamp): Output only. If this object is noncurrent, this is the time when the object became @@ -4443,10 +4472,11 @@ class Object(proto.Message): Output only. The time when the object was finalized. content_type (str): - Optional. Content-Type of the object data, matching - [https://tools.ietf.org/html/rfc7231#section-3.1.1.5][RFC - 7231 §3.1.1.5]. If an object is stored without a - Content-Type, it is served as ``application/octet-stream``. + Optional. Content-Type of the object data, matching `RFC + 7231 + §3.1.1.5 `__. + If an object is stored without a Content-Type, it is served + as ``application/octet-stream``. create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. The creation time of the object.
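Returning to ``ObjectChecksums`` above: the MD5 documented there is the plain binary digest of the object bytes. A sketch (computing ``crc32c`` would additionally require a CRC32C library such as ``google-crc32c``, which is an assumption and not part of this diff):

.. code-block:: python

    import hashlib

    from google.cloud import _storage_v2

    data = b"example object payload"

    # Equivalent to `cat object.txt | openssl md5 -binary` over the same
    # bytes; the service validates stored data against this value when it
    # is supplied on a write.
    checksums = _storage_v2.ObjectChecksums(
        md5_hash=hashlib.md5(data).digest(),
    )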
component_count (int): @@ -4456,13 +4486,12 @@ class Object(proto.Message): checksums (google.cloud._storage_v2.types.ObjectChecksums): Output only. Hashes for the data part of this object. This field is used for output only and - will be silently ignored if provided in - requests. The checksums of the complete object - regardless of data range. If the object is - downloaded in full, the client should compute - one of these checksums over the downloaded - object and compare it against the value provided - here. + is silently ignored if provided in requests. The + checksums of the complete object regardless of + data range. If the object is downloaded in full, + the client should compute one of these checksums + over the downloaded object and compare it + against the value provided here. update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. The modification time of the object metadata. Set initially to object @@ -4480,7 +4509,7 @@ class Object(proto.Message): update_storage_class_time (google.protobuf.timestamp_pb2.Timestamp): Output only. The time at which the object's storage class was last changed. When the object is initially created, it - will be set to time_created. + is set to ``time_created``. temporary_hold (bool): Optional. Whether an object is under temporary hold. While this flag is set to true, @@ -4515,24 +4544,24 @@ class Object(proto.Message): Whether an object is under event-based hold. An event-based hold is a way to force the retention of an object until after some event occurs. Once the hold is released by - explicitly setting this field to false, the object will - become subject to any bucket-level retention policy, except - that the retention duration will be calculated from the time - the event based hold was lifted, rather than the time the - object was created. + explicitly setting this field to ``false``, the object + becomes subject to any bucket-level retention policy, except + that the retention duration is calculated from the time the + event based hold was lifted, rather than the time the object + was created. - In a WriteObject request, not setting this field implies + In a ``WriteObject`` request, not setting this field implies that the value should be taken from the parent bucket's - "default_event_based_hold" field. In a response, this field - will always be set to true or false. + ``default_event_based_hold`` field. In a response, this + field is always set to ``true`` or ``false``. This field is a member of `oneof`_ ``_event_based_hold``. owner (google.cloud._storage_v2.types.Owner): - Output only. The owner of the object. This - will always be the uploader of the object. + Output only. The owner of the object. This is + always the uploader of the object. customer_encryption (google.cloud._storage_v2.types.CustomerEncryption): - Optional. Metadata of Customer-Supplied - Encryption Key, if the object is encrypted by + Optional. Metadata of customer-supplied + encryption key, if the object is encrypted by such a key. custom_time (google.protobuf.timestamp_pb2.Timestamp): Optional. A user-specified timestamp set on @@ -4542,22 +4571,23 @@ class Object(proto.Message): soft-deleted. Soft-deleted objects are only accessible if a - soft_delete_policy is enabled. Also see hard_delete_time. + soft_delete_policy is enabled. Also see + ``hard_delete_time``. This field is a member of `oneof`_ ``_soft_delete_time``. hard_delete_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. The time when the object will be permanently + Output only. 
The time when the object is permanently + deleted. Only set when an object becomes soft-deleted with a - soft_delete_policy. Otherwise, the object will not be + ``soft_delete_policy``. Otherwise, the object is not accessible. This field is a member of `oneof`_ ``_hard_delete_time``. retention (google.cloud._storage_v2.types.Object.Retention): Optional. Retention configuration of this - object. May only be configured if the bucket has - object retention enabled. + object. Can only be configured if the bucket + has object retention enabled. """ class Retention(proto.Message): @@ -4582,11 +4612,11 @@ class Mode(proto.Enum): No specified mode. Object is not under retention. UNLOCKED (1): - Retention period may be decreased or - increased. The Retention configuration may be - removed. The mode may be changed to locked. + Retention period can be decreased or + increased. The Retention configuration can be + removed. The mode can be changed to locked. LOCKED (2): - Retention period may be increased. + Retention period can be increased. The Retention configuration cannot be removed. The mode cannot be changed. """ @@ -4792,20 +4822,19 @@ class ObjectAccessControl(proto.Message): - All members of the Google Apps for Business domain ``example.com`` would be ``domain-example.com``. For project entities, ``project-{team}-{projectnumber}`` - format will be returned on response. + format is returned in the response. entity_alt (str): Output only. The alternative entity format, if exists. For - project entities, ``project-{team}-{projectid}`` format will - be returned on response. + project entities, ``project-{team}-{projectid}`` format is + returned in the response. entity_id (str): Optional. The ID for the entity, if any. etag (str): Optional. The etag of the ObjectAccessControl. If included in the metadata of an update or delete request message, the - operation will only be performed if the etag - matches that of the live object's - ObjectAccessControl. + operation is only performed if the etag matches + that of the live object's ObjectAccessControl. email (str): Optional. The email address associated with the entity, if any. diff --git a/tests/unit/gapic/storage_v2/test_storage.py b/tests/unit/gapic/storage_v2/test_storage.py index e6344192b..20b680341 100644 --- a/tests/unit/gapic/storage_v2/test_storage.py +++ b/tests/unit/gapic/storage_v2/test_storage.py @@ -1935,6 +1935,7 @@ def test_list_buckets(request_type, transport: str = "grpc"): # Designate an appropriate return value for the call. call.return_value = storage.ListBucketsResponse( next_page_token="next_page_token_value", + unreachable=["unreachable_value"], ) response = client.list_buckets(request) @@ -1947,6 +1948,7 @@ def test_list_buckets(request_type, transport: str = "grpc"): # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListBucketsPager) assert response.next_page_token == "next_page_token_value" + assert response.unreachable == ["unreachable_value"] def test_list_buckets_non_empty_request_with_auto_populated_field(): @@ -2077,6 +2079,7 @@ async def test_list_buckets_async( call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( storage.ListBucketsResponse( next_page_token="next_page_token_value", + unreachable=["unreachable_value"], ) ) response = await client.list_buckets(request) @@ -2090,6 +2093,7 @@ async def test_list_buckets_async( # Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListBucketsAsyncPager) assert response.next_page_token == "next_page_token_value" + assert response.unreachable == ["unreachable_value"] @pytest.mark.asyncio @@ -9281,6 +9285,7 @@ async def test_list_buckets_empty_call_grpc_asyncio(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( storage.ListBucketsResponse( next_page_token="next_page_token_value", + unreachable=["unreachable_value"], ) ) await client.list_buckets(request=None) @@ -10018,6 +10023,7 @@ async def test_list_buckets_routing_parameters_request_1_grpc_asyncio(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( storage.ListBucketsResponse( next_page_token="next_page_token_value", + unreachable=["unreachable_value"], ) ) await client.list_buckets(request={"parent": "sample1"})
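The new ``unreachable`` assertions above exercise a field that reports locations the service could not reach while listing buckets, following the usual GAPIC convention for partial results. A consumption sketch (the parent value is a placeholder; ``pages`` is the standard pager surface for reaching the raw responses):

.. code-block:: python

    from google.cloud import _storage_v2

    client = _storage_v2.StorageClient()
    pager = client.list_buckets(request={"parent": "projects/my-project"})

    # `unreachable` lives on each raw ListBucketsResponse page rather than
    # on the pager itself, so inspect the pages explicitly.
    for page in pager.pages:
        if page.unreachable:
            print("Unreachable locations:", list(page.unreachable))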