From 3ec6076be6b6f4029379fb5ac8da8e703da96cad Mon Sep 17 00:00:00 2001 From: Noah Meyerhans Date: Tue, 2 Feb 2021 14:32:12 -0800 Subject: [PATCH] New upstream version 1.20.0+repack --- PKG-INFO | 12 +- README.rst | 4 +- botocore/__init__.py | 2 +- botocore/awsrequest.py | 19 +- .../2016-02-06/service-2.json | 61 ++--- .../data/appmesh/2019-01-25/service-2.json | 217 +++++++++++++++++- .../iotwireless/2020-11-22/service-2.json | 13 +- .../data/location/2020-11-19/service-2.json | 2 +- .../lookoutvision/2020-11-20/service-2.json | 215 +++++++++++++++-- .../data/medialive/2017-10-14/service-2.json | 15 +- .../organizations/2016-11-28/service-2.json | 26 +-- .../data/rds-data/2018-08-01/service-2.json | 25 +- .../data/route53/2013-04-01/service-2.json | 82 +++---- .../data/s3control/2018-08-20/service-2.json | 32 ++- botocore/utils.py | 80 +++---- docs/source/_static/shortbreadv1.js | 2 - docs/source/conf.py | 4 +- setup.cfg | 3 +- setup.py | 3 +- tests/functional/test_s3.py | 40 +++- tests/functional/test_s3_control_redirects.py | 14 +- tests/unit/test_utils.py | 24 +- 22 files changed, 680 insertions(+), 215 deletions(-) delete mode 100644 docs/source/_static/shortbreadv1.js diff --git a/PKG-INFO b/PKG-INFO index 6c803d1f..96016f95 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,10 +1,9 @@ -Metadata-Version: 1.1 +Metadata-Version: 1.2 Name: botocore -Version: 1.19.63 +Version: 1.20.0 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services -Author-email: UNKNOWN License: Apache License 2.0 Description: botocore ======== @@ -22,9 +21,9 @@ Description: botocore `boto3 `__. On 10/29/2020 deprecation for Python 3.4 and Python 3.5 was announced and support - will be dropped on 02/01/2021. To avoid disruption, customers using Botocore + was dropped on 02/01/2021. 
To avoid disruption, customers using Botocore on Python 3.4 or 3.5 may need to upgrade their version of Python or pin the - version of Botocore in use prior to 02/01/2021. For more information, see + version of Botocore. For more information, see this `blog post `__. Getting Started @@ -125,8 +124,7 @@ Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 +Requires-Python: >= 2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.* diff --git a/README.rst b/README.rst index 357d064f..feda28de 100644 --- a/README.rst +++ b/README.rst @@ -14,9 +14,9 @@ botocore package is the foundation for the `boto3 `__. On 10/29/2020 deprecation for Python 3.4 and Python 3.5 was announced and support -will be dropped on 02/01/2021. To avoid disruption, customers using Botocore +was dropped on 02/01/2021. To avoid disruption, customers using Botocore on Python 3.4 or 3.5 may need to upgrade their version of Python or pin the -version of Botocore in use prior to 02/01/2021. For more information, see +version of Botocore. For more information, see this `blog post `__. 
Getting Started diff --git a/botocore/__init__.py b/botocore/__init__.py index 3a86aeed..ca5ea32f 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re import logging -__version__ = '1.19.63' +__version__ = '1.20.0' class NullHandler(logging.Handler): diff --git a/botocore/awsrequest.py b/botocore/awsrequest.py index 535b91be..f47f0cc0 100644 --- a/botocore/awsrequest.py +++ b/botocore/awsrequest.py @@ -11,6 +11,7 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. +import io import sys import logging import functools @@ -404,12 +405,18 @@ class AWSRequestPreparer(object): # Try getting the length from a seekable stream if hasattr(body, 'seek') and hasattr(body, 'tell'): - orig_pos = body.tell() - body.seek(0, 2) - end_file_pos = body.tell() - body.seek(orig_pos) - return end_file_pos - orig_pos - + try: + orig_pos = body.tell() + body.seek(0, 2) + end_file_pos = body.tell() + body.seek(orig_pos) + return end_file_pos - orig_pos + except io.UnsupportedOperation: + # in case when body is, for example, io.BufferedIOBase object + # it has "seek" method which throws "UnsupportedOperation" + # exception in such case we want to fall back to "chunked" + # encoding + pass # Failed to determine the length return None diff --git a/botocore/data/application-autoscaling/2016-02-06/service-2.json b/botocore/data/application-autoscaling/2016-02-06/service-2.json index f5639fed..9c62487e 100644 --- a/botocore/data/application-autoscaling/2016-02-06/service-2.json +++ b/botocore/data/application-autoscaling/2016-02-06/service-2.json @@ -27,7 +27,7 @@ {"shape":"ConcurrentUpdateException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Deletes the specified scaling policy for an Application Auto Scaling scalable target.

Deleting a step scaling policy deletes the underlying alarm action, but does not delete the CloudWatch alarm associated with the scaling policy, even if it no longer has an associated action.

For more information, see Delete a Step Scaling Policy and Delete a Target Tracking Scaling Policy in the Application Auto Scaling User Guide.

" + "documentation":"

Deletes the specified scaling policy for an Application Auto Scaling scalable target.

Deleting a step scaling policy deletes the underlying alarm action, but does not delete the CloudWatch alarm associated with the scaling policy, even if it no longer has an associated action.

For more information, see Delete a step scaling policy and Delete a target tracking scaling policy in the Application Auto Scaling User Guide.

" }, "DeleteScheduledAction":{ "name":"DeleteScheduledAction", @@ -43,7 +43,7 @@ {"shape":"ConcurrentUpdateException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Deletes the specified scheduled action for an Application Auto Scaling scalable target.

For more information, see Delete a Scheduled Action in the Application Auto Scaling User Guide.

" + "documentation":"

Deletes the specified scheduled action for an Application Auto Scaling scalable target.

For more information, see Delete a scheduled action in the Application Auto Scaling User Guide.

" }, "DeregisterScalableTarget":{ "name":"DeregisterScalableTarget", @@ -108,7 +108,7 @@ {"shape":"ConcurrentUpdateException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Describes the Application Auto Scaling scaling policies for the specified service namespace.

You can filter the results using ResourceId, ScalableDimension, and PolicyNames.

For more information, see Target Tracking Scaling Policies and Step Scaling Policies in the Application Auto Scaling User Guide.

" + "documentation":"

Describes the Application Auto Scaling scaling policies for the specified service namespace.

You can filter the results using ResourceId, ScalableDimension, and PolicyNames.

For more information, see Target tracking scaling policies and Step scaling policies in the Application Auto Scaling User Guide.

" }, "DescribeScheduledActions":{ "name":"DescribeScheduledActions", @@ -124,7 +124,7 @@ {"shape":"ConcurrentUpdateException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Describes the Application Auto Scaling scheduled actions for the specified service namespace.

You can filter the results using the ResourceId, ScalableDimension, and ScheduledActionNames parameters.

For more information, see Scheduled Scaling in the Application Auto Scaling User Guide.

" + "documentation":"

Describes the Application Auto Scaling scheduled actions for the specified service namespace.

You can filter the results using the ResourceId, ScalableDimension, and ScheduledActionNames parameters.

For more information, see Scheduled scaling and Managing scheduled scaling in the Application Auto Scaling User Guide.

" }, "PutScalingPolicy":{ "name":"PutScalingPolicy", @@ -142,7 +142,7 @@ {"shape":"FailedResourceAccessException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Creates or updates a scaling policy for an Application Auto Scaling scalable target.

Each scalable target is identified by a service namespace, resource ID, and scalable dimension. A scaling policy applies to the scalable target identified by those three attributes. You cannot create a scaling policy until you have registered the resource as a scalable target.

Multiple scaling policies can be in force at the same time for the same scalable target. You can have one or more target tracking scaling policies, one or more step scaling policies, or both. However, there is a chance that multiple policies could conflict, instructing the scalable target to scale out or in at the same time. Application Auto Scaling gives precedence to the policy that provides the largest capacity for both scale out and scale in. For example, if one policy increases capacity by 3, another policy increases capacity by 200 percent, and the current capacity is 10, Application Auto Scaling uses the policy with the highest calculated capacity (200% of 10 = 20) and scales out to 30.

We recommend caution, however, when using target tracking scaling policies with step scaling policies because conflicts between these policies can cause undesirable behavior. For example, if the step scaling policy initiates a scale-in activity before the target tracking policy is ready to scale in, the scale-in activity will not be blocked. After the scale-in activity completes, the target tracking policy could instruct the scalable target to scale out again.

For more information, see Target Tracking Scaling Policies and Step Scaling Policies in the Application Auto Scaling User Guide.

If a scalable target is deregistered, the scalable target is no longer available to execute scaling policies. Any scaling policies that were specified for the scalable target are deleted.

" + "documentation":"

Creates or updates a scaling policy for an Application Auto Scaling scalable target.

Each scalable target is identified by a service namespace, resource ID, and scalable dimension. A scaling policy applies to the scalable target identified by those three attributes. You cannot create a scaling policy until you have registered the resource as a scalable target.

Multiple scaling policies can be in force at the same time for the same scalable target. You can have one or more target tracking scaling policies, one or more step scaling policies, or both. However, there is a chance that multiple policies could conflict, instructing the scalable target to scale out or in at the same time. Application Auto Scaling gives precedence to the policy that provides the largest capacity for both scale out and scale in. For example, if one policy increases capacity by 3, another policy increases capacity by 200 percent, and the current capacity is 10, Application Auto Scaling uses the policy with the highest calculated capacity (200% of 10 = 20) and scales out to 30.

We recommend caution, however, when using target tracking scaling policies with step scaling policies because conflicts between these policies can cause undesirable behavior. For example, if the step scaling policy initiates a scale-in activity before the target tracking policy is ready to scale in, the scale-in activity will not be blocked. After the scale-in activity completes, the target tracking policy could instruct the scalable target to scale out again.

For more information, see Target tracking scaling policies and Step scaling policies in the Application Auto Scaling User Guide.

If a scalable target is deregistered, the scalable target is no longer available to execute scaling policies. Any scaling policies that were specified for the scalable target are deleted.

" }, "PutScheduledAction":{ "name":"PutScheduledAction", @@ -159,7 +159,7 @@ {"shape":"ConcurrentUpdateException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Creates or updates a scheduled action for an Application Auto Scaling scalable target.

Each scalable target is identified by a service namespace, resource ID, and scalable dimension. A scheduled action applies to the scalable target identified by those three attributes. You cannot create a scheduled action until you have registered the resource as a scalable target.

When start and end times are specified with a recurring schedule using a cron expression or rates, they form the boundaries of when the recurring action starts and stops.

To update a scheduled action, specify the parameters that you want to change. If you don't specify start and end times, the old values are deleted.

For more information, see Scheduled Scaling in the Application Auto Scaling User Guide.

If a scalable target is deregistered, the scalable target is no longer available to run scheduled actions. Any scheduled actions that were specified for the scalable target are deleted.

" + "documentation":"

Creates or updates a scheduled action for an Application Auto Scaling scalable target.

Each scalable target is identified by a service namespace, resource ID, and scalable dimension. A scheduled action applies to the scalable target identified by those three attributes. You cannot create a scheduled action until you have registered the resource as a scalable target.

When start and end times are specified with a recurring schedule using a cron expression or rates, they form the boundaries for when the recurring action starts and stops.

To update a scheduled action, specify the parameters that you want to change. If you don't specify start and end times, the old values are deleted.

For more information, see Scheduled scaling in the Application Auto Scaling User Guide.

If a scalable target is deregistered, the scalable target is no longer available to run scheduled actions. Any scheduled actions that were specified for the scalable target are deleted.

" }, "RegisterScalableTarget":{ "name":"RegisterScalableTarget", @@ -350,7 +350,7 @@ }, "ResourceIds":{ "shape":"ResourceIdsMaxLen1600", - "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

" + "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

" }, "ScalableDimension":{ "shape":"ScalableDimension", @@ -389,7 +389,7 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

" + "documentation":"

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

" }, "ScalableDimension":{ "shape":"ScalableDimension", @@ -432,7 +432,7 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

" + "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

" }, "ScalableDimension":{ "shape":"ScalableDimension", @@ -475,7 +475,7 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

" + "documentation":"

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

  • Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.

" }, "ScalableDimension":{ "shape":"ScalableDimension", @@ -535,7 +535,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

A per-account resource limit is exceeded. For more information, see Application Auto Scaling Limits.

", + "documentation":"

A per-account resource limit is exceeded. For more information, see Application Auto Scaling service quotas.

", "exception":true }, "MaxResults":{"type":"integer"}, @@ -642,7 +642,7 @@ "documentation":"

Identifies the resource associated with the metric type. You can't specify a resource label unless the metric type is ALBRequestCountPerTarget and there is a target group attached to the Spot Fleet request or ECS service.

You create the resource label by appending the final portion of the load balancer ARN and the final portion of the target group ARN into a single value, separated by a forward slash (/). The format is app/<load-balancer-name>/<load-balancer-id>/targetgroup/<target-group-name>/<target-group-id>, where:

  • app/<load-balancer-name>/<load-balancer-id> is the final portion of the load balancer ARN

  • targetgroup/<target-group-name>/<target-group-id> is the final portion of the target group ARN.

This is an example: app/EC2Co-EcsEl-1TKLTMITMM0EO/f37c06a68c1748aa/targetgroup/EC2Co-Defau-LDNM7Q3ZH1ZN/6d4ea56ca2d6a18d.

To find the ARN for an Application Load Balancer, use the DescribeLoadBalancers API operation. To find the ARN for the target group, use the DescribeTargetGroups API operation.

" } }, - "documentation":"

Represents a predefined metric for a target tracking scaling policy to use with Application Auto Scaling.

Only the AWS services that you're using send metrics to Amazon CloudWatch. To determine whether a desired metric already exists by looking up its namespace and dimension using the CloudWatch metrics dashboard in the console, follow the procedure in Building Dashboards with CloudWatch in the Application Auto Scaling User Guide.

" + "documentation":"

Represents a predefined metric for a target tracking scaling policy to use with Application Auto Scaling.

Only the AWS services that you're using send metrics to Amazon CloudWatch. To determine whether a desired metric already exists by looking up its namespace and dimension using the CloudWatch metrics dashboard in the console, follow the procedure in Building dashboards with CloudWatch in the Application Auto Scaling User Guide.

" }, "PutScalingPolicyRequest":{ "type":"structure", @@ -671,7 +671,7 @@ }, "PolicyType":{ "shape":"PolicyType", - "documentation":"

The policy type. This parameter is required if you are creating a scaling policy.

The following policy types are supported:

TargetTrackingScaling—Not supported for Amazon EMR

StepScaling—Not supported for DynamoDB, Amazon Comprehend, Lambda, Amazon Keyspaces (for Apache Cassandra), or Amazon MSK.

For more information, see Target Tracking Scaling Policies and Step Scaling Policies in the Application Auto Scaling User Guide.

" + "documentation":"

The policy type. This parameter is required if you are creating a scaling policy.

The following policy types are supported:

TargetTrackingScaling—Not supported for Amazon EMR

StepScaling—Not supported for DynamoDB, Amazon Comprehend, Lambda, Amazon Keyspaces (for Apache Cassandra), or Amazon MSK.

For more information, see Target tracking scaling policies and Step scaling policies in the Application Auto Scaling User Guide.

" }, "StepScalingPolicyConfiguration":{ "shape":"StepScalingPolicyConfiguration", @@ -712,7 +712,11 @@ }, "Schedule":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The schedule for this action. The following formats are supported:

  • At expressions - \"at(yyyy-mm-ddThh:mm:ss)\"

  • Rate expressions - \"rate(value unit)\"

  • Cron expressions - \"cron(fields)\"

At expressions are useful for one-time schedules. Specify the time in UTC.

For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days.

For cron expressions, fields is a cron expression. The supported cron format consists of six fields separated by white spaces: [Minutes] [Hours] [Day_of_Month] [Month] [Day_of_Week] [Year].

For more information and examples, see Scheduled Scaling in the Application Auto Scaling User Guide.

" + "documentation":"

The schedule for this action. The following formats are supported:

  • At expressions - \"at(yyyy-mm-ddThh:mm:ss)\"

  • Rate expressions - \"rate(value unit)\"

  • Cron expressions - \"cron(fields)\"

At expressions are useful for one-time schedules. Cron expressions are useful for scheduled actions that run periodically at a specified date and time, and rate expressions are useful for scheduled actions that run at a regular interval.

At and cron expressions use Coordinated Universal Time (UTC) by default.

The cron format consists of six fields separated by white spaces: [Minutes] [Hours] [Day_of_Month] [Month] [Day_of_Week] [Year].

For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days.

For more information and examples, see Example scheduled actions for Application Auto Scaling in the Application Auto Scaling User Guide.

" + }, + "Timezone":{ + "shape":"ResourceIdMaxLen1600", + "documentation":"

Specifies the time zone used when setting a scheduled action by using an at or cron expression. If a time zone is not provided, UTC is used by default.

Valid values are the canonical names of the IANA time zones supported by Joda-Time (such as Etc/GMT+9 or Pacific/Tahiti). For more information, see https://www.joda.org/joda-time/timezones.html.

" }, "ScheduledActionName":{ "shape":"ScheduledActionName", @@ -728,11 +732,11 @@ }, "StartTime":{ "shape":"TimestampType", - "documentation":"

The date and time for this scheduled action to start.

" + "documentation":"

The date and time for this scheduled action to start, in UTC.

" }, "EndTime":{ "shape":"TimestampType", - "documentation":"

The date and time for the recurring schedule to end.

" + "documentation":"

The date and time for the recurring schedule to end, in UTC.

" }, "ScalableTargetAction":{ "shape":"ScalableTargetAction", @@ -775,11 +779,11 @@ }, "RoleARN":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

This parameter is required for services that do not support service-linked roles (such as Amazon EMR), and it must specify the ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf.

If the service supports service-linked roles, Application Auto Scaling uses a service-linked role, which it creates if it does not yet exist. For more information, see Application Auto Scaling IAM Roles.

" + "documentation":"

This parameter is required for services that do not support service-linked roles (such as Amazon EMR), and it must specify the ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf.

If the service supports service-linked roles, Application Auto Scaling uses a service-linked role, which it creates if it does not yet exist. For more information, see Application Auto Scaling IAM roles.

" }, "SuspendedState":{ "shape":"SuspendedState", - "documentation":"

An embedded object that contains attributes and attribute values that are used to suspend and resume automatic scaling. Setting the value of an attribute to true suspends the specified scaling activities. Setting it to false (default) resumes the specified scaling activities.

Suspension Outcomes

  • For DynamicScalingInSuspended, while a suspension is in effect, all scale-in activities that are triggered by a scaling policy are suspended.

  • For DynamicScalingOutSuspended, while a suspension is in effect, all scale-out activities that are triggered by a scaling policy are suspended.

  • For ScheduledScalingSuspended, while a suspension is in effect, all scaling activities that involve scheduled actions are suspended.

For more information, see Suspending and Resuming Scaling in the Application Auto Scaling User Guide.

" + "documentation":"

An embedded object that contains attributes and attribute values that are used to suspend and resume automatic scaling. Setting the value of an attribute to true suspends the specified scaling activities. Setting it to false (default) resumes the specified scaling activities.

Suspension Outcomes

  • For DynamicScalingInSuspended, while a suspension is in effect, all scale-in activities that are triggered by a scaling policy are suspended.

  • For DynamicScalingOutSuspended, while a suspension is in effect, all scale-out activities that are triggered by a scaling policy are suspended.

  • For ScheduledScalingSuspended, while a suspension is in effect, all scaling activities that involve scheduled actions are suspended.

For more information, see Suspending and resuming scaling in the Application Auto Scaling User Guide.

" } } }, @@ -801,7 +805,8 @@ }, "ResourceIdsMaxLen1600":{ "type":"list", - "member":{"shape":"ResourceIdMaxLen1600"} + "member":{"shape":"ResourceIdMaxLen1600"}, + "max":50 }, "ResourceLabel":{ "type":"string", @@ -1053,7 +1058,11 @@ }, "Schedule":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The schedule for this action. The following formats are supported:

  • At expressions - \"at(yyyy-mm-ddThh:mm:ss)\"

  • Rate expressions - \"rate(value unit)\"

  • Cron expressions - \"cron(fields)\"

At expressions are useful for one-time schedules. Specify the time in UTC.

For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days.

For cron expressions, fields is a cron expression. The supported cron format consists of six fields separated by white spaces: [Minutes] [Hours] [Day_of_Month] [Month] [Day_of_Week] [Year].

For more information and examples, see Scheduled Scaling in the Application Auto Scaling User Guide.

" + "documentation":"

The schedule for this action. The following formats are supported:

  • At expressions - \"at(yyyy-mm-ddThh:mm:ss)\"

  • Rate expressions - \"rate(value unit)\"

  • Cron expressions - \"cron(fields)\"

At expressions are useful for one-time schedules. Cron expressions are useful for scheduled actions that run periodically at a specified date and time, and rate expressions are useful for scheduled actions that run at a regular interval.

At and cron expressions use Coordinated Universal Time (UTC) by default.

The cron format consists of six fields separated by white spaces: [Minutes] [Hours] [Day_of_Month] [Month] [Day_of_Week] [Year].

For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days.

For more information and examples, see Example scheduled actions for Application Auto Scaling in the Application Auto Scaling User Guide.

" + }, + "Timezone":{ + "shape":"ResourceIdMaxLen1600", + "documentation":"

The time zone used when referring to the date and time of a scheduled action, when the scheduled action uses an at or cron expression.

" }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", @@ -1065,11 +1074,11 @@ }, "StartTime":{ "shape":"TimestampType", - "documentation":"

The date and time that the action is scheduled to begin.

" + "documentation":"

The date and time that the action is scheduled to begin, in UTC.

" }, "EndTime":{ "shape":"TimestampType", - "documentation":"

The date and time that the action is scheduled to end.

" + "documentation":"

The date and time that the action is scheduled to end, in UTC.

" }, "ScalableTargetAction":{ "shape":"ScalableTargetAction", @@ -1149,7 +1158,7 @@ }, "Cooldown":{ "shape":"Cooldown", - "documentation":"

The amount of time, in seconds, to wait for a previous scaling activity to take effect.

With scale-out policies, the intention is to continuously (but not excessively) scale out. After Application Auto Scaling successfully scales out using a step scaling policy, it starts to calculate the cooldown time. The scaling policy won't increase the desired capacity again unless either a larger scale out is triggered or the cooldown period ends. While the cooldown period is in effect, capacity added by the initiating scale-out activity is calculated as part of the desired capacity for the next scale-out activity. For example, when an alarm triggers a step scaling policy to increase the capacity by 2, the scaling activity completes successfully, and a cooldown period starts. If the alarm triggers again during the cooldown period but at a more aggressive step adjustment of 3, the previous increase of 2 is considered part of the current capacity. Therefore, only 1 is added to the capacity.

With scale-in policies, the intention is to scale in conservatively to protect your application’s availability, so scale-in activities are blocked until the cooldown period has expired. However, if another alarm triggers a scale-out activity during the cooldown period after a scale-in activity, Application Auto Scaling scales out the target immediately. In this case, the cooldown period for the scale-in activity stops and doesn't complete.

Application Auto Scaling provides a default value of 300 for the following scalable targets:

  • ECS services

  • Spot Fleet requests

  • EMR clusters

  • AppStream 2.0 fleets

  • Aurora DB clusters

  • Amazon SageMaker endpoint variants

  • Custom resources

For all other scalable targets, the default value is 0:

  • DynamoDB tables

  • DynamoDB global secondary indexes

  • Amazon Comprehend document classification and entity recognizer endpoints

  • Lambda provisioned concurrency

  • Amazon Keyspaces tables

  • Amazon MSK cluster storage

" + "documentation":"

The amount of time, in seconds, to wait for a previous scaling activity to take effect.

With scale-out policies, the intention is to continuously (but not excessively) scale out. After Application Auto Scaling successfully scales out using a step scaling policy, it starts to calculate the cooldown time. The scaling policy won't increase the desired capacity again unless either a larger scale out is triggered or the cooldown period ends. While the cooldown period is in effect, capacity added by the initiating scale-out activity is calculated as part of the desired capacity for the next scale-out activity. For example, when an alarm triggers a step scaling policy to increase the capacity by 2, the scaling activity completes successfully, and a cooldown period starts. If the alarm triggers again during the cooldown period but at a more aggressive step adjustment of 3, the previous increase of 2 is considered part of the current capacity. Therefore, only 1 is added to the capacity.

With scale-in policies, the intention is to scale in conservatively to protect your application’s availability, so scale-in activities are blocked until the cooldown period has expired. However, if another alarm triggers a scale-out activity during the cooldown period after a scale-in activity, Application Auto Scaling scales out the target immediately. In this case, the cooldown period for the scale-in activity stops and doesn't complete.

Application Auto Scaling provides a default value of 300 for the following scalable targets:

  • ECS services

  • Spot Fleet requests

  • EMR clusters

  • AppStream 2.0 fleets

  • Aurora DB clusters

  • Amazon SageMaker endpoint variants

  • Custom resources

For all other scalable targets, the default value is 0:

  • DynamoDB tables

  • DynamoDB global secondary indexes

  • Amazon Comprehend document classification and entity recognizer endpoints

  • Lambda provisioned concurrency

  • Amazon Keyspaces tables

  • Amazon MSK broker storage

" }, "MetricAggregationType":{ "shape":"MetricAggregationType", @@ -1194,11 +1203,11 @@ }, "ScaleOutCooldown":{ "shape":"Cooldown", - "documentation":"

The amount of time, in seconds, to wait for a previous scale-out activity to take effect.

With the scale-out cooldown period, the intention is to continuously (but not excessively) scale out. After Application Auto Scaling successfully scales out using a target tracking scaling policy, it starts to calculate the cooldown time. The scaling policy won't increase the desired capacity again unless either a larger scale out is triggered or the cooldown period ends. While the cooldown period is in effect, the capacity added by the initiating scale-out activity is calculated as part of the desired capacity for the next scale-out activity.

Application Auto Scaling provides a default value of 300 for the following scalable targets:

  • ECS services

  • Spot Fleet requests

  • EMR clusters

  • AppStream 2.0 fleets

  • Aurora DB clusters

  • Amazon SageMaker endpoint variants

  • Custom resources

For all other scalable targets, the default value is 0:

  • DynamoDB tables

  • DynamoDB global secondary indexes

  • Amazon Comprehend document classification and entity recognizer endpoints

  • Lambda provisioned concurrency

  • Amazon Keyspaces tables

  • Amazon MSK cluster storage

" + "documentation":"

The amount of time, in seconds, to wait for a previous scale-out activity to take effect.

With the scale-out cooldown period, the intention is to continuously (but not excessively) scale out. After Application Auto Scaling successfully scales out using a target tracking scaling policy, it starts to calculate the cooldown time. The scaling policy won't increase the desired capacity again unless either a larger scale out is triggered or the cooldown period ends. While the cooldown period is in effect, the capacity added by the initiating scale-out activity is calculated as part of the desired capacity for the next scale-out activity.

Application Auto Scaling provides a default value of 300 for the following scalable targets:

  • ECS services

  • Spot Fleet requests

  • EMR clusters

  • AppStream 2.0 fleets

  • Aurora DB clusters

  • Amazon SageMaker endpoint variants

  • Custom resources

For all other scalable targets, the default value is 0:

  • DynamoDB tables

  • DynamoDB global secondary indexes

  • Amazon Comprehend document classification and entity recognizer endpoints

  • Lambda provisioned concurrency

  • Amazon Keyspaces tables

  • Amazon MSK broker storage

" }, "ScaleInCooldown":{ "shape":"Cooldown", - "documentation":"

The amount of time, in seconds, after a scale-in activity completes before another scale-in activity can start.

With the scale-in cooldown period, the intention is to scale in conservatively to protect your application’s availability, so scale-in activities are blocked until the cooldown period has expired. However, if another alarm triggers a scale-out activity during the scale-in cooldown period, Application Auto Scaling scales out the target immediately. In this case, the scale-in cooldown period stops and doesn't complete.

Application Auto Scaling provides a default value of 300 for the following scalable targets:

  • ECS services

  • Spot Fleet requests

  • EMR clusters

  • AppStream 2.0 fleets

  • Aurora DB clusters

  • Amazon SageMaker endpoint variants

  • Custom resources

For all other scalable targets, the default value is 0:

  • DynamoDB tables

  • DynamoDB global secondary indexes

  • Amazon Comprehend document classification and entity recognizer endpoints

  • Lambda provisioned concurrency

  • Amazon Keyspaces tables

  • Amazon MSK cluster storage

" + "documentation":"

The amount of time, in seconds, after a scale-in activity completes before another scale-in activity can start.

With the scale-in cooldown period, the intention is to scale in conservatively to protect your application’s availability, so scale-in activities are blocked until the cooldown period has expired. However, if another alarm triggers a scale-out activity during the scale-in cooldown period, Application Auto Scaling scales out the target immediately. In this case, the scale-in cooldown period stops and doesn't complete.

Application Auto Scaling provides a default value of 300 for the following scalable targets:

  • ECS services

  • Spot Fleet requests

  • EMR clusters

  • AppStream 2.0 fleets

  • Aurora DB clusters

  • Amazon SageMaker endpoint variants

  • Custom resources

For all other scalable targets, the default value is 0:

  • DynamoDB tables

  • DynamoDB global secondary indexes

  • Amazon Comprehend document classification and entity recognizer endpoints

  • Lambda provisioned concurrency

  • Amazon Keyspaces tables

  • Amazon MSK broker storage

" }, "DisableScaleIn":{ "shape":"DisableScaleIn", @@ -1221,5 +1230,5 @@ "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" } }, - "documentation":"

With Application Auto Scaling, you can configure automatic scaling for the following resources:

  • Amazon ECS services

  • Amazon EC2 Spot Fleet requests

  • Amazon EMR clusters

  • Amazon AppStream 2.0 fleets

  • Amazon DynamoDB tables and global secondary indexes throughput capacity

  • Amazon Aurora Replicas

  • Amazon SageMaker endpoint variants

  • Custom resources provided by your own applications or services

  • Amazon Comprehend document classification and entity recognizer endpoints

  • AWS Lambda function provisioned concurrency

  • Amazon Keyspaces (for Apache Cassandra) tables

  • Amazon Managed Streaming for Apache Kafka cluster storage

API Summary

The Application Auto Scaling service API includes three key sets of actions:

  • Register and manage scalable targets - Register AWS or custom resources as scalable targets (a resource that Application Auto Scaling can scale), set minimum and maximum capacity limits, and retrieve information on existing scalable targets.

  • Configure and manage automatic scaling - Define scaling policies to dynamically scale your resources in response to CloudWatch alarms, schedule one-time or recurring scaling actions, and retrieve your recent scaling activity history.

  • Suspend and resume scaling - Temporarily suspend and later resume automatic scaling by calling the RegisterScalableTarget API action for any Application Auto Scaling scalable target. You can suspend and resume (individually or in combination) scale-out activities that are triggered by a scaling policy, scale-in activities that are triggered by a scaling policy, and scheduled scaling.

To learn more about Application Auto Scaling, including information about granting IAM users required permissions for Application Auto Scaling actions, see the Application Auto Scaling User Guide.

" + "documentation":"

With Application Auto Scaling, you can configure automatic scaling for the following resources:

  • Amazon ECS services

  • Amazon EC2 Spot Fleet requests

  • Amazon EMR clusters

  • Amazon AppStream 2.0 fleets

  • Amazon DynamoDB tables and global secondary indexes throughput capacity

  • Amazon Aurora Replicas

  • Amazon SageMaker endpoint variants

  • Custom resources provided by your own applications or services

  • Amazon Comprehend document classification and entity recognizer endpoints

  • AWS Lambda function provisioned concurrency

  • Amazon Keyspaces (for Apache Cassandra) tables

  • Amazon Managed Streaming for Apache Kafka broker storage

API Summary

The Application Auto Scaling service API includes three key sets of actions:

  • Register and manage scalable targets - Register AWS or custom resources as scalable targets (a resource that Application Auto Scaling can scale), set minimum and maximum capacity limits, and retrieve information on existing scalable targets.

  • Configure and manage automatic scaling - Define scaling policies to dynamically scale your resources in response to CloudWatch alarms, schedule one-time or recurring scaling actions, and retrieve your recent scaling activity history.

  • Suspend and resume scaling - Temporarily suspend and later resume automatic scaling by calling the RegisterScalableTarget API action for any Application Auto Scaling scalable target. You can suspend and resume (individually or in combination) scale-out activities that are triggered by a scaling policy, scale-in activities that are triggered by a scaling policy, and scheduled scaling.

To learn more about Application Auto Scaling, including information about granting IAM users required permissions for Application Auto Scaling actions, see the Application Auto Scaling User Guide.

" } diff --git a/botocore/data/appmesh/2019-01-25/service-2.json b/botocore/data/appmesh/2019-01-25/service-2.json index a0949bda..195973a1 100644 --- a/botocore/data/appmesh/2019-01-25/service-2.json +++ b/botocore/data/appmesh/2019-01-25/service-2.json @@ -933,6 +933,10 @@ "type":"structure", "required":["validation"], "members":{ + "certificate":{ + "shape":"ClientTlsCertificate", + "documentation":"

A reference to an object that represents a client's TLS certificate.

" + }, "enforce":{ "shape":"Boolean", "documentation":"

Whether the policy is enforced. The default is True, if a value isn't specified.

", @@ -949,6 +953,18 @@ }, "documentation":"

A reference to an object that represents a Transport Layer Security (TLS) client policy.

" }, + "ClientTlsCertificate":{ + "type":"structure", + "members":{ + "file":{"shape":"ListenerTlsFileCertificate"}, + "sds":{ + "shape":"ListenerTlsSdsCertificate", + "documentation":"

A reference to an object that represents a client's TLS Secret Discovery Service certificate.

" + } + }, + "documentation":"

An object that represents the client's certificate.

", + "union":true + }, "ConflictException":{ "type":"structure", "members":{ @@ -3096,11 +3112,15 @@ "members":{ "certificate":{ "shape":"ListenerTlsCertificate", - "documentation":"

A reference to an object that represents a listener's TLS certificate.

" + "documentation":"

A reference to an object that represents a listener's Transport Layer Security (TLS) certificate.

" }, "mode":{ "shape":"ListenerTlsMode", "documentation":"

Specify one of the following modes.

  • STRICT – Listener only accepts connections with TLS enabled.

  • PERMISSIVE – Listener accepts connections with or without TLS enabled.

  • DISABLED – Listener only accepts connections without TLS.

" + }, + "validation":{ + "shape":"ListenerTlsValidationContext", + "documentation":"

A reference to an object that represents a listener's Transport Layer Security (TLS) validation context.

" } }, "documentation":"

An object that represents the Transport Layer Security (TLS) properties for a listener.

" @@ -3126,6 +3146,10 @@ "file":{ "shape":"ListenerTlsFileCertificate", "documentation":"

A reference to an object that represents a local file certificate.

" + }, + "sds":{ + "shape":"ListenerTlsSdsCertificate", + "documentation":"

A reference to an object that represents a listener's Secret Discovery Service certificate.

" } }, "documentation":"

An object that represents a listener's Transport Layer Security (TLS) certificate.

", @@ -3157,6 +3181,44 @@ "DISABLED" ] }, + "ListenerTlsSdsCertificate":{ + "type":"structure", + "required":["secretName"], + "members":{ + "secretName":{ + "shape":"SdsSecretName", + "documentation":"

A reference to an object that represents the name of the secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain.

" + } + }, + "documentation":"

An object that represents the listener's Secret Discovery Service certificate. The proxy must be configured with a local SDS provider via a Unix Domain Socket. See App Mesh TLS documentation for more info.

" + }, + "ListenerTlsValidationContext":{ + "type":"structure", + "required":["trust"], + "members":{ + "subjectAlternativeNames":{ + "shape":"SubjectAlternativeNames", + "documentation":"

A reference to an object that represents the SANs for a listener's Transport Layer Security (TLS) validation context.

" + }, + "trust":{ + "shape":"ListenerTlsValidationContextTrust", + "documentation":"

A reference to where to retrieve the trust chain when validating a peer’s Transport Layer Security (TLS) certificate.

" + } + }, + "documentation":"

An object that represents a listener's Transport Layer Security (TLS) validation context.

" + }, + "ListenerTlsValidationContextTrust":{ + "type":"structure", + "members":{ + "file":{"shape":"TlsValidationContextFileTrust"}, + "sds":{ + "shape":"TlsValidationContextSdsTrust", + "documentation":"

A reference to an object that represents a listener's Transport Layer Security (TLS) Secret Discovery Service validation context trust.

" + } + }, + "documentation":"

An object that represents a listener's Transport Layer Security (TLS) validation context trust.

", + "union":true + }, "Listeners":{ "type":"list", "member":{"shape":"Listener"}, @@ -3618,6 +3680,7 @@ "DELETED" ] }, + "SdsSecretName":{"type":"string"}, "ServiceDiscovery":{ "type":"structure", "members":{ @@ -3646,6 +3709,37 @@ "retryable":{"throttling":false} }, "String":{"type":"string"}, + "SubjectAlternativeName":{ + "type":"string", + "max":254, + "min":1 + }, + "SubjectAlternativeNameList":{ + "type":"list", + "member":{"shape":"SubjectAlternativeName"} + }, + "SubjectAlternativeNameMatchers":{ + "type":"structure", + "required":["exact"], + "members":{ + "exact":{ + "shape":"SubjectAlternativeNameList", + "documentation":"

The values sent must match the specified values exactly.

" + } + }, + "documentation":"

An object that represents the methods by which a subject alternative name on a peer Transport Layer Security (TLS) certificate can be matched.

" + }, + "SubjectAlternativeNames":{ + "type":"structure", + "required":["match"], + "members":{ + "match":{ + "shape":"SubjectAlternativeNameMatchers", + "documentation":"

An object that represents the criteria for determining a SANs match.

" + } + }, + "documentation":"

An object that represents the subject alternative names secured by the certificate.

" + }, "TagKey":{ "type":"string", "max":128, @@ -3769,12 +3863,16 @@ "type":"structure", "required":["trust"], "members":{ + "subjectAlternativeNames":{ + "shape":"SubjectAlternativeNames", + "documentation":"

A reference to an object that represents the SANs for a Transport Layer Security (TLS) validation context.

" + }, "trust":{ "shape":"TlsValidationContextTrust", - "documentation":"

A reference to an object that represents a TLS validation context trust.

" + "documentation":"

A reference to where to retrieve the trust chain when validating a peer’s Transport Layer Security (TLS) certificate.

" } }, - "documentation":"

An object that represents a Transport Layer Security (TLS) validation context.

" + "documentation":"

An object that represents how the proxy will validate its peer during Transport Layer Security (TLS) negotiation.

" }, "TlsValidationContextAcmTrust":{ "type":"structure", @@ -3785,7 +3883,7 @@ "documentation":"

One or more ACM Amazon Resource Name (ARN)s.

" } }, - "documentation":"

An object that represents a TLS validation context trust for an AWS Certicate Manager (ACM) certificate.

" + "documentation":"

An object that represents a Transport Layer Security (TLS) validation context trust for an AWS Certificate Manager (ACM) certificate.

" }, "TlsValidationContextFileTrust":{ "type":"structure", @@ -3798,16 +3896,31 @@ }, "documentation":"

An object that represents a Transport Layer Security (TLS) validation context trust for a local file.

" }, + "TlsValidationContextSdsTrust":{ + "type":"structure", + "required":["secretName"], + "members":{ + "secretName":{ + "shape":"SdsSecretName", + "documentation":"

A reference to an object that represents the name of the secret for a Transport Layer Security (TLS) Secret Discovery Service validation context trust.

" + } + }, + "documentation":"

An object that represents a Transport Layer Security (TLS) Secret Discovery Service validation context trust. The proxy must be configured with a local SDS provider via a Unix Domain Socket. See App Mesh TLS documentation for more info.

" + }, "TlsValidationContextTrust":{ "type":"structure", "members":{ "acm":{ "shape":"TlsValidationContextAcmTrust", - "documentation":"

A reference to an object that represents a TLS validation context trust for an AWS Certicate Manager (ACM) certificate.

" + "documentation":"

A reference to an object that represents a Transport Layer Security (TLS) validation context trust for an AWS Certificate Manager (ACM) certificate.

" }, "file":{ "shape":"TlsValidationContextFileTrust", - "documentation":"

An object that represents a TLS validation context trust for a local file.

" + "documentation":"

An object that represents a Transport Layer Security (TLS) validation context trust for a local file.

" + }, + "sds":{ + "shape":"TlsValidationContextSdsTrust", + "documentation":"

A reference to an object that represents a Transport Layer Security (TLS) Secret Discovery Service validation context trust.

" } }, "documentation":"

An object that represents a Transport Layer Security (TLS) validation context trust.

", @@ -4246,6 +4359,10 @@ "type":"structure", "required":["validation"], "members":{ + "certificate":{ + "shape":"VirtualGatewayClientTlsCertificate", + "documentation":"

A reference to an object that represents a virtual gateway's client's Transport Layer Security (TLS) certificate.

" + }, "enforce":{ "shape":"Boolean", "documentation":"

Whether the policy is enforced. The default is True, if a value isn't specified.

", @@ -4257,11 +4374,23 @@ }, "validation":{ "shape":"VirtualGatewayTlsValidationContext", - "documentation":"

A reference to an object that represents a TLS validation context.

" + "documentation":"

A reference to an object that represents a Transport Layer Security (TLS) validation context.

" } }, "documentation":"

An object that represents a Transport Layer Security (TLS) client policy.

" }, + "VirtualGatewayClientTlsCertificate":{ + "type":"structure", + "members":{ + "file":{"shape":"VirtualGatewayListenerTlsFileCertificate"}, + "sds":{ + "shape":"VirtualGatewayListenerTlsSdsCertificate", + "documentation":"

A reference to an object that represents a virtual gateway's client's Secret Discovery Service certificate.

" + } + }, + "documentation":"

An object that represents the virtual gateway's client's Transport Layer Security (TLS) certificate.

", + "union":true + }, "VirtualGatewayConnectionPool":{ "type":"structure", "members":{ @@ -4459,6 +4588,10 @@ "mode":{ "shape":"VirtualGatewayListenerTlsMode", "documentation":"

Specify one of the following modes.

  • STRICT – Listener only accepts connections with TLS enabled.

  • PERMISSIVE – Listener accepts connections with or without TLS enabled.

  • DISABLED – Listener only accepts connections without TLS.

" + }, + "validation":{ + "shape":"VirtualGatewayListenerTlsValidationContext", + "documentation":"

A reference to an object that represents a virtual gateway's listener's Transport Layer Security (TLS) validation context.

" } }, "documentation":"

An object that represents the Transport Layer Security (TLS) properties for a listener.

" @@ -4484,6 +4617,10 @@ "file":{ "shape":"VirtualGatewayListenerTlsFileCertificate", "documentation":"

A reference to an object that represents a local file certificate.

" + }, + "sds":{ + "shape":"VirtualGatewayListenerTlsSdsCertificate", + "documentation":"

A reference to an object that represents a virtual gateway's listener's Secret Discovery Service certificate.

" } }, "documentation":"

An object that represents a listener's Transport Layer Security (TLS) certificate.

", @@ -4515,6 +4652,44 @@ "DISABLED" ] }, + "VirtualGatewayListenerTlsSdsCertificate":{ + "type":"structure", + "required":["secretName"], + "members":{ + "secretName":{ + "shape":"VirtualGatewaySdsSecretName", + "documentation":"

A reference to an object that represents the name of the secret requested from the Secret Discovery Service provider representing Transport Layer Security (TLS) materials like a certificate or certificate chain.

" + } + }, + "documentation":"

An object that represents the virtual gateway's listener's Secret Discovery Service certificate. The proxy must be configured with a local SDS provider via a Unix Domain Socket. See App Mesh TLS documentation for more info.

" + }, + "VirtualGatewayListenerTlsValidationContext":{ + "type":"structure", + "required":["trust"], + "members":{ + "subjectAlternativeNames":{ + "shape":"SubjectAlternativeNames", + "documentation":"

A reference to an object that represents the SANs for a virtual gateway listener's Transport Layer Security (TLS) validation context.

" + }, + "trust":{ + "shape":"VirtualGatewayListenerTlsValidationContextTrust", + "documentation":"

A reference to where to retrieve the trust chain when validating a peer’s Transport Layer Security (TLS) certificate.

" + } + }, + "documentation":"

An object that represents a virtual gateway's listener's Transport Layer Security (TLS) validation context.

" + }, + "VirtualGatewayListenerTlsValidationContextTrust":{ + "type":"structure", + "members":{ + "file":{"shape":"VirtualGatewayTlsValidationContextFileTrust"}, + "sds":{ + "shape":"VirtualGatewayTlsValidationContextSdsTrust", + "documentation":"

A reference to an object that represents a virtual gateway's listener's Transport Layer Security (TLS) Secret Discovery Service validation context trust.

" + } + }, + "documentation":"

An object that represents a virtual gateway's listener's Transport Layer Security (TLS) validation context trust.

", + "union":true + }, "VirtualGatewayListeners":{ "type":"list", "member":{"shape":"VirtualGatewayListener"}, @@ -4605,6 +4780,7 @@ }, "documentation":"

An object that represents a virtual gateway returned by a list operation.

" }, + "VirtualGatewaySdsSecretName":{"type":"string"}, "VirtualGatewaySpec":{ "type":"structure", "required":["listeners"], @@ -4644,9 +4820,13 @@ "type":"structure", "required":["trust"], "members":{ + "subjectAlternativeNames":{ + "shape":"SubjectAlternativeNames", + "documentation":"

A reference to an object that represents the SANs for a virtual gateway's listener's Transport Layer Security (TLS) validation context.

" + }, "trust":{ "shape":"VirtualGatewayTlsValidationContextTrust", - "documentation":"

A reference to an object that represents a TLS validation context trust.

" + "documentation":"

A reference to where to retrieve the trust chain when validating a peer’s Transport Layer Security (TLS) certificate.

" } }, "documentation":"

An object that represents a Transport Layer Security (TLS) validation context.

" @@ -4660,7 +4840,7 @@ "documentation":"

One or more ACM Amazon Resource Name (ARN)s.

" } }, - "documentation":"

An object that represents a TLS validation context trust for an AWS Certicate Manager (ACM) certificate.

" + "documentation":"

An object that represents a Transport Layer Security (TLS) validation context trust for an AWS Certificate Manager (ACM) certificate.

" }, "VirtualGatewayTlsValidationContextFileTrust":{ "type":"structure", @@ -4673,16 +4853,31 @@ }, "documentation":"

An object that represents a Transport Layer Security (TLS) validation context trust for a local file.

" }, + "VirtualGatewayTlsValidationContextSdsTrust":{ + "type":"structure", + "required":["secretName"], + "members":{ + "secretName":{ + "shape":"VirtualGatewaySdsSecretName", + "documentation":"

A reference to an object that represents the name of the secret for a virtual gateway's Transport Layer Security (TLS) Secret Discovery Service validation context trust.

" + } + }, + "documentation":"

An object that represents a virtual gateway's listener's Transport Layer Security (TLS) Secret Discovery Service validation context trust. The proxy must be configured with a local SDS provider via a Unix Domain Socket. See App Mesh TLS documentation for more info.

" + }, "VirtualGatewayTlsValidationContextTrust":{ "type":"structure", "members":{ "acm":{ "shape":"VirtualGatewayTlsValidationContextAcmTrust", - "documentation":"

A reference to an object that represents a TLS validation context trust for an AWS Certicate Manager (ACM) certificate.

" + "documentation":"

A reference to an object that represents a Transport Layer Security (TLS) validation context trust for an AWS Certificate Manager (ACM) certificate.

" }, "file":{ "shape":"VirtualGatewayTlsValidationContextFileTrust", - "documentation":"

An object that represents a TLS validation context trust for a local file.

" + "documentation":"

An object that represents a Transport Layer Security (TLS) validation context trust for a local file.

" + }, + "sds":{ + "shape":"VirtualGatewayTlsValidationContextSdsTrust", + "documentation":"

A reference to an object that represents a virtual gateway's Transport Layer Security (TLS) Secret Discovery Service validation context trust.

" } }, "documentation":"

An object that represents a Transport Layer Security (TLS) validation context trust.

", diff --git a/botocore/data/iotwireless/2020-11-22/service-2.json b/botocore/data/iotwireless/2020-11-22/service-2.json index ba38521f..23509c6a 100644 --- a/botocore/data/iotwireless/2020-11-22/service-2.json +++ b/botocore/data/iotwireless/2020-11-22/service-2.json @@ -1715,7 +1715,10 @@ }, "ExpressionType":{ "type":"string", - "enum":["RuleName"] + "enum":[ + "RuleName", + "MqttTopic" + ] }, "FNwkSIntKey":{ "type":"string", @@ -2021,6 +2024,10 @@ "IotCertificateId":{ "shape":"IotCertificateId", "documentation":"

The ID of the certificate associated with the wireless gateway.

" + }, + "LoRaWANNetworkServerCertificateId":{ + "shape":"IotCertificateId", + "documentation":"

The ID of the certificate associated with the wireless gateway and used for LoRaWANNetworkServer endpoint.

" } } }, @@ -3046,7 +3053,7 @@ }, "TransmitMode":{ "shape":"TransmitMode", - "documentation":"

The transmit mode to use to send data to the wireless device. Can be: 0 for UM (unacknowledge mode), 1 for AM (acknowledge mode), or 2 for (TM) transparent mode.

" + "documentation":"

The transmit mode to use to send data to the wireless device. Can be: 0 for UM (unacknowledge mode) or 1 for AM (acknowledge mode).

" }, "PayloadData":{ "shape":"PayloadData", @@ -3161,7 +3168,7 @@ }, "Fingerprint":{ "shape":"Fingerprint", - "documentation":"

Fingerprint for Sidewalk application server private key.

" + "documentation":"

The fingerprint of the Sidewalk application server private key.

" } }, "documentation":"

Information about a Sidewalk account.

" diff --git a/botocore/data/location/2020-11-19/service-2.json b/botocore/data/location/2020-11-19/service-2.json index 4ee1f690..0068484f 100644 --- a/botocore/data/location/2020-11-19/service-2.json +++ b/botocore/data/location/2020-11-19/service-2.json @@ -2455,7 +2455,7 @@ "members":{ "Style":{ "shape":"MapStyle", - "documentation":"

Specifies the map style selected from an available data provider.

Valid styles: VectorEsriLightGrayCanvas, VectorEsriLight, VectorEsriStreets, VectorEsriNavigation, VectorEsriDarkGrayCanvas, VectorEsriLightGrayCanvas, VectorHereBerlin

When using HERE as your data provider, and selecting the Style VectorHereBerlin, you may not use HERE Maps for Asset Management. See the AWS Service Terms for Amazon Location Service.

" + "documentation":"

Specifies the map style selected from an available data provider.

Valid styles: VectorEsriStreets, VectorEsriTopographic, VectorEsriNavigation, VectorEsriDarkGrayCanvas, VectorEsriLightGrayCanvas, VectorHereBerlin.

When using HERE as your data provider, and selecting the Style VectorHereBerlin, you may not use HERE Maps for Asset Management. See the AWS Service Terms for Amazon Location Service.

" } }, "documentation":"

Specifies the map tile style selected from an available provider.

" diff --git a/botocore/data/lookoutvision/2020-11-20/service-2.json b/botocore/data/lookoutvision/2020-11-20/service-2.json index eda65e75..ffe7e7c0 100644 --- a/botocore/data/lookoutvision/2020-11-20/service-2.json +++ b/botocore/data/lookoutvision/2020-11-20/service-2.json @@ -30,7 +30,7 @@ {"shape":"ThrottlingException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Creates a new dataset in an Amazon Lookout for Vision project. CreateDataset can create a training or a test dataset from a valid dataset source (DatasetSource).

If you want a single dataset project, specify train for the value of DatasetType.

To have a project with separate training and test datasets, call CreateDataset twice. On the first call, specify train for the value of DatasetType. On the second call, specify test for the value of DatasetType. of dataset with

" + "documentation":"

Creates a new dataset in an Amazon Lookout for Vision project. CreateDataset can create a training or a test dataset from a valid dataset source (DatasetSource).

If you want a single dataset project, specify train for the value of DatasetType.

To have a project with separate training and test datasets, call CreateDataset twice. On the first call, specify train for the value of DatasetType. On the second call, specify test for the value of DatasetType.

This operation requires permissions to perform the lookoutvision:CreateDataset operation.

" }, "CreateModel":{ "name":"CreateModel", @@ -50,7 +50,7 @@ {"shape":"ThrottlingException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Creates a new version of a model within an an Amazon Lookout for Vision project. CreateModel is an asynchronous operation in which Amazon Lookout for Vision trains, tests, and evaluates a new version of a model.

To get the current status, check the Status field returned in the response from DescribeModel.

If the project has a single dataset, Amazon Lookout for Vision internally splits the dataset to create a training and a test dataset. If the project has a training and a test dataset, Lookout for Vision uses the respective datasets to train and test the model.

After training completes, the evaluation metrics are stored at the location specified in OutputConfig.

" + "documentation":"

Creates a new version of a model within an Amazon Lookout for Vision project. CreateModel is an asynchronous operation in which Amazon Lookout for Vision trains, tests, and evaluates a new version of a model.

To get the current status, check the Status field returned in the response from DescribeModel.

If the project has a single dataset, Amazon Lookout for Vision internally splits the dataset to create a training and a test dataset. If the project has a training and a test dataset, Lookout for Vision uses the respective datasets to train and test the model.

After training completes, the evaluation metrics are stored at the location specified in OutputConfig.

This operation requires permissions to perform the lookoutvision:CreateModel operation. If you want to tag your model, you also require permission to the lookoutvision:TagResource operation.

" }, "CreateProject":{ "name":"CreateProject", @@ -69,7 +69,7 @@ {"shape":"ThrottlingException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Creates an empty Amazon Lookout for Vision project. After you create the project, add a dataset by calling CreateDataset.

" + "documentation":"

Creates an empty Amazon Lookout for Vision project. After you create the project, add a dataset by calling CreateDataset.

This operation requires permissions to perform the lookoutvision:CreateProject operation.

" }, "DeleteDataset":{ "name":"DeleteDataset", @@ -88,7 +88,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Deletes an existing Amazon Lookout for Vision dataset.

If your the project has a single dataset, you must create a new dataset before you can create a model.

If you project has a training dataset and a test dataset consider the following.

  • If you delete the test dataset, your project reverts to a single dataset project. If you then train the model, Amazon Lookout for Vision internally splits the remaining dataset into a training and test dataset.

  • If you delete the training dataset, you must create a training dataset before you can create a model.

It might take a while to delete the dataset. To check the current status, check the Status field in the response from a call to DescribeDataset.

" + "documentation":"

Deletes an existing Amazon Lookout for Vision dataset.

If your project has a single dataset, you must create a new dataset before you can create a model.

If your project has a training dataset and a test dataset, consider the following.

  • If you delete the test dataset, your project reverts to a single dataset project. If you then train the model, Amazon Lookout for Vision internally splits the remaining dataset into a training and test dataset.

  • If you delete the training dataset, you must create a training dataset before you can create a model.

It might take a while to delete the dataset. To check the current status, check the Status field in the response from a call to DescribeDataset.

This operation requires permissions to perform the lookoutvision:DeleteDataset operation.

" }, "DeleteModel":{ "name":"DeleteModel", @@ -107,7 +107,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Deletes an Amazon Lookout for Vision model. You can't delete a running model. To stop a running model, use the StopModel operation.

" + "documentation":"

Deletes an Amazon Lookout for Vision model. You can't delete a running model. To stop a running model, use the StopModel operation.

This operation requires permissions to perform the lookoutvision:DeleteModel operation.

" }, "DeleteProject":{ "name":"DeleteProject", @@ -125,7 +125,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Deletes an Amazon Lookout for Vision project.

To delete a project, you must first delete each version of the model associated with the project. To delete a model use the DeleteModel operation.

The training and test datasets are deleted automatically for you. The images referenced by the training and test datasets aren't deleted.

" + "documentation":"

Deletes an Amazon Lookout for Vision project.

To delete a project, you must first delete each version of the model associated with the project. To delete a model use the DeleteModel operation.

You also have to delete the dataset(s) associated with the model. For more information, see DeleteDataset. The images referenced by the training and test datasets aren't deleted.

This operation requires permissions to perform the lookoutvision:DeleteProject operation.

" }, "DescribeDataset":{ "name":"DescribeDataset", @@ -143,7 +143,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Describe an Amazon Lookout for Vision dataset.

" + "documentation":"

Describe an Amazon Lookout for Vision dataset.

This operation requires permissions to perform the lookoutvision:DescribeDataset operation.

" }, "DescribeModel":{ "name":"DescribeModel", @@ -161,7 +161,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Describes a version of an Amazon Lookout for Vision model.

" + "documentation":"

Describes a version of an Amazon Lookout for Vision model.

This operation requires permissions to perform the lookoutvision:DescribeModel operation.

" }, "DescribeProject":{ "name":"DescribeProject", @@ -179,7 +179,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Describes an Amazon Lookout for Vision project.

" + "documentation":"

Describes an Amazon Lookout for Vision project.

This operation requires permissions to perform the lookoutvision:DescribeProject operation.

" }, "DetectAnomalies":{ "name":"DetectAnomalies", @@ -197,7 +197,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Detects anomalies in an image that you supply.

The response from DetectAnomalies includes a boolean prediction that the image contains one or more anomalies and a confidence value for the prediction.

Before calling DetectAnomalies, you must first start your model with the StartModel operation. You are charged for the amount of time, in minutes, that a model runs and for the number of anomaly detection units that your model uses. If you are not using a model, use the StopModel operation to stop your model.

" + "documentation":"

Detects anomalies in an image that you supply.

The response from DetectAnomalies includes a boolean prediction that the image contains one or more anomalies and a confidence value for the prediction.

Before calling DetectAnomalies, you must first start your model with the StartModel operation. You are charged for the amount of time, in minutes, that a model runs and for the number of anomaly detection units that your model uses. If you are not using a model, use the StopModel operation to stop your model.

This operation requires permissions to perform the lookoutvision:DetectAnomalies operation.

" }, "ListDatasetEntries":{ "name":"ListDatasetEntries", @@ -215,7 +215,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Lists the JSON Lines within a dataset. An Amazon Lookout for Vision JSON Line contains the anomaly information for a single image, including the image location and the assigned label.

" + "documentation":"

Lists the JSON Lines within a dataset. An Amazon Lookout for Vision JSON Line contains the anomaly information for a single image, including the image location and the assigned label.

This operation requires permissions to perform the lookoutvision:ListDatasetEntries operation.

" }, "ListModels":{ "name":"ListModels", @@ -233,7 +233,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Lists the versions of a model in an Amazon Lookout for Vision project.

" + "documentation":"

Lists the versions of a model in an Amazon Lookout for Vision project.

This operation requires permissions to perform the lookoutvision:ListModels operation.

" }, "ListProjects":{ "name":"ListProjects", @@ -251,7 +251,25 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Lists the Amazon Lookout for Vision projects in your AWS account.

" + "documentation":"

Lists the Amazon Lookout for Vision projects in your AWS account.

This operation requires permissions to perform the lookoutvision:ListProjects operation.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/2020-11-20/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns a list of tags attached to the specified Amazon Lookout for Vision model.

This operation requires permissions to perform the lookoutvision:ListTagsForResource operation.

" }, "StartModel":{ "name":"StartModel", @@ -271,7 +289,7 @@ {"shape":"ThrottlingException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Starts the running of the version of an Amazon Lookout for Vision model. Starting a model takes a while to complete. To check the current state of the model, use DescribeModel.

Once the model is running, you can detect custom labels in new images by calling DetectAnomalies.

You are charged for the amount of time that the model is running. To stop a running model, call StopModel.

" + "documentation":"

Starts the running of the version of an Amazon Lookout for Vision model. Starting a model takes a while to complete. To check the current state of the model, use DescribeModel.

Once the model is running, you can detect custom labels in new images by calling DetectAnomalies.

You are charged for the amount of time that the model is running. To stop a running model, call StopModel.

This operation requires permissions to perform the lookoutvision:StartModel operation.

" }, "StopModel":{ "name":"StopModel", @@ -290,7 +308,44 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Stops a running model. The operation might take a while to complete. To check the current status, call DescribeModel.

" + "documentation":"

Stops a running model. The operation might take a while to complete. To check the current status, call DescribeModel.

This operation requires permissions to perform the lookoutvision:StopModel operation.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/2020-11-20/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Adds one or more key-value tags to an Amazon Lookout for Vision model. For more information, see Tagging a model in the Amazon Lookout for Vision Developer Guide.

This operation requires permissions to perform the lookoutvision:TagResource operation.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/2020-11-20/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Removes one or more tags from an Amazon Lookout for Vision model. For more information, see Tagging a model in the Amazon Lookout for Vision Developer Guide.

This operation requires permissions to perform the lookoutvision:UntagResource operation.

" }, "UpdateDatasetEntries":{ "name":"UpdateDatasetEntries", @@ -309,7 +364,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Adds one or more JSON Line entries to a dataset. A JSON Line includes information about an image used for training or testing an Amazon Lookout for Vision model. The following is an example JSON Line.

Updating a dataset might take a while to complete. To check the current status, call DescribeDataset and check the Status field in the response.

" + "documentation":"

Adds one or more JSON Line entries to a dataset. A JSON Line includes information about an image used for training or testing an Amazon Lookout for Vision model. The following is an example JSON Line.

Updating a dataset might take a while to complete. To check the current status, call DescribeDataset and check the Status field in the response.

This operation requires permissions to perform the lookoutvision:UpdateDatasetEntries operation.

" } }, "shapes":{ @@ -417,7 +472,7 @@ "locationName":"projectName" }, "Description":{ - "shape":"ModelDescription", + "shape":"ModelDescriptionMessage", "documentation":"

A description for the version of the model.

" }, "ClientToken":{ @@ -434,6 +489,10 @@ "KmsKeyId":{ "shape":"KmsKeyId", "documentation":"

The identifier of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for encrypting the model. If this parameter is not specified, the model is encrypted by a key that AWS owns and manages.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A set of tags (key-value pairs) that you want to attach to the model.

" } } }, @@ -452,7 +511,7 @@ "members":{ "ProjectName":{ "shape":"ProjectName", - "documentation":"

S nsme for the project.

" + "documentation":"

The name for the project.

" }, "ClientToken":{ "shape":"ClientToken", @@ -1062,6 +1121,27 @@ } } }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"TagArn", + "documentation":"

The Amazon Resource Name (ARN) of the model for which you want to list tags.

", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

A map of tag keys and values attached to the specified model.

" + } + } + }, "ModelArn":{"type":"string"}, "ModelDescription":{ "type":"structure", @@ -1161,7 +1241,7 @@ }, "Performance":{ "shape":"ModelPerformance", - "documentation":"

Performance metrics for the model. Created during training.

" + "documentation":"

Performance metrics for the model. Not available until training has successfully completed.

" } }, "documentation":"

Describes an Amazon Lookout for Vision model.

" @@ -1489,6 +1569,77 @@ "requiresLength":true, "streaming":true }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

The key of the tag that is attached to the specified model.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

The value of the tag that is attached to the specified model.

" + } + }, + "documentation":"

A key and value pair that is attached to the specified Amazon Lookout for Vision model.

" + }, + "TagArn":{ + "type":"string", + "max":1011, + "min":1 + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"TagArn", + "documentation":"

The Amazon Resource Name (ARN) of the model to assign the tags.

", + "location":"uri", + "locationName":"resourceArn" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The key-value tags to assign to the model.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, "ThrottlingException":{ "type":"structure", "required":["Message"], @@ -1513,6 +1664,32 @@ "error":{"httpStatusCode":429}, "exception":true }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"TagArn", + "documentation":"

The Amazon Resource Name (ARN) of the model from which you want to remove tags.

", + "location":"uri", + "locationName":"resourceArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

A list of the keys of the tags that you want to remove.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateDatasetEntriesRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/medialive/2017-10-14/service-2.json b/botocore/data/medialive/2017-10-14/service-2.json index c61c0dbe..162e498b 100644 --- a/botocore/data/medialive/2017-10-14/service-2.json +++ b/botocore/data/medialive/2017-10-14/service-2.json @@ -6941,6 +6941,12 @@ "Destination" ] }, + "FrameCaptureHlsSettings": { + "type": "structure", + "members": { + }, + "documentation": "Frame Capture Hls Settings" + }, "FrameCaptureIntervalUnit": { "type": "string", "documentation": "Frame Capture Interval Unit", @@ -6974,10 +6980,7 @@ "documentation": "Unit for the frame capture interval." } }, - "documentation": "Frame Capture Settings", - "required": [ - "CaptureInterval" - ] + "documentation": "Frame Capture Settings" }, "GatewayTimeoutException": { "type": "structure", @@ -8408,6 +8411,10 @@ "shape": "Fmp4HlsSettings", "locationName": "fmp4HlsSettings" }, + "FrameCaptureHlsSettings": { + "shape": "FrameCaptureHlsSettings", + "locationName": "frameCaptureHlsSettings" + }, "StandardHlsSettings": { "shape": "StandardHlsSettings", "locationName": "standardHlsSettings" diff --git a/botocore/data/organizations/2016-11-28/service-2.json b/botocore/data/organizations/2016-11-28/service-2.json index de1098be..8ba6e2ee 100644 --- a/botocore/data/organizations/2016-11-28/service-2.json +++ b/botocore/data/organizations/2016-11-28/service-2.json @@ -281,7 +281,7 @@ {"shape":"ServiceException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

Removes the specified member AWS account as a delegated administrator for the specified AWS service.

Deregistering a delegated administrator can have unintended impacts on the functionality of the enabled AWS service. See the documentation for the enabled service before you deregister a delegated administrator so that you understand any potential impacts.

You can run this action only for AWS services that support this feature. For a current list of services that support it, see the column Supports Delegated Administrator in the table at AWS Services that you can use with AWS Organizations in the AWS Organizations User Guide.

This operation can be called only from the organization's management account.

" + "documentation":"

Removes the specified member AWS account as a delegated administrator for the specified AWS service.

Deregistering a delegated administrator can have unintended impacts on the functionality of the enabled AWS service. See the documentation for the enabled service before you deregister a delegated administrator so that you understand any potential impacts.

You can run this action only for AWS services that support this feature. For a current list of services that support it, see the column Supports Delegated Administrator in the table at AWS Services that you can use with AWS Organizations in the AWS Organizations User Guide.

This operation can be called only from the organization's management account.

" }, "DescribeAccount":{ "name":"DescribeAccount", @@ -452,7 +452,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

Disables the integration of an AWS service (the service that is specified by ServicePrincipal) with AWS Organizations. When you disable integration, the specified service no longer can create a service-linked role in new accounts in your organization. This means the service can't perform operations on your behalf on any new accounts in your organization. The service can still perform operations in older accounts until the service completes its clean-up from AWS Organizations.

We recommend that you disable integration between AWS Organizations and the specified AWS service by using the console or commands that are provided by the specified service. Doing so ensures that the other service is aware that it can clean up any resources that are required only for the integration. How the service cleans up its resources in the organization's accounts depends on that service. For more information, see the documentation for the other AWS service.

After you perform the DisableAWSServiceAccess operation, the specified service can no longer perform operations in your organization's accounts unless the operations are explicitly permitted by the IAM policies that are attached to your roles.

For more information about integrating other services with AWS Organizations, including the list of services that work with Organizations, see Integrating AWS Organizations with Other AWS Services in the AWS Organizations User Guide.

This operation can be called only from the organization's management account.

" + "documentation":"

Disables the integration of an AWS service (the service that is specified by ServicePrincipal) with AWS Organizations. When you disable integration, the specified service no longer can create a service-linked role in new accounts in your organization. This means the service can't perform operations on your behalf on any new accounts in your organization. The service can still perform operations in older accounts until the service completes its clean-up from AWS Organizations.

We strongly recommend that you don't use this command to disable integration between AWS Organizations and the specified AWS service. Instead, use the console or commands that are provided by the specified service. This lets the trusted service perform any required initialization when enabling trusted access, such as creating any required resources and any required clean up of resources when disabling trusted access.

For information about how to disable trusted service access to your organization using the trusted service, see the Learn more link under the Supports Trusted Access column at AWS services that you can use with AWS Organizations on this page.

If you disable access by using this command, it causes the following actions to occur:

  • The service can no longer create a service-linked role in the accounts in your organization. This means that the service can't perform operations on your behalf on any new accounts in your organization. The service can still perform operations in older accounts until the service completes its clean-up from AWS Organizations.

  • The service can no longer perform tasks in the member accounts in the organization, unless those operations are explicitly permitted by the IAM policies that are attached to your roles. This includes any data aggregation from the member accounts to the management account, or to a delegated administrator account, where relevant.

  • Some services detect this and clean up any remaining data or resources related to the integration, while other services stop accessing the organization but leave any historical data and configuration in place to support a possible re-enabling of the integration.

Using the other service's console or commands to disable the integration ensures that the other service is aware that it can clean up any resources that are required only for the integration. How the service cleans up its resources in the organization's accounts depends on that service. For more information, see the documentation for the other AWS service.

After you perform the DisableAWSServiceAccess operation, the specified service can no longer perform operations in your organization's accounts.

For more information about integrating other services with AWS Organizations, including the list of services that work with Organizations, see Integrating AWS Organizations with Other AWS Services in the AWS Organizations User Guide.

This operation can be called only from the organization's management account.

" }, "DisablePolicyType":{ "name":"DisablePolicyType", @@ -579,7 +579,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Removes a member account from its parent organization. This version of the operation is performed by the account that wants to leave. To remove a member account as a user in the management account, use RemoveAccountFromOrganization instead.

This operation can be called only from a member account in the organization.

  • The management account in an organization with all features enabled can set service control policies (SCPs) that can restrict what administrators of member accounts can do. This includes preventing them from successfully calling LeaveOrganization and leaving the organization.

  • You can leave an organization as a member account only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the AWS Organizations console, API, or CLI commands, the information required of standalone accounts is not automatically collected. For each account that you want to make standalone, you must perform the following steps. If any of the steps are already completed for this account, that step doesn't appear.

    • Choose a support plan

    • Provide and verify the required contact information

    • Provide a current payment method

    AWS uses the payment method to charge for any billable (not free tier) AWS activity that occurs while the account isn't attached to an organization. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • You can leave an organization only after you enable IAM user access to billing in your account. For more information, see Activating Access to the Billing and Cost Management Console in the AWS Billing and Cost Management User Guide.

  • After the account leaves the organization, all tags that were attached to the account object in the organization are deleted. AWS accounts outside of an organization do not support tags.

" + "documentation":"

Removes a member account from its parent organization. This version of the operation is performed by the account that wants to leave. To remove a member account as a user in the management account, use RemoveAccountFromOrganization instead.

This operation can be called only from a member account in the organization.

  • The management account in an organization with all features enabled can set service control policies (SCPs) that can restrict what administrators of member accounts can do. This includes preventing them from successfully calling LeaveOrganization and leaving the organization.

  • You can leave an organization as a member account only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the AWS Organizations console, API, or CLI commands, the information required of standalone accounts is not automatically collected. For each account that you want to make standalone, you must perform the following steps. If any of the steps are already completed for this account, that step doesn't appear.

    • Choose a support plan

    • Provide and verify the required contact information

    • Provide a current payment method

    AWS uses the payment method to charge for any billable (not free tier) AWS activity that occurs while the account isn't attached to an organization. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • The account that you want to leave must not be a delegated administrator account for any AWS service enabled for your organization. If the account is a delegated administrator, you must first change the delegated administrator account to another account that is remaining in the organization.

  • You can leave an organization only after you enable IAM user access to billing in your account. For more information, see Activating Access to the Billing and Cost Management Console in the AWS Billing and Cost Management User Guide.

  • After the account leaves the organization, all tags that were attached to the account object in the organization are deleted. AWS accounts outside of an organization do not support tags.

" }, "ListAWSServiceAccessForOrganization":{ "name":"ListAWSServiceAccessForOrganization", @@ -913,7 +913,7 @@ {"shape":"ServiceException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

Enables the specified member account to administer the Organizations features of the specified AWS service. It grants read-only access to AWS Organizations service data. The account still requires IAM permissions to access and administer the AWS service.

You can run this action only for AWS services that support this feature. For a current list of services that support it, see the column Supports Delegated Administrator in the table at AWS Services that you can use with AWS Organizations in the AWS Organizations User Guide.

This operation can be called only from the organization's management account.

" + "documentation":"

Enables the specified member account to administer the Organizations features of the specified AWS service. It grants read-only access to AWS Organizations service data. The account still requires IAM permissions to access and administer the AWS service.

You can run this action only for AWS services that support this feature. For a current list of services that support it, see the column Supports Delegated Administrator in the table at AWS Services that you can use with AWS Organizations in the AWS Organizations User Guide.

This operation can be called only from the organization's management account.

" }, "RemoveAccountFromOrganization":{ "name":"RemoveAccountFromOrganization", @@ -933,7 +933,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Removes the specified account from the organization.

The removed account becomes a standalone account that isn't a member of any organization. It's no longer subject to any policies and is responsible for its own bill payments. The organization's management account is no longer charged for any expenses accrued by the member account after it's removed from the organization.

This operation can be called only from the organization's management account. Member accounts can remove themselves with LeaveOrganization instead.

  • You can remove an account from your organization only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the AWS Organizations console, API, or CLI commands, the information required of standalone accounts is not automatically collected. For an account that you want to make standalone, you must choose a support plan, provide and verify the required contact information, and provide a current payment method. AWS uses the payment method to charge for any billable (not free tier) AWS activity that occurs while the account isn't attached to an organization. To remove an account that doesn't yet have this information, you must sign in as the member account and follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • After the account leaves the organization, all tags that were attached to the account object in the organization are deleted. AWS accounts outside of an organization do not support tags.

" + "documentation":"

Removes the specified account from the organization.

The removed account becomes a standalone account that isn't a member of any organization. It's no longer subject to any policies and is responsible for its own bill payments. The organization's management account is no longer charged for any expenses accrued by the member account after it's removed from the organization.

This operation can be called only from the organization's management account. Member accounts can remove themselves with LeaveOrganization instead.

  • You can remove an account from your organization only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the AWS Organizations console, API, or CLI commands, the information required of standalone accounts is not automatically collected. For an account that you want to make standalone, you must choose a support plan, provide and verify the required contact information, and provide a current payment method. AWS uses the payment method to charge for any billable (not free tier) AWS activity that occurs while the account isn't attached to an organization. To remove an account that doesn't yet have this information, you must sign in as the member account and follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • The account that you want to leave must not be a delegated administrator account for any AWS service enabled for your organization. If the account is a delegated administrator, you must first change the delegated administrator account to another account that is remaining in the organization.

  • After the account leaves the organization, all tags that were attached to the account object in the organization are deleted. AWS accounts outside of an organization do not support tags.

" }, "TagResource":{ "name":"TagResource", @@ -1076,7 +1076,7 @@ }, "Arn":{ "shape":"AccountArn", - "documentation":"

The Amazon Resource Name (ARN) of the account.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the account.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Service Authorization Reference.

" }, "Email":{ "shape":"Email", @@ -2031,7 +2031,7 @@ }, "Arn":{ "shape":"HandshakeArn", - "documentation":"

The Amazon Resource Name (ARN) of a handshake.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of a handshake.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Service Authorization Reference.

" }, "Parties":{ "shape":"HandshakeParties", @@ -2842,7 +2842,7 @@ }, "Arn":{ "shape":"OrganizationArn", - "documentation":"

The Amazon Resource Name (ARN) of an organization.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of an organization.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Service Authorization Reference.

" }, "FeatureSet":{ "shape":"OrganizationFeatureSet", @@ -2850,7 +2850,7 @@ }, "MasterAccountArn":{ "shape":"AccountArn", - "documentation":"

The Amazon Resource Name (ARN) of the account that is designated as the management account for the organization.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the account that is designated as the management account for the organization.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Service Authorization Reference.

" }, "MasterAccountId":{ "shape":"AccountId", @@ -2899,7 +2899,7 @@ }, "Arn":{ "shape":"OrganizationalUnitArn", - "documentation":"

The Amazon Resource Name (ARN) of this OU.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of this OU.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Service Authorization Reference.

" }, "Name":{ "shape":"OrganizationalUnitName", @@ -3066,7 +3066,7 @@ }, "Arn":{ "shape":"PolicyArn", - "documentation":"

The Amazon Resource Name (ARN) of the policy.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the policy.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Service Authorization Reference.

" }, "Name":{ "shape":"PolicyName", @@ -3101,7 +3101,7 @@ }, "Arn":{ "shape":"GenericArn", - "documentation":"

The Amazon Resource Name (ARN) of the policy target.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the policy target.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Service Authorization Reference.

" }, "Name":{ "shape":"TargetName", @@ -3218,7 +3218,7 @@ }, "Arn":{ "shape":"RootArn", - "documentation":"

The Amazon Resource Name (ARN) of the root.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the root.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Service Authorization Reference.

" }, "Name":{ "shape":"RootName", diff --git a/botocore/data/rds-data/2018-08-01/service-2.json b/botocore/data/rds-data/2018-08-01/service-2.json index 294450a6..0920fc5f 100644 --- a/botocore/data/rds-data/2018-08-01/service-2.json +++ b/botocore/data/rds-data/2018-08-01/service-2.json @@ -159,7 +159,8 @@ "documentation":"

An array of strings.

" } }, - "documentation":"

Contains an array.

" + "documentation":"

Contains an array.

", + "union":true }, "ArrayValueList":{ "type":"list", @@ -395,8 +396,8 @@ "DecimalReturnType":{ "type":"string", "enum":[ - "DOUBLE_OR_LONG", - "STRING" + "STRING", + "DOUBLE_OR_LONG" ] }, "DoubleArray":{ @@ -480,7 +481,7 @@ }, "schema":{ "shape":"DbName", - "documentation":"

The name of the database schema.

" + "documentation":"

The name of the database schema.

Currently, the schema parameter isn't supported.

" }, "secretArn":{ "shape":"Arn", @@ -551,7 +552,8 @@ "documentation":"

A value of string data type.

" } }, - "documentation":"

Contains a value.

" + "documentation":"

Contains a value.

", + "union":true }, "FieldList":{ "type":"list", @@ -721,7 +723,7 @@ }, "typeHint":{ "shape":"TypeHint", - "documentation":"

A hint that specifies the correct object type for data type mapping.

Values:

  • DECIMAL - The corresponding String parameter value is sent as an object of DECIMAL type to the database.

  • TIMESTAMP - The corresponding String parameter value is sent as an object of TIMESTAMP type to the database. The accepted format is YYYY-MM-DD HH:MM:SS[.FFF].

  • TIME - The corresponding String parameter value is sent as an object of TIME type to the database. The accepted format is HH:MM:SS[.FFF].

  • DATE - The corresponding String parameter value is sent as an object of DATE type to the database. The accepted format is YYYY-MM-DD.

" + "documentation":"

A hint that specifies the correct object type for data type mapping. Possible values are as follows:

  • DATE - The corresponding String parameter value is sent as an object of DATE type to the database. The accepted format is YYYY-MM-DD.

  • DECIMAL - The corresponding String parameter value is sent as an object of DECIMAL type to the database.

  • JSON - The corresponding String parameter value is sent as an object of JSON type to the database.

  • TIME - The corresponding String parameter value is sent as an object of TIME type to the database. The accepted format is HH:MM:SS[.FFF].

  • TIMESTAMP - The corresponding String parameter value is sent as an object of TIMESTAMP type to the database. The accepted format is YYYY-MM-DD HH:MM:SS[.FFF].

  • UUID - The corresponding String parameter value is sent as an object of UUID type to the database.

" }, "value":{ "shape":"Field", @@ -808,10 +810,12 @@ "TypeHint":{ "type":"string", "enum":[ + "JSON", + "UUID", + "TIMESTAMP", "DATE", - "DECIMAL", "TIME", - "TIMESTAMP" + "DECIMAL" ] }, "UpdateResult":{ @@ -872,8 +876,9 @@ "documentation":"

A value for a column of STRUCT data type.

" } }, - "documentation":"

Contains the value of a column.

 <important> <p>This data type is deprecated.</p> </important> 
" + "documentation":"

Contains the value of a column.

 <important> <p>This data type is deprecated.</p> </important> 
", + "union":true } }, - "documentation":"

Amazon RDS Data Service

Amazon RDS provides an HTTP endpoint to run SQL statements on an Amazon Aurora Serverless DB cluster. To run these statements, you work with the Data Service API.

For more information about the Data Service API, see Using the Data API for Aurora Serverless in the Amazon Aurora User Guide.

If you have questions or comments related to the Data API, send email to Rds-data-api-feedback@amazon.com.

" + "documentation":"

Amazon RDS Data Service

Amazon RDS provides an HTTP endpoint to run SQL statements on an Amazon Aurora Serverless DB cluster. To run these statements, you work with the Data Service API.

For more information about the Data Service API, see Using the Data API for Aurora Serverless in the Amazon Aurora User Guide.

" } diff --git a/botocore/data/route53/2013-04-01/service-2.json b/botocore/data/route53/2013-04-01/service-2.json index 34524786..58df0d99 100644 --- a/botocore/data/route53/2013-04-01/service-2.json +++ b/botocore/data/route53/2013-04-01/service-2.json @@ -27,7 +27,7 @@ {"shape":"InvalidSigningStatus"}, {"shape":"InvalidKMSArn"} ], - "documentation":"

Activates a key signing key (KSK) so that it can be used for signing by DNSSEC. This operation changes the KSK status to ACTIVE.

" + "documentation":"

Activates a key-signing key (KSK) so that it can be used for signing by DNSSEC. This operation changes the KSK status to ACTIVE.

" }, "AssociateVPCWithHostedZone":{ "name":"AssociateVPCWithHostedZone", @@ -166,7 +166,7 @@ {"shape":"TooManyKeySigningKeys"}, {"shape":"ConcurrentModification"} ], - "documentation":"

Creates a new key signing key (KSK) associated with a hosted zone. You can only have two KSKs per hosted zone.

" + "documentation":"

Creates a new key-signing key (KSK) associated with a hosted zone. You can only have two KSKs per hosted zone.

" }, "CreateQueryLoggingConfig":{ "name":"CreateQueryLoggingConfig", @@ -317,7 +317,7 @@ {"shape":"KeySigningKeyInUse"}, {"shape":"KeySigningKeyInParentDSRecord"} ], - "documentation":"

Deactivates a key signing key (KSK) so that it will not be used for signing by DNSSEC. This operation changes the KSK status to INACTIVE.

" + "documentation":"

Deactivates a key-signing key (KSK) so that it will not be used for signing by DNSSEC. This operation changes the KSK status to INACTIVE.

" }, "DeleteHealthCheck":{ "name":"DeleteHealthCheck", @@ -366,7 +366,7 @@ {"shape":"InvalidSigningStatus"}, {"shape":"InvalidKMSArn"} ], - "documentation":"

Deletes a key signing key (KSK). Before you can delete a KSK, you must deactivate it. The KSK must be deactived before you can delete it regardless of whether the hosted zone is enabled for DNSSEC signing.

" + "documentation":"

Deletes a key-signing key (KSK). Before you can delete a KSK, you must deactivate it. The KSK must be deactivated before you can delete it regardless of whether the hosted zone is enabled for DNSSEC signing.

" }, "DeleteQueryLoggingConfig":{ "name":"DeleteQueryLoggingConfig", @@ -468,7 +468,7 @@ {"shape":"InvalidKeySigningKeyStatus"}, {"shape":"InvalidKMSArn"} ], - "documentation":"

Disables DNSSEC signing in a specific hosted zone. This action does not deactivate any key signing keys (KSKs) that are active in the hosted zone.

" + "documentation":"

Disables DNSSEC signing in a specific hosted zone. This action does not deactivate any key-signing keys (KSKs) that are active in the hosted zone.

" }, "DisassociateVPCFromHostedZone":{ "name":"DisassociateVPCFromHostedZone", @@ -546,7 +546,7 @@ }, "input":{"shape":"GetCheckerIpRangesRequest"}, "output":{"shape":"GetCheckerIpRangesResponse"}, - "documentation":"

GetCheckerIpRanges still works, but we recommend that you download ip-ranges.json, which includes IP address ranges for all AWS services. For more information, see IP Address Ranges of Amazon Route 53 Servers in the Amazon Route 53 Developer Guide.

" + "documentation":"

Route 53 does not perform authorization for this API because it retrieves information that is already available to the public.

GetCheckerIpRanges still works, but we recommend that you download ip-ranges.json, which includes IP address ranges for all AWS services. For more information, see IP Address Ranges of Amazon Route 53 Servers in the Amazon Route 53 Developer Guide.

" }, "GetDNSSEC":{ "name":"GetDNSSEC", @@ -560,7 +560,7 @@ {"shape":"NoSuchHostedZone"}, {"shape":"InvalidArgument"} ], - "documentation":"

Returns information about DNSSEC for a specific hosted zone, including the key signing keys (KSKs) and zone signing keys (ZSKs) in the hosted zone.

" + "documentation":"

Returns information about DNSSEC for a specific hosted zone, including the key-signing keys (KSKs) in the hosted zone.

" }, "GetGeoLocation":{ "name":"GetGeoLocation", @@ -574,7 +574,7 @@ {"shape":"NoSuchGeoLocation"}, {"shape":"InvalidInput"} ], - "documentation":"

Gets information about whether a specified geographic location is supported for Amazon Route 53 geolocation resource record sets.

Use the following syntax to determine whether a continent is supported for geolocation:

GET /2013-04-01/geolocation?continentcode=two-letter abbreviation for a continent

Use the following syntax to determine whether a country is supported for geolocation:

GET /2013-04-01/geolocation?countrycode=two-character country code

Use the following syntax to determine whether a subdivision of a country is supported for geolocation:

GET /2013-04-01/geolocation?countrycode=two-character country code&subdivisioncode=subdivision code

" + "documentation":"

Gets information about whether a specified geographic location is supported for Amazon Route 53 geolocation resource record sets.

Route 53 does not perform authorization for this API because it retrieves information that is already available to the public.

Use the following syntax to determine whether a continent is supported for geolocation:

GET /2013-04-01/geolocation?continentcode=two-letter abbreviation for a continent

Use the following syntax to determine whether a country is supported for geolocation:

GET /2013-04-01/geolocation?countrycode=two-character country code

Use the following syntax to determine whether a subdivision of a country is supported for geolocation:

GET /2013-04-01/geolocation?countrycode=two-character country code&subdivisioncode=subdivision code

" }, "GetHealthCheck":{ "name":"GetHealthCheck", @@ -763,7 +763,7 @@ "errors":[ {"shape":"InvalidInput"} ], - "documentation":"

Retrieves a list of supported geographic locations.

Countries are listed first, and continents are listed last. If Amazon Route 53 supports subdivisions for a country (for example, states or provinces), the subdivisions for that country are listed in alphabetical order immediately after the corresponding country.

For a list of supported geolocation codes, see the GeoLocation data type.

" + "documentation":"

Retrieves a list of supported geographic locations.

Countries are listed first, and continents are listed last. If Amazon Route 53 supports subdivisions for a country (for example, states or provinces), the subdivisions for that country are listed in alphabetical order immediately after the corresponding country.

Route 53 does not perform authorization for this API because it retrieves information that is already available to the public.

For a list of supported geolocation codes, see the GeoLocation data type.

" }, "ListHealthChecks":{ "name":"ListHealthChecks", @@ -1125,7 +1125,7 @@ }, "Name":{ "shape":"SigningKeyName", - "documentation":"

An alphanumeric string used to identify a key signing key (KSK).

", + "documentation":"

A string used to identify a key-signing key (KSK). Name can include numbers, letters, and underscores (_). Name must be unique for each key-signing key in the same hosted zone.

", "location":"uri", "locationName":"Name" } @@ -1147,7 +1147,7 @@ "members":{ "Region":{ "shape":"CloudWatchRegion", - "documentation":"

For the CloudWatch alarm that you want Route 53 health checkers to use to determine whether this health check is healthy, the region that the alarm was created in.

For the current list of CloudWatch regions, see Amazon CloudWatch in the AWS Service Endpoints chapter of the Amazon Web Services General Reference.

" + "documentation":"

For the CloudWatch alarm that you want Route 53 health checkers to use to determine whether this health check is healthy, the region that the alarm was created in.

For the current list of CloudWatch regions, see Amazon CloudWatch endpoints and quotas in the Amazon Web Services General Reference.

" }, "Name":{ "shape":"AlarmName", @@ -1172,7 +1172,7 @@ "members":{ "HostedZoneId":{ "shape":"ResourceId", - "documentation":"

Alias resource records sets only: The value used depends on where you want to route traffic:

Amazon API Gateway custom regional APIs and edge-optimized APIs

Specify the hosted zone ID for your API. You can get the applicable value using the AWS CLI command get-domain-names:

  • For regional APIs, specify the value of regionalHostedZoneId.

  • For edge-optimized APIs, specify the value of distributionHostedZoneId.

Amazon Virtual Private Cloud interface VPC endpoint

Specify the hosted zone ID for your interface endpoint. You can get the value of HostedZoneId using the AWS CLI command describe-vpc-endpoints.

CloudFront distribution

Specify Z2FDTNDATAQYW2.

Alias resource record sets for CloudFront can't be created in a private zone.

Elastic Beanstalk environment

Specify the hosted zone ID for the region that you created the environment in. The environment must have a regionalized subdomain. For a list of regions and the corresponding hosted zone IDs, see AWS Elastic Beanstalk in the \"AWS Service Endpoints\" chapter of the Amazon Web Services General Reference.

ELB load balancer

Specify the value of the hosted zone ID for the load balancer. Use the following methods to get the hosted zone ID:

  • Service Endpoints table in the \"Elastic Load Balancing Endpoints and Quotas\" topic in the Amazon Web Services General Reference: Use the value that corresponds with the region that you created your load balancer in. Note that there are separate columns for Application and Classic Load Balancers and for Network Load Balancers.

  • AWS Management Console: Go to the Amazon EC2 page, choose Load Balancers in the navigation pane, select the load balancer, and get the value of the Hosted zone field on the Description tab.

  • Elastic Load Balancing API: Use DescribeLoadBalancers to get the applicable value. For more information, see the applicable guide:

  • AWS CLI: Use describe-load-balancers to get the applicable value. For more information, see the applicable guide:

AWS Global Accelerator accelerator

Specify Z2BJ6XQ5FK7U4H.

An Amazon S3 bucket configured as a static website

Specify the hosted zone ID for the region that you created the bucket in. For more information about valid values, see the table Amazon S3 Website Endpoints in the Amazon Web Services General Reference.

Another Route 53 resource record set in your hosted zone

Specify the hosted zone ID of your hosted zone. (An alias resource record set can't reference a resource record set in a different hosted zone.)

" + "documentation":"

Alias resource records sets only: The value used depends on where you want to route traffic:

Amazon API Gateway custom regional APIs and edge-optimized APIs

Specify the hosted zone ID for your API. You can get the applicable value using the AWS CLI command get-domain-names:

  • For regional APIs, specify the value of regionalHostedZoneId.

  • For edge-optimized APIs, specify the value of distributionHostedZoneId.

Amazon Virtual Private Cloud interface VPC endpoint

Specify the hosted zone ID for your interface endpoint. You can get the value of HostedZoneId using the AWS CLI command describe-vpc-endpoints.

CloudFront distribution

Specify Z2FDTNDATAQYW2.

Alias resource record sets for CloudFront can't be created in a private zone.

Elastic Beanstalk environment

Specify the hosted zone ID for the region that you created the environment in. The environment must have a regionalized subdomain. For a list of regions and the corresponding hosted zone IDs, see AWS Elastic Beanstalk endpoints and quotas in the Amazon Web Services General Reference.

ELB load balancer

Specify the value of the hosted zone ID for the load balancer. Use the following methods to get the hosted zone ID:

  • Elastic Load Balancing endpoints and quotas topic in the Amazon Web Services General Reference: Use the value that corresponds with the region that you created your load balancer in. Note that there are separate columns for Application and Classic Load Balancers and for Network Load Balancers.

  • AWS Management Console: Go to the Amazon EC2 page, choose Load Balancers in the navigation pane, select the load balancer, and get the value of the Hosted zone field on the Description tab.

  • Elastic Load Balancing API: Use DescribeLoadBalancers to get the applicable value. For more information, see the applicable guide:

  • AWS CLI: Use describe-load-balancers to get the applicable value. For more information, see the applicable guide:

AWS Global Accelerator accelerator

Specify Z2BJ6XQ5FK7U4H.

An Amazon S3 bucket configured as a static website

Specify the hosted zone ID for the region that you created the bucket in. For more information about valid values, see the table Amazon S3 Website Endpoints in the Amazon Web Services General Reference.

Another Route 53 resource record set in your hosted zone

Specify the hosted zone ID of your hosted zone. (An alias resource record set can't reference a resource record set in a different hosted zone.)

" }, "DNSName":{ "shape":"DNSName", @@ -1628,15 +1628,15 @@ }, "KeyManagementServiceArn":{ "shape":"SigningKeyString", - "documentation":"

The Amazon resource name (ARN) for a customer managed key (CMK) in AWS Key Management Service (KMS). The KeyManagementServiceArn must be unique for each key signing key (KSK) in a single hosted zone. To see an example of KeyManagementServiceArn that grants the correct permissions for DNSSEC, scroll down to Example.

You must configure the CMK as follows:

Status

Enabled

Key spec

ECC_NIST_P256

Key usage

Sign and verify

Key policy

The key policy must give permission for the following actions:

  • DescribeKey

  • GetPublicKey

  • Sign

The key policy must also include the Amazon Route 53 service in the principal for your account. Specify the following:

  • \"Service\": \"api-service.dnssec.route53.aws.internal\"

For more information about working with CMK in KMS, see AWS Key Management Service concepts.

" + "documentation":"

The Amazon resource name (ARN) for a customer managed customer master key (CMK) in AWS Key Management Service (AWS KMS). The KeyManagementServiceArn must be unique for each key-signing key (KSK) in a single hosted zone. To see an example of KeyManagementServiceArn that grants the correct permissions for DNSSEC, scroll down to Example.

You must configure the customer managed CMK as follows:

Status

Enabled

Key spec

ECC_NIST_P256

Key usage

Sign and verify

Key policy

The key policy must give permission for the following actions:

  • DescribeKey

  • GetPublicKey

  • Sign

The key policy must also include the Amazon Route 53 service in the principal for your account. Specify the following:

  • \"Service\": \"api-service.dnssec.route53.aws.internal\"

For more information about working with a customer managed CMK in AWS KMS, see AWS Key Management Service concepts.

" }, "Name":{ "shape":"SigningKeyName", - "documentation":"

An alphanumeric string used to identify a key signing key (KSK). Name must be unique for each key signing key in the same hosted zone.

" + "documentation":"

A string used to identify a key-signing key (KSK). Name can include numbers, letters, and underscores (_). Name must be unique for each key-signing key in the same hosted zone.

" }, "Status":{ "shape":"SigningKeyStatus", - "documentation":"

A string specifying the initial status of the key signing key (KSK). You can set the value to ACTIVE or INACTIVE.

" + "documentation":"

A string specifying the initial status of the key-signing key (KSK). You can set the value to ACTIVE or INACTIVE.

" } } }, @@ -1651,11 +1651,11 @@ "ChangeInfo":{"shape":"ChangeInfo"}, "KeySigningKey":{ "shape":"KeySigningKey", - "documentation":"

The key signing key (KSK) that the request creates.

" + "documentation":"

The key-signing key (KSK) that the request creates.

" }, "Location":{ "shape":"ResourceURI", - "documentation":"

The unique URL representing the new key signing key (KSK).

", + "documentation":"

The unique URL representing the new key-signing key (KSK).

", "location":"header", "locationName":"Location" } @@ -1926,7 +1926,7 @@ "members":{ "ServeSignature":{ "shape":"ServeSignature", - "documentation":"

Indicates your hosted zone signging status: SIGNING, NOT_SIGNING, or INTERNAL_FAILURE. If the status is INTERNAL_FAILURE, see StatusMessage for information about steps that you can take to correct the problem.

A status INTERNAL_FAILURE means there was an error during a request. Before you can continue to work with DNSSEC signing, including working with key signing keys (KSKs), you must correct the problem by enabling or disabling DNSSEC signing for the hosted zone.

" + "documentation":"

A string that represents the current hosted zone signing status.

Status can have one of the following values:

SIGNING

DNSSEC signing is enabled for the hosted zone.

NOT_SIGNING

DNSSEC signing is not enabled for the hosted zone.

DELETING

DNSSEC signing is in the process of being removed for the hosted zone.

ACTION_NEEDED

There is a problem with signing in the hosted zone that requires you to take action to resolve. For example, the customer managed customer master key (CMK) might have been deleted, or the permissions for the customer managed CMK might have been changed.

INTERNAL_FAILURE

There was an error during a request. Before you can continue to work with DNSSEC signing, including with key-signing keys (KSKs), you must correct the problem by enabling or disabling DNSSEC signing for the hosted zone.

" }, "StatusMessage":{ "shape":"SigningKeyStatusMessage", @@ -1950,7 +1950,7 @@ }, "Name":{ "shape":"SigningKeyName", - "documentation":"

An alphanumeric string used to identify a key signing key (KSK).

", + "documentation":"

A string used to identify a key-signing key (KSK).

", "location":"uri", "locationName":"Name" } @@ -2110,7 +2110,7 @@ }, "Name":{ "shape":"SigningKeyName", - "documentation":"

An alphanumeric string used to identify a key signing key (KSK).

", + "documentation":"

A string used to identify a key-signing key (KSK).

", "location":"uri", "locationName":"Name" } @@ -2419,7 +2419,7 @@ }, "SubdivisionCode":{ "shape":"GeoLocationSubdivisionCode", - "documentation":"

The code for the subdivision. Route 53 currently supports only states in the United States.

" + "documentation":"

The code for the subdivision, such as a particular state within the United States. For a list of US state abbreviations, see Appendix B: Two–Letter State and Possession Abbreviations on the United States Postal Service website. For a list of all supported subdivision codes, use the ListGeoLocations API.

" }, "SubdivisionName":{ "shape":"GeoLocationSubdivisionName", @@ -2542,7 +2542,7 @@ }, "KeySigningKeys":{ "shape":"KeySigningKeys", - "documentation":"

The key signing keys (KSKs) in your account.

" + "documentation":"

The key-signing keys (KSKs) in your account.

" } } }, @@ -2563,7 +2563,7 @@ }, "SubdivisionCode":{ "shape":"GeoLocationSubdivisionCode", - "documentation":"

For SubdivisionCode, Amazon Route 53 supports only states of the United States. For a list of state abbreviations, see Appendix B: Two–Letter State and Possession Abbreviations on the United States Postal Service website.

If you specify subdivisioncode, you must also specify US for CountryCode.

", + "documentation":"

The code for the subdivision, such as a particular state within the United States. For a list of US state abbreviations, see Appendix B: Two–Letter State and Possession Abbreviations on the United States Postal Service website. For a list of all supported subdivision codes, use the ListGeoLocations API.

", "location":"querystring", "locationName":"subdivisioncode" } @@ -2930,7 +2930,7 @@ "members":{ "Id":{ "shape":"HealthCheckId", - "documentation":"

The identifier that Amazon Route 53assigned to the health check when you created it. When you add or update a resource record set, you use this value to specify which health check to use. The value can be up to 64 characters long.

" + "documentation":"

The identifier that Amazon Route 53 assigned to the health check when you created it. When you add or update a resource record set, you use this value to specify which health check to use. The value can be up to 64 characters long.

" }, "CallerReference":{ "shape":"HealthCheckNonce", @@ -3428,7 +3428,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

The key signing key (KSK) name that you specified isn't a valid name.

", + "documentation":"

The key-signing key (KSK) name that you specified isn't a valid name.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -3437,7 +3437,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

The key signing key (KSK) status isn't valid or another KSK has the status INTERNAL_FAILURE.

", + "documentation":"

The key-signing key (KSK) status isn't valid or another KSK has the status INTERNAL_FAILURE.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -3489,15 +3489,15 @@ "members":{ "Name":{ "shape":"SigningKeyName", - "documentation":"

An alphanumeric string used to identify a key signing key (KSK). Name must be unique for each key signing key in the same hosted zone.

" + "documentation":"

A string used to identify a key-signing key (KSK). Name can include numbers, letters, and underscores (_). Name must be unique for each key-signing key in the same hosted zone.

" }, "KmsArn":{ "shape":"SigningKeyString", - "documentation":"

The Amazon resource name (ARN) used to identify the customer managed key (CMK) in AWS Key Management Service (KMS). The KmsArn must be unique for each key signing key (KSK) in a single hosted zone.

You must configure the CMK as follows:

Status

Enabled

Key spec

ECC_NIST_P256

Key usage

Sign and verify

Key policy

The key policy must give permission for the following actions:

  • DescribeKey

  • GetPublicKey

  • Sign

The key policy must also include the Amazon Route 53 service in the principal for your account. Specify the following:

  • \"Service\": \"api-service.dnssec.route53.aws.internal\"

For more information about working with the customer managed key (CMK) in KMS, see AWS Key Management Service concepts.

" + "documentation":"

The Amazon resource name (ARN) used to identify the customer managed customer master key (CMK) in AWS Key Management Service (AWS KMS). The KmsArn must be unique for each key-signing key (KSK) in a single hosted zone.

You must configure the CMK as follows:

Status

Enabled

Key spec

ECC_NIST_P256

Key usage

Sign and verify

Key policy

The key policy must give permission for the following actions:

  • DescribeKey

  • GetPublicKey

  • Sign

The key policy must also include the Amazon Route 53 service in the principal for your account. Specify the following:

  • \"Service\": \"api-service.dnssec.route53.aws.internal\"

For more information about working with the customer managed CMK in AWS KMS, see AWS Key Management Service concepts.

" }, "Flag":{ "shape":"SigningKeyInteger", - "documentation":"

An integer that specifies how the key is used. For key signing key (KSK), this value is always 257.

" + "documentation":"

An integer that specifies how the key is used. For key-signing key (KSK), this value is always 257.

" }, "SigningAlgorithmMnemonic":{ "shape":"SigningKeyString", @@ -3537,29 +3537,29 @@ }, "Status":{ "shape":"SigningKeyStatus", - "documentation":"

A string that represents the current key signing key (KSK) status.

Status can have one of the following values:

ACTIVE

The KSK is being used for signing.

INACTIVE

The KSK is not being used for signing.

ACTION_NEEDED

There is an error in the KSK that requires you to take action to resolve.

INTERNAL_FAILURE

There was an error during a request. Before you can continue to work with DNSSEC signing, including actions that involve this KSK, you must correct the problem. For example, you may need to activate or deactivate the KSK.

" + "documentation":"

A string that represents the current key-signing key (KSK) status.

Status can have one of the following values:

ACTIVE

The KSK is being used for signing.

INACTIVE

The KSK is not being used for signing.

DELETING

The KSK is in the process of being deleted.

ACTION_NEEDED

There is a problem with the KSK that requires you to take action to resolve. For example, the customer managed customer master key (CMK) might have been deleted, or the permissions for the customer managed CMK might have been changed.

INTERNAL_FAILURE

There was an error during a request. Before you can continue to work with DNSSEC signing, including actions that involve this KSK, you must correct the problem. For example, you may need to activate or deactivate the KSK.

" }, "StatusMessage":{ "shape":"SigningKeyStatusMessage", - "documentation":"

The status message provided for the following key signing key (KSK) statuses: ACTION_NEEDED or INTERNAL_FAILURE. The status message includes information about what the problem might be and steps that you can take to correct the issue.

" + "documentation":"

The status message provided for the following key-signing key (KSK) statuses: ACTION_NEEDED or INTERNAL_FAILURE. The status message includes information about what the problem might be and steps that you can take to correct the issue.

" }, "CreatedDate":{ "shape":"TimeStamp", - "documentation":"

The date when the key signing key (KSK) was created.

" + "documentation":"

The date when the key-signing key (KSK) was created.

" }, "LastModifiedDate":{ "shape":"TimeStamp", - "documentation":"

The last time that the key signing key (KSK) was changed.

" + "documentation":"

The last time that the key-signing key (KSK) was changed.

" } }, - "documentation":"

A key signing key (KSK) is a complex type that represents a public/private key pair. The private key is used to generate a digital signature for the zone signing key (ZSK). The public key is stored in the DNS and is used to authenticate the ZSK. A KSK is always associated with a hosted zone; it cannot exist by itself.

" + "documentation":"

A key-signing key (KSK) is a complex type that represents a public/private key pair. The private key is used to generate a digital signature for the zone signing key (ZSK). The public key is stored in the DNS and is used to authenticate the ZSK. A KSK is always associated with a hosted zone; it cannot exist by itself.

" }, "KeySigningKeyAlreadyExists":{ "type":"structure", "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

You've already created a key signing key (KSK) with this name or with the same customer managed key (CMK) ARN.

", + "documentation":"

You've already created a key-signing key (KSK) with this name or with the same customer managed customer master key (CMK) ARN.

", "error":{"httpStatusCode":409}, "exception":true }, @@ -3568,7 +3568,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

The key signing key (KSK) is specified in a parent DS record.

", + "documentation":"

The key-signing key (KSK) is specified in a parent DS record.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -3577,7 +3577,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

The key signing key (KSK) that you specified can't be deactivated because it's the only KSK for a currently-enabled DNSSEC. Disable DNSSEC signing, or add or enable another KSK.

", + "documentation":"

The key-signing key (KSK) that you specified can't be deactivated because it's the only KSK for a currently-enabled DNSSEC. Disable DNSSEC signing, or add or enable another KSK.

", "exception":true }, "KeySigningKeyWithActiveStatusNotFound":{ @@ -3585,7 +3585,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

A key signing key (KSK) with ACTIVE status wasn't found.

", + "documentation":"

A key-signing key (KSK) with ACTIVE status wasn't found.

", "exception":true }, "KeySigningKeys":{ @@ -4589,7 +4589,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

The specified key signing key (KSK) doesn't exist.

", + "documentation":"

The specified key-signing key (KSK) doesn't exist.

", "error":{"httpStatusCode":404}, "exception":true }, @@ -4822,7 +4822,7 @@ }, "Type":{ "shape":"RRType", - "documentation":"

The DNS record type. For information about different record types and how data is encoded for them, see Supported DNS Resource Record Types in the Amazon Route 53 Developer Guide.

Valid values for basic resource record sets: A | AAAA | CAA | CNAME | MX | NAPTR | NS | PTR | SOA | SPF | SRV | TXT

Values for weighted, latency, geolocation, and failover resource record sets: A | AAAA | CAA | CNAME | MX | NAPTR | PTR | SPF | SRV | TXT. When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.

Valid values for multivalue answer resource record sets: A | AAAA | MX | NAPTR | PTR | SPF | SRV | TXT

SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of Type is SPF. RFC 7208, Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1, has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, The SPF DNS Record Type.

Values for alias resource record sets:

  • Amazon API Gateway custom regional APIs and edge-optimized APIs: A

  • CloudFront distributions: A

    If IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of A and one with a value of AAAA.

  • Amazon API Gateway environment that has a regionalized subdomain: A

  • ELB load balancers: A | AAAA

  • Amazon S3 buckets: A

  • Amazon Virtual Private Cloud interface VPC endpoints A

  • Another resource record set in this hosted zone: Specify the type of the resource record set that you're creating the alias for. All values are supported except NS and SOA.

    If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't route traffic to a record for which the value of Type is CNAME. This is because the alias record must have the same type as the record you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.

" + "documentation":"

The DNS record type. For information about different record types and how data is encoded for them, see Supported DNS Resource Record Types in the Amazon Route 53 Developer Guide.

Valid values for basic resource record sets: A | AAAA | CAA | CNAME | DS | MX | NAPTR | NS | PTR | SOA | SPF | SRV | TXT

Values for weighted, latency, geolocation, and failover resource record sets: A | AAAA | CAA | CNAME | MX | NAPTR | PTR | SPF | SRV | TXT. When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.

Valid values for multivalue answer resource record sets: A | AAAA | MX | NAPTR | PTR | SPF | SRV | TXT

SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of Type is SPF. RFC 7208, Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1, has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, The SPF DNS Record Type.

Values for alias resource record sets:

  • Amazon API Gateway custom regional APIs and edge-optimized APIs: A

  • CloudFront distributions: A

    If IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of A and one with a value of AAAA.

  • Elastic Beanstalk environment that has a regionalized subdomain: A

  • ELB load balancers: A | AAAA

  • Amazon S3 buckets: A

  • Amazon Virtual Private Cloud interface VPC endpoints: A

  • Another resource record set in this hosted zone: Specify the type of the resource record set that you're creating the alias for. All values are supported except NS and SOA.

    If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't route traffic to a record for which the value of Type is CNAME. This is because the alias record must have the same type as the record you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.

" }, "SetIdentifier":{ "shape":"ResourceRecordSetIdentifier", @@ -5236,7 +5236,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

You've reached the limit for the number of key signing keys (KSKs). Remove at least one KSK, and then try again.

", + "documentation":"

You've reached the limit for the number of key-signing keys (KSKs). Remove at least one KSK, and then try again.

", "exception":true }, "TooManyTrafficPolicies":{ diff --git a/botocore/data/s3control/2018-08-20/service-2.json b/botocore/data/s3control/2018-08-20/service-2.json index f313c3d0..ea443762 100644 --- a/botocore/data/s3control/2018-08-20/service-2.json +++ b/botocore/data/s3control/2018-08-20/service-2.json @@ -61,7 +61,7 @@ {"shape":"IdempotencyException"}, {"shape":"InternalServiceException"} ], - "documentation":"

S3 Batch Operations performs large-scale Batch Operations on Amazon S3 objects. Batch Operations can run a single operation or action on lists of Amazon S3 objects that you specify. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

This operation creates an S3 Batch Operations job.

Related actions include:

", + "documentation":"

You can use S3 Batch Operations to perform large-scale batch operations on Amazon S3 objects. Batch Operations can run a single operation on lists of Amazon S3 objects that you specify. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

This operation creates an S3 Batch Operations job.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -176,7 +176,7 @@ "requestUri":"/v20180820/storagelens/{storagelensid}" }, "input":{"shape":"DeleteStorageLensConfigurationRequest"}, - "documentation":"

Deletes the Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:DeleteStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Deletes the Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:DeleteStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -189,7 +189,7 @@ }, "input":{"shape":"DeleteStorageLensConfigurationTaggingRequest"}, "output":{"shape":"DeleteStorageLensConfigurationTaggingResult"}, - "documentation":"

Deletes the Amazon S3 Storage Lens configuration tags. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:DeleteStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Deletes the Amazon S3 Storage Lens configuration tags. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:DeleteStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -346,7 +346,7 @@ }, "input":{"shape":"GetStorageLensConfigurationRequest"}, "output":{"shape":"GetStorageLensConfigurationResult"}, - "documentation":"

Gets the Amazon S3 Storage Lens configuration. For more information, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:GetStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Gets the Amazon S3 Storage Lens configuration. For more information, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:GetStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -359,7 +359,7 @@ }, "input":{"shape":"GetStorageLensConfigurationTaggingRequest"}, "output":{"shape":"GetStorageLensConfigurationTaggingResult"}, - "documentation":"

Gets the tags of Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:GetStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Gets the tags of Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:GetStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -416,7 +416,7 @@ }, "input":{"shape":"ListStorageLensConfigurationsRequest"}, "output":{"shape":"ListStorageLensConfigurationsResult"}, - "documentation":"

Gets a list of Amazon S3 Storage Lens configurations. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:ListStorageLensConfigurations action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Gets a list of Amazon S3 Storage Lens configurations. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:ListStorageLensConfigurations action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -543,7 +543,7 @@ "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, "output":{"shape":"PutStorageLensConfigurationTaggingResult"}, - "documentation":"

Put or replace tags on an existing Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:PutStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Put or replace tags on an existing Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:PutStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -1897,7 +1897,7 @@ "members":{ "ObjectArn":{ "shape":"S3KeyArnString", - "documentation":"

The Amazon Resource Name (ARN) for a manifest object.

" + "documentation":"

The Amazon Resource Name (ARN) for a manifest object.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

" }, "ObjectVersionId":{ "shape":"S3ObjectVersionId", @@ -1958,6 +1958,11 @@ "documentation":"

Directs the specified job to run a PUT Object tagging call on every object in the manifest.

", "box":true }, + "S3DeleteObjectTagging":{ + "shape":"S3DeleteObjectTaggingOperation", + "documentation":"

Directs the specified job to run a DELETE Object tagging call on every object in the manifest.

", + "box":true + }, "S3InitiateRestoreObject":{ "shape":"S3InitiateRestoreObjectOperation", "documentation":"

Directs the specified job to initiate restore requests for every archived object in the manifest.

", @@ -2183,7 +2188,7 @@ "members":{ "Prefix":{ "shape":"Prefix", - "documentation":"

Prefix identifying one or more objects to which the rule applies.

" + "documentation":"

Prefix identifying one or more objects to which the rule applies.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

" }, "Tag":{"shape":"S3Tag"}, "And":{ @@ -2491,6 +2496,7 @@ "S3PutObjectCopy", "S3PutObjectAcl", "S3PutObjectTagging", + "S3DeleteObjectTagging", "S3InitiateRestoreObject", "S3PutObjectLegalHold", "S3PutObjectRetention" @@ -3028,6 +3034,12 @@ }, "documentation":"

Contains the configuration parameters for a PUT Copy object operation. S3 Batch Operations passes every object to the underlying PUT Copy object API. For more information about the parameters for this operation, see PUT Object - Copy.

" }, + "S3DeleteObjectTaggingOperation":{ + "type":"structure", + "members":{ + }, + "documentation":"

Contains no configuration parameters because the DELETE Object tagging API only accepts the bucket name and key name as parameters, which are defined in the job's manifest.

" + }, "S3ExpirationInDays":{ "type":"integer", "min":0 @@ -3449,7 +3461,7 @@ "members":{ "S3BucketDestination":{ "shape":"S3BucketDestination", - "documentation":"

A container for the bucket where the S3 Storage Lens metrics export will be located.

" + "documentation":"

A container for the bucket where the S3 Storage Lens metrics export will be located.

This bucket must be located in the same Region as the storage lens configuration.

" } }, "documentation":"

A container to specify the properties of your S3 Storage Lens metrics export, including the destination, schema, and format.

" diff --git a/botocore/utils.py b/botocore/utils.py index 3eebfeb7..468b4ae6 100644 --- a/botocore/utils.py +++ b/botocore/utils.py @@ -1543,13 +1543,6 @@ class S3EndpointSetter(object): return 's3_accesspoint' in request.context def _validate_accesspoint_supported(self, request): - if self._endpoint_url: - raise UnsupportedS3AccesspointConfigurationError( - msg=( - 'Client cannot use a custom "endpoint_url" when ' - 'specifying an access-point ARN.' - ) - ) if self._use_accelerate_endpoint: raise UnsupportedS3AccesspointConfigurationError( msg=( @@ -1609,19 +1602,26 @@ class S3EndpointSetter(object): accesspoint_netloc_components = [ '%s-%s' % (s3_accesspoint['name'], s3_accesspoint['account']), ] - if 'outpost_name' in s3_accesspoint: - outpost_host = [s3_accesspoint['outpost_name'], 's3-outposts'] - accesspoint_netloc_components.extend(outpost_host) + outpost_name = s3_accesspoint.get('outpost_name') + if self._endpoint_url: + if outpost_name: + accesspoint_netloc_components.append(outpost_name) + endpoint_url_netloc = urlsplit(self._endpoint_url).netloc + accesspoint_netloc_components.append(endpoint_url_netloc) else: - accesspoint_netloc_components.append('s3-accesspoint') - if self._s3_config.get('use_dualstack_endpoint'): - accesspoint_netloc_components.append('dualstack') - accesspoint_netloc_components.extend( - [ - region_name, - self._get_dns_suffix(region_name) - ] - ) + if outpost_name: + outpost_host = [outpost_name, 's3-outposts'] + accesspoint_netloc_components.extend(outpost_host) + else: + accesspoint_netloc_components.append('s3-accesspoint') + if self._s3_config.get('use_dualstack_endpoint'): + accesspoint_netloc_components.append('dualstack') + accesspoint_netloc_components.extend( + [ + region_name, + self._get_dns_suffix(region_name) + ] + ) return '.'.join(accesspoint_netloc_components) def _get_accesspoint_path(self, original_path, request_context): @@ -1770,7 +1770,6 @@ class S3ControlEndpointSetter(object): return 'outpost_id' in 
request.context def _validate_endpoint_from_arn_details_supported(self, request): - self._validate_no_custom_endpoint() if not self._s3_config.get('use_arn_region', False): arn_region = request.context['arn_details']['region'] if arn_region != self._region: @@ -1797,17 +1796,7 @@ class S3ControlEndpointSetter(object): if 'outpost_name' in request.context['arn_details']: self._validate_outpost_redirection_valid(request) - def _validate_no_custom_endpoint(self): - if self._endpoint_url: - raise UnsupportedS3ControlConfigurationError( - msg=( - 'Client cannot use a custom "endpoint_url" when ' - 'specifying a resource ARN.' - ) - ) - def _validate_outpost_redirection_valid(self, request): - self._validate_no_custom_endpoint() if self._s3_config.get('use_dualstack_endpoint'): raise UnsupportedS3ControlConfigurationError( msg=( @@ -1865,22 +1854,29 @@ class S3ControlEndpointSetter(object): def _construct_s3_control_endpoint(self, region_name, account): self._validate_host_labels(region_name, account) - netloc = [ - account, - 's3-control', - ] - self._add_dualstack(netloc) - dns_suffix = self._get_dns_suffix(region_name) - netloc.extend([region_name, dns_suffix]) + if self._endpoint_url: + endpoint_url_netloc = urlsplit(self._endpoint_url).netloc + netloc = [account, endpoint_url_netloc] + else: + netloc = [ + account, + 's3-control', + ] + self._add_dualstack(netloc) + dns_suffix = self._get_dns_suffix(region_name) + netloc.extend([region_name, dns_suffix]) return self._construct_netloc(netloc) def _construct_outpost_endpoint(self, region_name): self._validate_host_labels(region_name) - netloc = [ - 's3-outposts', - region_name, - self._get_dns_suffix(region_name), - ] + if self._endpoint_url: + return urlsplit(self._endpoint_url).netloc + else: + netloc = [ + 's3-outposts', + region_name, + self._get_dns_suffix(region_name), + ] return self._construct_netloc(netloc) def _construct_netloc(self, netloc): diff --git a/docs/source/_static/shortbreadv1.js 
b/docs/source/_static/shortbreadv1.js deleted file mode 100644 index abc5bf52..00000000 --- a/docs/source/_static/shortbreadv1.js +++ /dev/null @@ -1,2 +0,0 @@ -!function(e,a){if("object"==typeof exports&&"object"==typeof module)module.exports=a();else if("function"==typeof define&&define.amd)define([],a);else{var t=a();for(var n in t)("object"==typeof exports?exports:e)[n]=t[n]}}(window,(function(){return function(e){var a={};function t(n){if(a[n])return a[n].exports;var c=a[n]={i:n,l:!1,exports:{}};return e[n].call(c.exports,c,c.exports,t),c.l=!0,c.exports}return t.m=e,t.c=a,t.d=function(e,a,n){t.o(e,a)||Object.defineProperty(e,a,{enumerable:!0,get:n})},t.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},t.t=function(e,a){if(1&a&&(e=t(e)),8&a)return e;if(4&a&&"object"==typeof e&&e&&e.__esModule)return e;var n=Object.create(null);if(t.r(n),Object.defineProperty(n,"default",{enumerable:!0,value:e}),2&a&&"string"!=typeof e)for(var c in e)t.d(n,c,function(a){return e[a]}.bind(null,c));return n},t.n=function(e){var a=e&&e.__esModule?function(){return e.default}:function(){return e};return t.d(a,"a",a),a},t.o=function(e,a){return Object.prototype.hasOwnProperty.call(e,a)},t.p="",t(t.s=13)}([function(e,a,t){"use strict";var n=this&&this.__assign||function(){return(n=Object.assign||function(e){for(var a,t=1,n=arguments.length;t0&&s.forEach((function(a){if("string"==typeof a)l.appendChild(n.createTextNode(a));else if("number"==typeof a)l.appendChild(n.createTextNode(s.toString()));else{if(null===a)throw Error("Unsupported child type "+a);e(l,a,n,!0)}})),i?a.appendChild(l):a.insertBefore(l,a.firstChild)}},function(e,a,t){"use strict";var n,c=function(){return void 0===n&&(n=Boolean(window&&document&&document.all&&!window.atob)),n},i=function(){var e={};return function(a){if(void 0===e[a]){var t=document.querySelector(a);if(window.HTMLIFrameElement&&t 
instanceof window.HTMLIFrameElement)try{t=t.contentDocument.head}catch(e){t=null}e[a]=t}return e[a]}}(),o=[];function r(e){for(var a=-1,t=0;t-1?"awsccc-Rtl":"";function b(){return document.querySelector("div[data-id="+r.BANNER_ID+"]")}function f(){return document.querySelector("div[data-id="+r.CUSTOMIZE_ID+"]")}function h(e,a){var t=document.querySelector("label[data-id=awsccc-u-cb-"+e+"-label]"),n=t.classList,c=t.querySelector("input");a?(c.setAttribute("checked",""),n.add("awsccc-u-cb-checkbox-active")):(n.remove("awsccc-u-cb-checkbox-active"),c.removeAttribute("checked")),c.setAttribute("aria-checked",""+a)}var g=function(e){var a=e.event,t=e.category;"checkbox"===a.target.getAttribute("type")&&h(t,!p(t))},m=function(a){return function(t,n){var c=b().querySelector("div[data-id=awsccc-cb-tabstart]");document.querySelector("div[data-id="+r.CUSTOMIZE_ID+"]").style.cssText="display: none",b().style.cssText="display: none",c.setAttribute("tabindex","-1"),e.onSaveConsent(t),document.body.classList.remove("awsccc-cs-modal-open"),e.log("info")(a,{detail:"Save Consent Clicked",source:n,cookie:e.getConsentCookie()})}},k=function(e){"Escape"===e.key&&x()},v=function(){return e.getConsentCookie()||u.DEFAULT_COOKIE},w=function(a){var t;t=v(),o.COOKIE_CATEGORIES.filter((function(e){return e!==o.ESSENTIAL})).forEach((function(e){h(e,t[e])})),f().addEventListener("keydown",k),f().style.cssText="display: block",document.body.classList.add("awsccc-cs-modal-open");var n=document.querySelectorAll("div[data-id="+r.TABTRAP_ID+"]");l.convertToArray(n).forEach((function(e,a){0===a&&e.focus({preventScroll:!0}),e.setAttribute("tabindex","0")})),e.log("info")("customizeCookies",{detail:"Customize Consent Clicked",source:a,cookie:e.getConsentCookie()})},x=function(){f().removeEventListener("keydown",k),f().style.cssText="display: none",document.body.classList.remove("awsccc-cs-modal-open");var 
e=f().querySelectorAll("div[data-id="+r.TABTRAP_ID+"]");(l.convertToArray(e).forEach((function(e){e.setAttribute("tabindex","-1")})),"block"===b().style.display)&&b().querySelector("div[data-id=awsccc-cb-tabstart]").focus({preventScroll:!0})};return d.default((function(){document.querySelector("#"+r.CONTAINER_ID)||n.render(e.parent||document.body,n.act("div",{id:r.CONTAINER_ID},n.act("div",{id:r.APP_ID,class:t},n.act(c.default,{showConsentSelector:w,handleSaveClick:m("acceptAll"),localizedText:a.consentBanner}),n.act(i.default,{consentState:v(),handleSaveClick:m("customize"),handleCheckboxToggle:g,localizedText:a.consentSelector,closeConsentSelector:x}))))})),{showConsentSelector:function(e){d.default((function(){w(e)}))},showBanner:function(e){d.default((function(){var a;a=b().querySelector("div[data-id=awsccc-cb-tabstart]"),b().style.cssText="display: block",a.setAttribute("tabindex","0"),a.focus({preventScroll:!0}),e()}))}}}a.isChecked=p,a.default={createShortbreadUi:function(e){return b(e)}}},function(e,a,t){"use strict";var n=this&&this.__assign||function(){return(n=Object.assign||function(e){for(var a,t=1,n=arguments.length;t0)try{var o=JSON.parse(atob(c[c.length-1]));return 1===(n=o).e&&"number"==typeof n.p&&"number"==typeof n.f&&"number"==typeof n.a&&"string"==typeof n.i&&"string"==typeof n.v?{essential:1===(t=o).e,performance:1===t.p,functional:1===t.f,advertising:1===t.a,id:t.i,version:t.v}:void i("getCookie",{detail:"Cookie format is not valid",cookie:o})}catch(e){return void i("getCookie",{detail:"Error parsing cookie",cookie:c[c.length-1]})}}function r(e){void 0===e&&(e=function(){return document.cookie});var a=o(e());if(a&&a.id)return a.id}function s(e){document.cookie=e}a.getConsentCookie=function(e,a){void 0===e&&(e=function(){return document.cookie});var t=o(e(),a);if(t)return{essential:t.essential,performance:t.performance,functional:t.functional,advertising:t.advertising}},a.setConsentCookie=function(e,a,t,o,l){void 
0===a&&(a=".aws.amazon.com"),void 0===t&&(t=i.DEFAULT_COOKIE_AGE),void 0===o&&(o=c.v4),void 0===l&&(l=s);var u,d=n(n({},e),{id:r()||o(),version:i.COOKIE_VERSION}),p={e:(u=d).essential?1:0,p:u.performance?1:0,f:u.functional?1:0,a:u.advertising?1:0,i:u.id,v:u.version};return l("awsccc="+btoa(JSON.stringify(p))+"; domain="+a+"; path=/; max-age="+t+"; secure=true; SameSite=Lax"),d}},function(e,a,t){"use strict";Object.defineProperty(a,"__esModule",{value:!0}),a.queryGeolocationByHttpGetRequest=a.timestampUrl=a.QUERY_PARAM_KEY=void 0;var n=t(11);a.QUERY_PARAM_KEY="awsccc",a.timestampUrl=function(e){if(-1!==e.indexOf("?")){var t=e.split("?");e=t[0]+"?"+a.QUERY_PARAM_KEY+"="+Date.now()+"&"+t[1]}else{if(-1===e.indexOf("#"))return e+"?"+a.QUERY_PARAM_KEY+"="+Date.now();t=e.split("#");e=t[0]+"?"+a.QUERY_PARAM_KEY+"="+Date.now()+"#"+t[1]}return e},a.queryGeolocationByHttpGetRequest=function(e,t,c){function i(a,t,n,c,i){a("info")("geolocationLatency",{metric:t,region:n,detail:c,url:e,status:i.status})}return void 0===e&&(e="https://prod.tools.shortbread.aws.dev/1x1.png"),void 0===t&&(t=5e3),void 0===c&&(c=n.DEFAULT_LOGGER),function(o,r){void 0===r&&(r=c||n.DEFAULT_LOGGER);var s=Date.now(),l=new XMLHttpRequest;l.addEventListener("load",(function(){var e=403===l.status?"NON-EU":"EU";i(r,Date.now()-s,e,"Geolocation Response Received",l),o(e)})),l.addEventListener("timeout",(function(){o("EU");var a="Geolocation Request Timed out";i(r,t,"EU",a,l),r("error")("geolocationRequestTimeout",{url:e,timeoutSetting:t,detail:a})})),l.open("GET",a.timestampUrl(e)),l.timeout=t,l.send()}},a.default=a.queryGeolocationByHttpGetRequest},function(e,a,t){"use strict";var n=this&&this.__assign||function(){return(n=Object.assign||function(e){for(var a,t=1,n=arguments.length;td)&&void 0===e.nsecs&&(h=0),h>=1e4)throw new Error("uuid.v1(): Can't create more than 10M uuids/sec");d=f,p=h,l=r;var 
m=(1e4*(268435455&(f+=122192928e5))+h)%4294967296;c[n++]=m>>>24&255,c[n++]=m>>>16&255,c[n++]=m>>>8&255,c[n++]=255&m;var k=f/4294967296*1e4&268435455;c[n++]=k>>>8&255,c[n++]=255&k,c[n++]=k>>>24&15|16,c[n++]=k>>>16&255,c[n++]=r>>>8|128,c[n++]=255&r;for(var v=0;v<6;++v)c[n+v]=o[v];return a||u(c)};var f=function(e,a,t){function n(e,n,c,i){var o=c&&i||0;if("string"==typeof e&&(e=function(e){e=unescape(encodeURIComponent(e));for(var a=[],t=0;t>>9<<4)+1}function g(e,a){var t=(65535&e)+(65535&a);return(e>>16)+(a>>16)+(t>>16)<<16|65535&t}function m(e,a,t,n,c,i){return g((o=g(g(a,e),g(n,i)))<<(r=c)|o>>>32-r,t);var o,r}function k(e,a,t,n,c,i,o){return m(a&t|~a&n,e,a,c,i,o)}function v(e,a,t,n,c,i,o){return m(a&n|t&~n,e,a,c,i,o)}function w(e,a,t,n,c,i,o){return m(a^t^n,e,a,c,i,o)}function x(e,a,t,n,c,i,o){return m(t^(a|~n),e,a,c,i,o)}var y=f("v3",48,(function(e){if("string"==typeof e){var a=unescape(encodeURIComponent(e));e=new Uint8Array(a.length);for(var t=0;t>5]>>>n%32&255,i=parseInt("0123456789abcdef".charAt(c>>>4&15)+"0123456789abcdef".charAt(15&c),16);a.push(i)}return a}(function(e,a){e[a>>5]|=128<>5]|=(255&e[n/8])<>>32-a}var _=f("v5",80,(function(e){var a=[1518500249,1859775393,2400959708,3395469782],t=[1732584193,4023233417,2562383102,271733878,3285377520];if("string"==typeof e){var n=unescape(encodeURIComponent(e));e=[];for(var c=0;c>>0;v=k,k=m,m=A(g,30)>>>0,g=h,h=y}t[0]=t[0]+h>>>0,t[1]=t[1]+g>>>0,t[2]=t[2]+m>>>0,t[3]=t[3]+k>>>0,t[4]=t[4]+v>>>0}return[t[0]>>24&255,t[0]>>16&255,t[0]>>8&255,255&t[0],t[1]>>24&255,t[1]>>16&255,t[1]>>8&255,255&t[1],t[2]>>24&255,t[2]>>16&255,t[2]>>8&255,255&t[2],t[3]>>24&255,t[3]>>16&255,t[3]>>8&255,255&t[3],t[4]>>24&255,t[4]>>16&255,t[4]>>8&255,255&t[4]]}))},function(e,a,t){e.exports=t(14)},function(e,a,t){"use strict";var n=this&&this.__createBinding||(Object.create?function(e,a,t,n){void 0===n&&(n=t),Object.defineProperty(e,n,{enumerable:!0,get:function(){return a[t]}})}:function(e,a,t,n){void 
0===n&&(n=t),e[n]=a[t]}),c=this&&this.__exportStar||function(e,a){for(var t in e)"default"===t||a.hasOwnProperty(t)||n(a,e,t)};Object.defineProperty(a,"__esModule",{value:!0});var i=t(15);Object.defineProperty(a,"AWSCShortbread",{enumerable:!0,get:function(){return i.AWSCShortbread}}),c(t(10),a),c(t(9),a)},function(e,a,t){"use strict";var n=this&&this.__assign||function(){return(n=Object.assign||function(e){for(var a,t=1,n=arguments.length;tspan{color:#687078}",""]),e.exports=a},function(e,a,t){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var n=t(0),c=t(6),i=t(3);t(35);var o=t(5);a.default=function(e){var a=e.handleSaveClick,t=e.handleCancelClick,r=e.localizedText;return n.act("div",{id:"awsccc-cs-f-c"},n.act(c.default,{dataId:i.CUSTOMIZE_CANCEL_BTN_ID,variant:"secondary",events:{onclick:t},text:r["button-cancel"],props:{"aria-label":r["button-cancel-aria-label"]}}),n.act(c.default,{dataId:i.CUSTOMIZE_SAVE_BTN_ID,variant:"primary",events:{onclick:function(){a({essential:!0,performance:o.isChecked("performance"),functional:o.isChecked("functional"),advertising:o.isChecked("advertising")},"preferencesModal")}},text:r["button-save"],props:{"aria-label":r["button-save-aria-label"]}}))}},function(e,a,t){var n=t(1),c=t(36);"string"==typeof(c=c.__esModule?c.default:c)&&(c=[[e.i,c,""]]);var i={insert:"head",singleton:!1};n(c,i);e.exports=c.locals||{}},function(e,a,t){(a=t(2)(!1)).push([e.i,"#awsccc-sb-ux-c #awsccc-sb-a.awsccc-Rtl #awsccc-cs-f-c{text-align:left}#awsccc-sb-ux-c #awsccc-sb-a #awsccc-cs-f-c{text-decoration:none;padding:10px 20px;text-align:right;border-top:1px solid #eaeded;display:flex;justify-content:center;flex-wrap:wrap}#awsccc-sb-ux-c #awsccc-sb-a #awsccc-cs-f-c .awsccc-u-btn{margin-left:10px}#awsccc-sb-ux-c #awsccc-sb-a #awsccc-cs-f-c .awsccc-u-btn.awsccc-u-btn-secondary{background-color:#fff;border-color:#fff;color:#545b64;margin-bottom:6px}#awsccc-sb-ux-c #awsccc-sb-a #awsccc-cs-f-c 
.awsccc-u-btn.awsccc-u-btn-secondary:hover{color:#000;background-color:#fafafa}@media screen and (min-width: 700px){#awsccc-sb-ux-c #awsccc-sb-a #awsccc-cs-f-c{display:block}#awsccc-sb-ux-c #awsccc-sb-a #awsccc-cs-f-c .awsccc-u-btn.awsccc-u-btn-secondary{margin-bottom:0}}",""]),e.exports=a},function(e,a,t){var n=t(1),c=t(38);"string"==typeof(c=c.__esModule?c.default:c)&&(c=[[e.i,c,""]]);var i={insert:"head",singleton:!1};n(c,i);e.exports=c.locals||{}},function(e,a,t){(a=t(2)(!1)).push([e.i,"#awsccc-sb-ux-c #awsccc-sb-a #awsccc-cs-container{display:flex;align-items:center;justify-items:center;bottom:0;left:0;right:0;top:0;position:fixed;z-index:10002;outline:0;overflow:hidden}#awsccc-sb-ux-c #awsccc-sb-a #awsccc-cs-container-inner{max-width:820px;box-sizing:border-box;outline:none;margin:10px auto;width:calc(100vw - 20px)}#awsccc-sb-ux-c #awsccc-sb-a #awsccc-cs-content{background-color:#fff;border-radius:0;box-sizing:border-box;margin-bottom:0;word-wrap:break-word;box-shadow:0 1px 1px 0 rgba(0,28,36,.3),1px 1px 1px 0 rgba(0,28,36,.15),-1px 1px 1px 0 rgba(0,28,36,.15)}#awsccc-sb-ux-c #awsccc-sb-a #awsccc-cs-header{background-color:#fafafa;padding:19px 20px;border-bottom:1px solid #eaeded}#awsccc-sb-ux-c #awsccc-sb-a #awsccc-cs-title{min-width:0;word-break:break-word;color:#16191f;flex:auto}#awsccc-sb-ux-c #awsccc-sb-a #awsccc-cs-title h2{font-size:18px;font-weight:700;margin:0}#awsccc-sb-ux-c #awsccc-sb-a #awsccc-cs-modalBody{overflow-y:auto;max-height:calc(100vh - 200px);padding:19px 20px}#awsccc-sb-ux-c #awsccc-sb-a #awsccc-cs-modalOverlay{background-color:rgba(242,243,243,.9);position:fixed;z-index:10001;right:0;top:0;bottom:0;left:0}",""]),e.exports=a},function(e,a,t){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var n=t(0);t(40),a.default=function(e){var a=e.size,t=void 0===a?"10px":a;return n.act("div",{class:"awsccc-u-i-open-c"},n.act("svg",{class:"awsccc-u-i-open",viewBox:"0 0 16 
16",focusable:"false","aria-hidden":"true",style:{height:t,width:t}},n.act("path",{class:"awsccc-stroke-linecap-square",d:"M10 2h4v4"}),n.act("path",{d:"M6 10l8-8"}),n.act("path",{class:"awsccc-stroke-linejoin-round",d:"M14 9.048V14H2V2h5"})))}},function(e,a,t){var n=t(1),c=t(41);"string"==typeof(c=c.__esModule?c.default:c)&&(c=[[e.i,c,""]]);var i={insert:"head",singleton:!1};n(c,i);e.exports=c.locals||{}},function(e,a,t){(a=t(2)(!1)).push([e.i,"#awsccc-sb-ux-c #awsccc-sb-a .awsccc-u-i-open-c{display:inline-block;vertical-align:middle;line-height:1em;padding-left:.3em}#awsccc-sb-ux-c #awsccc-sb-a .awsccc-u-i-open-c svg{stroke-width:2px;pointer-events:none;fill:none;padding-bottom:1px}#awsccc-sb-ux-c #awsccc-sb-a .awsccc-u-i-open-c svg .awsccc-stroke-linecap-square{stroke-linecap:square}#awsccc-sb-ux-c #awsccc-sb-a .awsccc-u-i-open-c svg .awsccc-stroke-linejoin-round{stroke-linejoin:round}#awsccc-sb-ux-c #awsccc-sb-a .awsccc-u-i-open-c svg path{stroke:currentColor}",""]),e.exports=a},function(e,a,t){var n=t(1),c=t(43);"string"==typeof(c=c.__esModule?c.default:c)&&(c=[[e.i,c,""]]);var i={insert:"head",singleton:!1};n(c,i);e.exports=c.locals||{}},function(e,a,t){(a=t(2)(!1)).push([e.i,'#awsccc-sb-ux-c #awsccc-sb-a *{font-family:"Amazon Ember","HelveticaNeue","Helvetica Neue","Amazon Ember",Roboto,"Roboto-Regular","Amazon Ember",Helvetica,Arial,sans-serif;font-size:14px;line-height:20px;color:#16191f;text-align:left;background:none;border:0}#awsccc-sb-ux-c #awsccc-sb-a.awsccc-Rtl *{direction:rtl;text-align:right}#awsccc-sb-ux-c #awsccc-sb-a.awsccc-Rtl .awsccc-cs-s-container .awsccc-cs-s-action{right:auto;left:20px}@media screen and (min-width: 1020px){#awsccc-sb-ux-c #awsccc-sb-a.awsccc-Rtl #awsccc-cb-c #awsccc-cb-title{padding-right:40px}}#awsccc-sb-ux-c #awsccc-sb-a a,#awsccc-sb-ux-c #awsccc-sb-a a>span,#awsccc-sb-ux-c #awsccc-sb-a a svg path{color:#0073bb;text-decoration:none}#awsccc-sb-ux-c #awsccc-sb-a a:hover,#awsccc-sb-ux-c #awsccc-sb-a 
a>span:hover,#awsccc-sb-ux-c #awsccc-sb-a a svg path:hover{color:#0073bb;text-decoration:underline}#awsccc-sb-ux-c #awsccc-sb-a .awsccc-tab-helper{outline:0;text-decoration:none}.awsccc-cs-modal-open{overflow:hidden;-webkit-box-sizing:border-box;box-sizing:border-box}',""]),e.exports=a},function(e,a,t){"use strict";Object.defineProperty(a,"__esModule",{value:!0}),a.convertToArray=a.update=void 0,a.update=function(e,a){return Object.keys(a).forEach((function(t){e[t]=a[t]})),e},a.convertToArray=function(e){return Array.prototype.slice.call(e)}},function(e,a,t){"use strict";Object.defineProperty(a,"__esModule",{value:!0});a.default=function(e,a,t){function n(){a.removeEventListener("DOMContentLoaded",n),t.removeEventListener("load",n),e()}void 0===a&&(a=document),void 0===t&&(t=window),"loading"!==a.readyState?t.setTimeout(e):(a.addEventListener("DOMContentLoaded",n),t.addEventListener("load",n))}}])})); -//# sourceMappingURL=index.js.map \ No newline at end of file diff --git a/docs/source/conf.py b/docs/source/conf.py index 9acdcb94..b934a97d 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -52,9 +52,9 @@ copyright = u'2013, Mitch Garnaat' # built documents. # # The short X.Y version. -version = '1.19.' +version = '1.20' # The full version, including alpha/beta/rc tags. -release = '1.19.63' +release = '1.20.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/setup.cfg b/setup.cfg index 5b7d6cad..e680b271 100644 --- a/setup.cfg +++ b/setup.cfg @@ -5,8 +5,7 @@ universal = 1 requires-dist = python-dateutil>=2.1,<3.0.0 jmespath>=0.7.1,<1.0.0 - urllib3>=1.25.4,<1.25.8; python_version=='3.4' - urllib3>=1.25.4,<1.27; python_version!='3.4' + urllib3>=1.25.4,<1.27 [egg_info] tag_build = diff --git a/setup.py b/setup.py index 373533e0..90d68b6c 100644 --- a/setup.py +++ b/setup.py @@ -52,6 +52,7 @@ setup( install_requires=requires, extras_require={}, license="Apache License 2.0", + python_requires=">= 2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*", classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', @@ -62,8 +63,6 @@ setup( 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.4', - 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', diff --git a/tests/functional/test_s3.py b/tests/functional/test_s3.py index b7645125..b545bea9 100644 --- a/tests/functional/test_s3.py +++ b/tests/functional/test_s3.py @@ -562,12 +562,24 @@ class TestAccesspointArn(BaseS3ClientConfigurationTest): accesspoint_arn = ( 'arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint' ) - self.client, _ = self.create_stubbed_s3_client( + self.client, http_stubber = self.create_stubbed_s3_client( endpoint_url='https://custom.com') - with self.assertRaises( - botocore.exceptions. 
- UnsupportedS3AccesspointConfigurationError): - self.client.list_objects(Bucket=accesspoint_arn) + http_stubber.add_response() + self.client.list_objects(Bucket=accesspoint_arn) + expected_endpoint = 'myendpoint-123456789012.custom.com' + self.assert_endpoint(http_stubber.requests[0], expected_endpoint) + + def test_accesspoint_arn_with_custom_endpoint_and_dualstack(self): + accesspoint_arn = ( + 'arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint' + ) + self.client, http_stubber = self.create_stubbed_s3_client( + endpoint_url='https://custom.com', + config=Config(s3={'use_dualstack_endpoint': True})) + http_stubber.add_response() + self.client.list_objects(Bucket=accesspoint_arn) + expected_endpoint = 'myendpoint-123456789012.custom.com' + self.assert_endpoint(http_stubber.requests[0], expected_endpoint) def test_accesspoint_arn_with_s3_accelerate(self): accesspoint_arn = ( @@ -745,6 +757,24 @@ class TestAccesspointArn(BaseS3ClientConfigurationTest): ) self.assert_endpoint(request, expected_endpoint) + def test_basic_outpost_arn(self): + outpost_arn = ( + 'arn:aws:s3-outposts:us-west-2:123456789012:outpost:' + 'op-01234567890123456:accesspoint:myaccesspoint' + ) + self.client, self.http_stubber = self.create_stubbed_s3_client( + endpoint_url='https://custom.com', + region_name='us-east-1') + self.http_stubber.add_response() + self.client.list_objects(Bucket=outpost_arn) + request = self.http_stubber.requests[0] + self.assert_signing_name(request, 's3-outposts') + self.assert_signing_region(request, 'us-west-2') + expected_endpoint = ( + 'myaccesspoint-123456789012.op-01234567890123456.custom.com' + ) + self.assert_endpoint(request, expected_endpoint) + def test_outpost_arn_with_s3_accelerate(self): outpost_arn = ( 'arn:aws:s3-outposts:us-west-2:123456789012:outpost:' diff --git a/tests/functional/test_s3_control_redirects.py b/tests/functional/test_s3_control_redirects.py index 7dcf7f8a..b6e53204 100644 --- a/tests/functional/test_s3_control_redirects.py 
+++ b/tests/functional/test_s3_control_redirects.py @@ -420,9 +420,10 @@ class TestS3ControlRedirection(unittest.TestCase): def test_outpost_redirection_custom_endpoint(self): self._bootstrap_client(endpoint_url='https://outpost.foo.com/') self.stubber.add_response() - with self.assertRaises(UnsupportedS3ControlConfigurationError): - with self.stubber: - self.client.create_bucket(Bucket='foo', OutpostId='op-123') + with self.stubber: + self.client.create_bucket(Bucket='foo', OutpostId='op-123') + _assert_netloc(self.stubber, 'outpost.foo.com') + _assert_header(self.stubber, 'x-amz-outpost-id', 'op-123') def test_normal_ap_request_has_correct_endpoint(self): self.stubber.add_response() @@ -430,6 +431,13 @@ class TestS3ControlRedirection(unittest.TestCase): self.client.get_access_point_policy(Name='MyAp', AccountId='1234') _assert_netloc(self.stubber, '1234.s3-control.us-west-2.amazonaws.com') + def test_normal_ap_request_custom_endpoint(self): + self._bootstrap_client(endpoint_url='https://example.com/') + self.stubber.add_response() + with self.stubber: + self.client.get_access_point_policy(Name='MyAp', AccountId='1234') + _assert_netloc(self.stubber, '1234.example.com') + def test_normal_bucket_request_has_correct_endpoint(self): self.stubber.add_response() with self.stubber: diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index d55f03f8..0d4a93ba 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -10,6 +10,8 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
+import io + from tests import unittest from tests import RawResponse from dateutil.tz import tzutc, tzoffset @@ -918,6 +920,19 @@ class TestSwitchToVirtualHostStyle(unittest.TestCase): 'https://bucket.s3.amazonaws.com/key.txt') +class TestSwitchToChunkedEncodingForNonSeekableObjects(unittest.TestCase): + def test_switch_to_chunked_encodeing_for_stream_like_object(self): + request = AWSRequest( + method='POST', headers={}, + data=io.BufferedIOBase(b"some initial binary data"), + url='https://foo.amazonaws.com/bucket/key.txt' + ) + prepared_request = request.prepare() + self.assertEqual( + prepared_request.headers, {'Transfer-Encoding': 'chunked'} + ) + + class TestInstanceCache(unittest.TestCase): class DummyClass(object): def __init__(self, cache): @@ -1987,12 +2002,15 @@ class TestS3EndpointSetter(unittest.TestCase): ) self.assertEqual(request.url, expected_url) - def test_accesspoint_errors_for_custom_endpoint(self): + def test_accesspoint_supports_custom_endpoint(self): endpoint_setter = self.get_endpoint_setter( endpoint_url='https://custom.com') request = self.get_s3_accesspoint_request() - with self.assertRaises(UnsupportedS3AccesspointConfigurationError): - self.call_set_endpoint(endpoint_setter, request=request) + self.call_set_endpoint(endpoint_setter, request=request) + expected_url = 'https://%s-%s.custom.com/' % ( + self.accesspoint_name, self.account, + ) + self.assertEqual(request.url, expected_url) def test_errors_for_mismatching_partition(self): endpoint_setter = self.get_endpoint_setter(partition='aws-cn')