diff --git a/PKG-INFO b/PKG-INFO index b45fd7df..0fe5b674 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: botocore -Version: 1.15.21 +Version: 1.15.26 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services diff --git a/botocore.egg-info/PKG-INFO b/botocore.egg-info/PKG-INFO index b45fd7df..0fe5b674 100644 --- a/botocore.egg-info/PKG-INFO +++ b/botocore.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: botocore -Version: 1.15.21 +Version: 1.15.26 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services diff --git a/botocore/__init__.py b/botocore/__init__.py index 113458db..88ae6c5e 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re import logging -__version__ = '1.15.21' +__version__ = '1.15.26' class NullHandler(logging.Handler): diff --git a/botocore/data/acm/2015-12-08/service-2.json b/botocore/data/acm/2015-12-08/service-2.json index a748c481..103600b7 100644 --- a/botocore/data/acm/2015-12-08/service-2.json +++ b/botocore/data/acm/2015-12-08/service-2.json @@ -86,7 +86,7 @@ {"shape":"RequestInProgressException"}, {"shape":"InvalidArnException"} ], - "documentation":"

Retrieves a certificate specified by an ARN and its certificate chain . The chain is an ordered list of certificates that contains the end entity certificate, intermediate certificates of subordinate CAs, and the root certificate in that order. The certificate and certificate chain are base64 encoded. If you want to decode the certificate to see the individual fields, you can use OpenSSL.

" + "documentation":"

Retrieves an Amazon-issued certificate and its certificate chain. The chain consists of the certificate of the issuing CA and the intermediate certificates of any other subordinate CAs. All of the certificates are base64 encoded. You can use OpenSSL to decode the certificates and inspect individual fields.
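Editor's note: a minimal boto3 sketch of this call (the ARN is a placeholder); the two response members map to the Certificate and CertificateChain shapes below.

import boto3

acm = boto3.client("acm")
response = acm.get_certificate(
    CertificateArn="arn:aws:acm:us-east-1:123456789012:certificate/example"
)
# Both values are PEM (base64) encoded; pipe either through
# `openssl x509 -noout -text` to inspect individual fields.
print(response["Certificate"])
print(response["CertificateChain"])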

" }, "ImportCertificate":{ "name":"ImportCertificate", @@ -498,7 +498,7 @@ }, "ResourceRecord":{ "shape":"ResourceRecord", - "documentation":"

Contains the CNAME record that you add to your DNS database for domain validation. For more information, see Use DNS to Validate Domain Ownership.

" + "documentation":"

Contains the CNAME record that you add to your DNS database for domain validation. For more information, see Use DNS to Validate Domain Ownership.

Note: The CNAME information that you need does not include the name of your domain. If you include
 your domain name in the DNS database CNAME record, validation fails.
 For example, if the name is \"_a79865eb4cd1a6ab990a45779b4e0b96.yourdomain.com\", only \"_a79865eb4cd1a6ab990a45779b4e0b96\" must be used.
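Editor's note: a minimal sketch (placeholder ARN) of trimming the returned CNAME name down to that first label via DescribeCertificate's ResourceRecord member.

import boto3

acm = boto3.client("acm")
cert = acm.describe_certificate(
    CertificateArn="arn:aws:acm:us-east-1:123456789012:certificate/example"
)
record = cert["Certificate"]["DomainValidationOptions"][0]["ResourceRecord"]
label = record["Name"].split(".")[0]  # e.g. "_a79865eb4cd1a6ab990a45779b4e0b96"
print(label, record["Type"], record["Value"])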

" }, "ValidationMethod":{ "shape":"ValidationMethod", @@ -664,11 +664,11 @@ "members":{ "Certificate":{ "shape":"CertificateBody", - "documentation":"

String that contains the ACM certificate represented by the ARN specified at input.

" + "documentation":"

The ACM-issued certificate corresponding to the ARN specified as input.

" }, "CertificateChain":{ "shape":"CertificateChain", - "documentation":"

The certificate chain that contains the root certificate issued by the certificate authority (CA).

" + "documentation":"

Certificates forming the requested certificate's chain of trust. The chain consists of the certificate of the issuing CA and the intermediate certificates of any other subordinate CAs.

" } } }, @@ -822,7 +822,7 @@ "members":{ "message":{"shape":"String"} }, - "documentation":"

An ACM limit has been exceeded.

", + "documentation":"

An ACM quota has been exceeded.

", "exception":true }, "ListCertificatesRequest":{ @@ -885,7 +885,7 @@ }, "NextToken":{ "type":"string", - "max":320, + "max":10000, "min":1, "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]*" }, @@ -904,7 +904,7 @@ }, "PrivateKeyBlob":{ "type":"blob", - "max":524288, + "max":5120, "min":1, "sensitive":true }, @@ -996,7 +996,7 @@ }, "SubjectAlternativeNames":{ "shape":"DomainList", - "documentation":"

Additional FQDNs to be included in the Subject Alternative Name extension of the ACM certificate. For example, add the name www.example.net to a certificate for which the DomainName field is www.example.com if users can reach your site by using either name. The maximum number of domain names that you can add to an ACM certificate is 100. However, the initial limit is 10 domain names. If you need more than 10 names, you must request a limit increase. For more information, see Limits.

The maximum length of a SAN DNS name is 253 octets. The name is made up of multiple labels separated by periods. No label can be longer than 63 octets. Consider the following examples:

" + "documentation":"

Additional FQDNs to be included in the Subject Alternative Name extension of the ACM certificate. For example, add the name www.example.net to a certificate for which the DomainName field is www.example.com if users can reach your site by using either name. The maximum number of domain names that you can add to an ACM certificate is 100. However, the initial quota is 10 domain names. If you need more than 10 names, you must request a quota increase. For more information, see Quotas.

The maximum length of a SAN DNS name is 253 octets. The name is made up of multiple labels separated by periods. No label can be longer than 63 octets. Consider the following examples:
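Editor's note: a minimal sketch of a request that uses this member (domain names are placeholders).

import boto3

acm = boto3.client("acm")
response = acm.request_certificate(
    DomainName="www.example.com",
    SubjectAlternativeNames=["www.example.net"],  # additional FQDNs, up to the quota
    ValidationMethod="DNS",
)
print(response["CertificateArn"])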

" }, "IdempotencyToken":{ "shape":"IdempotencyToken", diff --git a/botocore/data/cognito-idp/2016-04-18/service-2.json b/botocore/data/cognito-idp/2016-04-18/service-2.json index 30d785ca..13af0ec5 100644 --- a/botocore/data/cognito-idp/2016-04-18/service-2.json +++ b/botocore/data/cognito-idp/2016-04-18/service-2.json @@ -1109,7 +1109,7 @@ {"shape":"UserNotConfirmedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Calling this API causes a message to be sent to the end user with a confirmation code that is required to change the user's password. For the Username parameter, you can use the username or user alias. If a verified phone number exists for the user, the confirmation code is sent to the phone number. Otherwise, if a verified email exists, the confirmation code is sent to the email. If neither a verified phone number nor a verified email exists, InvalidParameterException is thrown. To use the confirmation code for resetting the password, call .

", + "documentation":"

Calling this API causes a message to be sent to the end user with a confirmation code that is required to change the user's password. For the Username parameter, you can use the username or user alias. The confirmation code is sent according to the specified AccountRecoverySetting. For more information, see Recovering User Accounts in the Amazon Cognito Developer Guide. If neither a verified phone number nor a verified email exists, an InvalidParameterException is thrown. To use the confirmation code for resetting the password, call ConfirmForgotPassword.
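Editor's note: a minimal sketch of the flow (client ID, username, and code are placeholders).

import boto3

idp = boto3.client("cognito-idp")
idp.forgot_password(ClientId="1example23456789", Username="jane")
# The user receives the code via the configured recovery channel, then:
idp.confirm_forgot_password(
    ClientId="1example23456789",
    Username="jane",
    ConfirmationCode="123456",
    Password="NewPassword1!",
)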

", "authtype":"none" }, "GetCSVHeader":{ @@ -3033,7 +3033,7 @@ "documentation":"

If UserDataShared is true, Amazon Cognito will include user data in the events it publishes to Amazon Pinpoint analytics.

" } }, - "documentation":"

The Amazon Pinpoint analytics configuration for collecting metrics for a user pool.

" + "documentation":"

The Amazon Pinpoint analytics configuration for collecting metrics for a user pool.

Cognito User Pools only supports sending events to Amazon Pinpoint projects in the US East (N. Virginia) us-east-1 Region, regardless of the region in which the user pool resides.
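Editor's note: a minimal sketch of attaching such a configuration to an app client (IDs and ARN are placeholders; the Pinpoint project must live in us-east-1).

import boto3

idp = boto3.client("cognito-idp", region_name="us-east-1")
idp.update_user_pool_client(
    UserPoolId="us-east-1_EXAMPLE",
    ClientId="1example23456789",
    AnalyticsConfiguration={
        "ApplicationId": "1234567890abcdef1234567890abcdef",  # Pinpoint project ID
        "RoleArn": "arn:aws:iam::123456789012:role/pinpoint-example",
        "ExternalId": "example-external-id",
        "UserDataShared": True,
    },
)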

" }, "AnalyticsMetadataType":{ "type":"structure", @@ -3043,7 +3043,7 @@ "documentation":"

The endpoint ID.

" } }, - "documentation":"

An Amazon Pinpoint analytics endpoint.

An endpoint uniquely identifies a mobile device, email address, or phone number that can receive messages from Amazon Pinpoint analytics.

" + "documentation":"

An Amazon Pinpoint analytics endpoint.

An endpoint uniquely identifies a mobile device, email address, or phone number that can receive messages from Amazon Pinpoint analytics.

Cognito User Pools only supports sending events to Amazon Pinpoint projects in the US East (N. Virginia) us-east-1 Region, regardless of the region in which the user pool resides.

" }, "ArnType":{ "type":"string", @@ -3685,7 +3685,7 @@ }, "ProviderDetails":{ "shape":"ProviderDetailsType", - "documentation":"

The identity provider details. The following list describes the provider detail keys for each identity provider type.

" + "documentation":"

The identity provider details. The following list describes the provider detail keys for each identity provider type.

" }, "AttributeMapping":{ "shape":"AttributeMappingType", @@ -3841,7 +3841,7 @@ }, "AnalyticsConfiguration":{ "shape":"AnalyticsConfigurationType", - "documentation":"

The Amazon Pinpoint analytics configuration for collecting metrics for this user pool.

" + "documentation":"

The Amazon Pinpoint analytics configuration for collecting metrics for this user pool.

Cognito User Pools only supports sending events to Amazon Pinpoint projects in the US East (N. Virginia) us-east-1 Region, regardless of the region in which the user pool resides.

" }, "PreventUserExistenceErrors":{ "shape":"PreventUserExistenceErrorTypes", @@ -4655,6 +4655,10 @@ "RiskLevel":{ "shape":"RiskLevelType", "documentation":"

The risk level.

" + }, + "CompromisedCredentialsDetected":{ + "shape":"WrappedBooleanType", + "documentation":"

Indicates whether compromised credentials were detected during an authentication event.

" } }, "documentation":"

The event risk type.

" @@ -7266,7 +7270,7 @@ }, "AnalyticsConfiguration":{ "shape":"AnalyticsConfigurationType", - "documentation":"

The Amazon Pinpoint analytics configuration for collecting metrics for this user pool.

" + "documentation":"

The Amazon Pinpoint analytics configuration for collecting metrics for this user pool.

Cognito User Pools only supports sending events to Amazon Pinpoint projects in the US East (N. Virginia) us-east-1 Region, regardless of the region in which the user pool resides.

" }, "PreventUserExistenceErrors":{ "shape":"PreventUserExistenceErrorTypes", @@ -7665,7 +7669,7 @@ }, "AnalyticsConfiguration":{ "shape":"AnalyticsConfigurationType", - "documentation":"

The Amazon Pinpoint analytics configuration for the user pool client.

" + "documentation":"

The Amazon Pinpoint analytics configuration for the user pool client.

Cognito User Pools only supports sending events to Amazon Pinpoint projects in the US East (N. Virginia) us-east-1 Region, regardless of the region in which the user pool resides.

" }, "PreventUserExistenceErrors":{ "shape":"PreventUserExistenceErrorTypes", diff --git a/botocore/data/ecs/2014-11-13/service-2.json b/botocore/data/ecs/2014-11-13/service-2.json index 44764a5d..b4b62e39 100644 --- a/botocore/data/ecs/2014-11-13/service-2.json +++ b/botocore/data/ecs/2014-11-13/service-2.json @@ -62,7 +62,7 @@ {"shape":"PlatformTaskDefinitionIncompatibilityException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Runs and maintains a desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see UpdateService.

In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.

Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they're in the RUNNING state and the container instance that they're hosted on is reported as healthy by the load balancer.

There are two service scheduler strategies available:

You can optionally specify a deployment configuration for your service. The deployment is triggered by changing properties, such as the task definition or the desired count of a service, with an UpdateService operation. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%.

If a service is using the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment, as a percentage of the desired number of tasks (rounded up to the nearest integer), and while any container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they're in the RUNNING state and they're reported as healthy by the load balancer. The default value for minimum healthy percent is 100%.

If a service is using the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment, as a percentage of the desired number of tasks (rounded down to the nearest integer), and while any container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. This parameter enables you to define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%.

If a service is using either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used, although they're currently visible when describing your service.

When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS Deployment Types in the Amazon Elastic Container Service Developer Guide.

When the service scheduler launches new tasks, it determines task placement in your cluster using the following logic:

" + "documentation":"

Runs and maintains a desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action.

In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.

Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they're in the RUNNING state and the container instance that they're hosted on is reported as healthy by the load balancer.

There are two service scheduler strategies available:

You can optionally specify a deployment configuration for your service. The deployment is triggered by changing properties, such as the task definition or the desired count of a service, with an UpdateService operation. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%.

If a service is using the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment, as a percentage of the desired number of tasks (rounded up to the nearest integer), and while any container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they're in the RUNNING state and they're reported as healthy by the load balancer. The default value for minimum healthy percent is 100%.

If a service is using the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment, as a percentage of the desired number of tasks (rounded down to the nearest integer), and while any container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. This parameter enables you to define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%.
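Editor's note: a minimal sketch of those two deployment parameters on a four-task service (all names are placeholders).

import boto3

ecs = boto3.client("ecs")
ecs.create_service(
    cluster="example-cluster",
    serviceName="example-service",
    taskDefinition="example-taskdef:1",
    desiredCount=4,
    deploymentConfiguration={
        "minimumHealthyPercent": 50,  # at least 2 of 4 tasks stay RUNNING
        "maximumPercent": 200,        # at most 8 tasks RUNNING/PENDING
    },
)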

If a service is using either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used, although they're currently visible when describing your service.

When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS Deployment Types in the Amazon Elastic Container Service Developer Guide.

When the service scheduler launches new tasks, it determines task placement in your cluster using the following logic:

" }, "CreateTaskSet":{ "name":"CreateTaskSet", @@ -1362,11 +1362,11 @@ }, "startTimeout":{ "shape":"BoxedInteger", - "documentation":"

Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a COMPLETE, SUCCESS, or HEALTHY status. If a startTimeout value is specified for containerB and it does not reach the desired status within that time then containerA will give up and not start. This results in the task transitioning to a STOPPED state.

For tasks using the EC2 launch type, the container instances require at least version 1.26.0 of the container agent to enable a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

For tasks using the Fargate launch type, the task or service requires platform version 1.3.0 or later.

" + "documentation":"

Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a COMPLETE, SUCCESS, or HEALTHY status. If a startTimeout value is specified for containerB and it does not reach the desired status within that time, containerA gives up and does not start. This results in the task transitioning to a STOPPED state.

For tasks using the Fargate launch type, this parameter requires that the task or service uses platform version 1.3.0 or later. If this parameter is not specified, the default value of 3 minutes is used.

For tasks using the EC2 launch type, if the startTimeout parameter is not specified, the value set for the Amazon ECS container agent configuration variable ECS_CONTAINER_START_TIMEOUT is used by default. If neither the startTimeout parameter nor the ECS_CONTAINER_START_TIMEOUT agent configuration variable is set, the default of 3 minutes for Linux containers and 8 minutes for Windows containers is used. Your container instances require at least version 1.26.0 of the container agent to enable a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

" }, "stopTimeout":{ "shape":"BoxedInteger", - "documentation":"

Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own.

For tasks using the Fargate launch type, the max stopTimeout value is 2 minutes and the task or service requires platform version 1.3.0 or later.

For tasks using the EC2 launch type, the stop timeout value for the container takes precedence over the ECS_CONTAINER_STOP_TIMEOUT container agent configuration parameter, if used. Container instances require at least version 1.26.0 of the container agent to enable a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own.

For tasks using the Fargate launch type, the task or service requires platform version 1.3.0 or later. The maximum stop timeout value is 120 seconds; if the parameter is not specified, the default value of 30 seconds is used.

For tasks using the EC2 launch type, if the stopTimeout parameter is not specified, the value set for the Amazon ECS container agent configuration variable ECS_CONTAINER_STOP_TIMEOUT is used by default. If neither the stopTimeout parameter nor the ECS_CONTAINER_STOP_TIMEOUT agent configuration variable is set, the default of 30 seconds is used for both Linux and Windows containers. Your container instances require at least version 1.26.0 of the container agent to enable a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
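Editor's note: a minimal sketch of both timeouts in a container definition (family, names, and images are placeholders).

import boto3

ecs = boto3.client("ecs")
ecs.register_task_definition(
    family="example-family",
    containerDefinitions=[
        {"name": "containerB", "image": "example/b:latest", "memory": 128,
         "essential": False},
        {
            "name": "containerA",
            "image": "example/a:latest",
            "memory": 128,
            "essential": True,
            "dependsOn": [{"containerName": "containerB", "condition": "COMPLETE"}],
            "startTimeout": 120,  # stop waiting on containerB after 2 minutes
            "stopTimeout": 60,    # force-kill 60 seconds after the stop signal
        },
    ],
)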

" }, "hostname":{ "shape":"String", @@ -1779,7 +1779,7 @@ }, "healthCheckGracePeriodSeconds":{ "shape":"BoxedInteger", - "documentation":"

The period of time, in seconds, that the Amazon ECS service scheduler should ignore unhealthy Elastic Load Balancing target health checks after a task has first started. This is only valid if your service is configured to use a load balancer. If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can specify a health check grace period of up to 2,147,483,647 seconds. During that time, the ECS service scheduler ignores health check status. This grace period can prevent the ECS service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.

" + "documentation":"

The period of time, in seconds, that the Amazon ECS service scheduler should ignore unhealthy Elastic Load Balancing target health checks after a task has first started. This is only used when your service is configured to use a load balancer. If your service has a load balancer defined and you don't specify a health check grace period value, the default value of 0 is used.

If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can specify a health check grace period of up to 2,147,483,647 seconds. During that time, the Amazon ECS service scheduler ignores health check status. This grace period can prevent the service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.
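Editor's note: a minimal sketch (placeholder names) of setting the grace period on an existing service.

import boto3

boto3.client("ecs").update_service(
    cluster="example-cluster",
    service="example-service",
    healthCheckGracePeriodSeconds=300,  # ignore ELB health checks for 5 minutes
)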

" }, "schedulingStrategy":{ "shape":"SchedulingStrategy", @@ -3066,11 +3066,11 @@ "members":{ "targetGroupArn":{ "shape":"String", - "documentation":"

The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or task set.

A target group ARN is only specified when using an Application Load Balancer or Network Load Balancer. If you are using a Classic Load Balancer this should be omitted.

For services using the ECS deployment controller, you can specify one or multiple target groups. For more information, see Registering Multiple Target Groups with a Service in the Amazon Elastic Container Service Developer Guide.

For services using the CODE_DEPLOY deployment controller, you are required to define two target groups for the load balancer. For more information, see Blue/Green Deployment with CodeDeploy in the Amazon Elastic Container Service Developer Guide.

If your service's task definition uses the awsvpc network mode (which is required for the Fargate launch type), you must choose ip as the target type, not instance, when creating your target groups because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

" + "documentation":"

The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or task set.

A target group ARN is only specified when using an Application Load Balancer or Network Load Balancer. If you are using a Classic Load Balancer, the target group ARN should be omitted.

For services using the ECS deployment controller, you can specify one or multiple target groups. For more information, see Registering Multiple Target Groups with a Service in the Amazon Elastic Container Service Developer Guide.

For services using the CODE_DEPLOY deployment controller, you are required to define two target groups for the load balancer. For more information, see Blue/Green Deployment with CodeDeploy in the Amazon Elastic Container Service Developer Guide.

If your service's task definition uses the awsvpc network mode (which is required for the Fargate launch type), you must choose ip as the target type, not instance, when creating your target groups because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

" }, "loadBalancerName":{ "shape":"String", - "documentation":"

The name of the load balancer to associate with the Amazon ECS service or task set.

A load balancer name is only specified when using a Classic Load Balancer. If you are using an Application Load Balancer or a Network Load Balancer this should be omitted.

" + "documentation":"

The name of the load balancer to associate with the Amazon ECS service or task set.

A load balancer name is only specified when using a Classic Load Balancer. If you are using an Application Load Balancer or a Network Load Balancer, the load balancer name parameter should be omitted.

" }, "containerName":{ "shape":"String", @@ -3081,7 +3081,7 @@ "documentation":"

The port on the container to associate with the load balancer. This port must correspond to a containerPort in the task definition the tasks in the service are using. For tasks that use the EC2 launch type, the container instance they are launched on must allow ingress traffic on the hostPort of the port mapping.

" } }, - "documentation":"

Details on the load balancer or load balancers to use with a service or task set.

" + "documentation":"

The load balancer configuration to use with a service or task set.

For specific notes and restrictions regarding the use of load balancers with services and task sets, see the CreateService and CreateTaskSet actions.
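Editor's note: a minimal sketch of the two mutually exclusive variants of this structure (ARN and names are placeholders); either dict goes in the loadBalancers parameter of CreateService or CreateTaskSet.

# Application/Network Load Balancer: target group ARN, no load balancer name.
alb_entry = {
    "targetGroupArn": "arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/example/0123456789abcdef",
    "containerName": "web",
    "containerPort": 80,
}
# Classic Load Balancer: load balancer name, no target group ARN.
clb_entry = {
    "loadBalancerName": "example-classic-lb",
    "containerName": "web",
    "containerPort": 80,
}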

" }, "LoadBalancers":{ "type":"list", @@ -5140,6 +5140,14 @@ "documentation":"

Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks.

" }, "networkConfiguration":{"shape":"NetworkConfiguration"}, + "placementConstraints":{ + "shape":"PlacementConstraints", + "documentation":"

An array of task placement constraint objects to update the service to use. If no value is specified, the existing placement constraints for the service will remain unchanged. If this value is specified, it will override any existing placement constraints defined for the service. To remove all existing placement constraints, specify an empty array.

You can specify a maximum of 10 constraints per task (this limit includes constraints in the task definition and those specified at runtime).

" + }, + "placementStrategy":{ + "shape":"PlacementStrategies", + "documentation":"

The task placement strategy objects to update the service to use. If no value is specified, the existing placement strategy for the service will remain unchanged. If this value is specified, it will override the existing placement strategy defined for the service. To remove an existing placement strategy, specify an empty object.

You can specify a maximum of five strategy rules per service.
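Editor's note: a minimal sketch of the two UpdateService parameters added in this hunk (names are placeholders; empty lists would clear the existing settings).

import boto3

boto3.client("ecs").update_service(
    cluster="example-cluster",
    service="example-service",
    placementConstraints=[{"type": "distinctInstance"}],
    placementStrategy=[
        {"type": "spread", "field": "attribute:ecs.availability-zone"},
        {"type": "binpack", "field": "memory"},
    ],
)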

" + }, "platformVersion":{ "shape":"String", "documentation":"

The platform version on which your tasks in the service are running. A platform version is only specified for tasks using the Fargate launch type. If a platform version is not specified, the LATEST platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" diff --git a/botocore/data/elasticache/2015-02-02/paginators-1.json b/botocore/data/elasticache/2015-02-02/paginators-1.json index 9ee5996e..e0beae5a 100644 --- a/botocore/data/elasticache/2015-02-02/paginators-1.json +++ b/botocore/data/elasticache/2015-02-02/paginators-1.json @@ -83,6 +83,12 @@ "limit_key": "MaxRecords", "output_token": "Marker", "result_key": "UpdateActions" + }, + "DescribeGlobalReplicationGroups": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "GlobalReplicationGroups" } } } diff --git a/botocore/data/elasticache/2015-02-02/service-2.json b/botocore/data/elasticache/2015-02-02/service-2.json index 43631a56..9ac469cc 100644 --- a/botocore/data/elasticache/2015-02-02/service-2.json +++ b/botocore/data/elasticache/2015-02-02/service-2.json @@ -210,6 +210,26 @@ ], "documentation":"

Creates a new cache subnet group.

Use this parameter only when you are creating a cluster in an Amazon Virtual Private Cloud (Amazon VPC).

" }, + "CreateGlobalReplicationGroup":{ + "name":"CreateGlobalReplicationGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateGlobalReplicationGroupMessage"}, + "output":{ + "shape":"CreateGlobalReplicationGroupResult", + "resultWrapper":"CreateGlobalReplicationGroupResult" + }, + "errors":[ + {"shape":"ReplicationGroupNotFoundFault"}, + {"shape":"InvalidReplicationGroupStateFault"}, + {"shape":"GlobalReplicationGroupAlreadyExistsFault"}, + {"shape":"ServiceLinkedRoleNotFoundFault"}, + {"shape":"InvalidParameterValueException"} + ], + "documentation":"

Global Datastore for Redis offers fully managed, fast, reliable, and secure cross-region replication. Using Global Datastore for Redis, you can create cross-region read replica clusters for ElastiCache for Redis to enable low-latency reads and disaster recovery across regions. For more information, see Replication Across Regions Using Global Datastore.
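Editor's note: a minimal sketch using the CreateGlobalReplicationGroupMessage members defined later in this file (names are placeholders).

import boto3

boto3.client("elasticache", region_name="us-east-1").create_global_replication_group(
    GlobalReplicationGroupIdSuffix="example",
    GlobalReplicationGroupDescription="cross-region example",
    PrimaryReplicationGroupId="example-primary",
)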

" + }, "CreateReplicationGroup":{ "name":"CreateReplicationGroup", "http":{ @@ -235,10 +255,12 @@ {"shape":"InvalidVPCNetworkStateFault"}, {"shape":"TagQuotaPerResourceExceeded"}, {"shape":"NodeGroupsPerReplicationGroupQuotaExceededFault"}, + {"shape":"GlobalReplicationGroupNotFoundFault"}, + {"shape":"InvalidGlobalReplicationGroupStateFault"}, {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group.

A Redis (cluster mode disabled) replication group is a collection of clusters, where one of the clusters is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas.

A Redis (cluster mode enabled) replication group is a collection of 1 to 90 node groups (shards). Each node group (shard) has one read/write primary node and up to 5 read-only replica nodes. Writes to the primary are asynchronously propagated to the replicas. Redis (cluster mode enabled) replication groups partition the data across node groups (shards).

When a Redis (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. You cannot alter a Redis (cluster mode enabled) replication group after it has been created. However, if you need to increase or decrease the number of node groups (console: shards), you can avail yourself of ElastiCache for Redis' enhanced backup and restore. For more information, see Restoring From a Backup with Cluster Resizing in the ElastiCache User Guide.

This operation is valid for Redis only.

" + "documentation":"

Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group.

This API can be used to create a standalone regional replication group or a secondary replication group associated with a Global Datastore.

A Redis (cluster mode disabled) replication group is a collection of clusters, where one of the clusters is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas.

A Redis (cluster mode enabled) replication group is a collection of 1 to 90 node groups (shards). Each node group (shard) has one read/write primary node and up to 5 read-only replica nodes. Writes to the primary are asynchronously propagated to the replicas. Redis (cluster mode enabled) replication groups partition the data across node groups (shards).

When a Redis (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. You cannot alter a Redis (cluster mode enabled) replication group after it has been created. However, if you need to increase or decrease the number of node groups (console: shards), you can avail yourself of ElastiCache for Redis' enhanced backup and restore. For more information, see Restoring From a Backup with Cluster Resizing in the ElastiCache User Guide.

This operation is valid for Redis only.
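Editor's note: a minimal sketch of creating a secondary replication group through the GlobalReplicationGroupId member added in this diff (names are placeholders).

import boto3

boto3.client("elasticache", region_name="us-west-2").create_replication_group(
    ReplicationGroupId="example-secondary",
    ReplicationGroupDescription="secondary member of the Global Datastore",
    GlobalReplicationGroupId="example-global",
)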

" }, "CreateSnapshot":{ "name":"CreateSnapshot", @@ -264,6 +286,25 @@ ], "documentation":"

Creates a copy of an entire cluster or replication group at a specific moment in time.

This operation is valid for Redis only.

" }, + "DecreaseNodeGroupsInGlobalReplicationGroup":{ + "name":"DecreaseNodeGroupsInGlobalReplicationGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DecreaseNodeGroupsInGlobalReplicationGroupMessage"}, + "output":{ + "shape":"DecreaseNodeGroupsInGlobalReplicationGroupResult", + "resultWrapper":"DecreaseNodeGroupsInGlobalReplicationGroupResult" + }, + "errors":[ + {"shape":"GlobalReplicationGroupNotFoundFault"}, + {"shape":"InvalidGlobalReplicationGroupStateFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ], + "documentation":"

Decreases the number of node groups in a Global Datastore.

" + }, "DecreaseReplicaCount":{ "name":"DecreaseReplicaCount", "http":{ @@ -289,7 +330,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Dynamically decreases the number of replics in a Redis (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Redis (cluster mode enabled) replication group. This operation is performed with no cluster down time.

" + "documentation":"

Dynamically decreases the number of replicas in a Redis (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Redis (cluster mode enabled) replication group. This operation is performed with no cluster down time.

" }, "DeleteCacheCluster":{ "name":"DeleteCacheCluster", @@ -356,6 +397,24 @@ ], "documentation":"

Deletes a cache subnet group.

You cannot delete a cache subnet group if it is associated with any clusters.

" }, + "DeleteGlobalReplicationGroup":{ + "name":"DeleteGlobalReplicationGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteGlobalReplicationGroupMessage"}, + "output":{ + "shape":"DeleteGlobalReplicationGroupResult", + "resultWrapper":"DeleteGlobalReplicationGroupResult" + }, + "errors":[ + {"shape":"GlobalReplicationGroupNotFoundFault"}, + {"shape":"InvalidGlobalReplicationGroupStateFault"}, + {"shape":"InvalidParameterValueException"} + ], + "documentation":"

Deleting a Global Datastore is a two-step process: first, remove the secondary clusters with DisassociateGlobalReplicationGroup; then delete the Global Datastore itself with this operation.

Once the Global Datastore contains only the primary cluster, you can delete the Global Datastore while retaining the primary by setting RetainPrimaryReplicationGroup=true.

When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the selected resources; you cannot cancel or revert this operation.

This operation is valid for Redis only.
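Editor's note: a minimal sketch of that teardown sequence (names and region are placeholders).

import boto3

ec = boto3.client("elasticache")
ec.disassociate_global_replication_group(
    GlobalReplicationGroupId="example-global",
    ReplicationGroupId="example-secondary",
    ReplicationGroupRegion="us-west-2",
)
ec.delete_global_replication_group(
    GlobalReplicationGroupId="example-global",
    RetainPrimaryReplicationGroup=True,  # keep the primary as a standalone group
)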

" + }, "DeleteReplicationGroup":{ "name":"DeleteReplicationGroup", "http":{ @@ -532,6 +591,24 @@ ], "documentation":"

Returns events related to clusters, cache security groups, and cache parameter groups. You can obtain events specific to a particular cluster, cache security group, or cache parameter group by providing the name as a parameter.

By default, only the events occurring within the last hour are returned; however, you can retrieve up to 14 days' worth of events if necessary.

" }, + "DescribeGlobalReplicationGroups":{ + "name":"DescribeGlobalReplicationGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeGlobalReplicationGroupsMessage"}, + "output":{ + "shape":"DescribeGlobalReplicationGroupsResult", + "resultWrapper":"DescribeGlobalReplicationGroupsResult" + }, + "errors":[ + {"shape":"GlobalReplicationGroupNotFoundFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ], + "documentation":"

Returns information about a particular global replication group. If no identifier is specified, returns information about all Global Datastores.
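Editor's note: a minimal sketch using the DescribeGlobalReplicationGroups paginator registered above in paginators-1.json; the printed field name follows the GlobalReplicationGroup shape.

import boto3

paginator = boto3.client("elasticache").get_paginator(
    "describe_global_replication_groups"
)
for page in paginator.paginate(ShowMemberInfo=True):
    for group in page["GlobalReplicationGroups"]:
        print(group["GlobalReplicationGroupId"])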

" + }, "DescribeReplicationGroups":{ "name":"DescribeReplicationGroups", "http":{ @@ -640,6 +717,62 @@ ], "documentation":"

Returns details of the update actions

" }, + "DisassociateGlobalReplicationGroup":{ + "name":"DisassociateGlobalReplicationGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateGlobalReplicationGroupMessage"}, + "output":{ + "shape":"DisassociateGlobalReplicationGroupResult", + "resultWrapper":"DisassociateGlobalReplicationGroupResult" + }, + "errors":[ + {"shape":"GlobalReplicationGroupNotFoundFault"}, + {"shape":"InvalidGlobalReplicationGroupStateFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ], + "documentation":"

Removes a secondary cluster from the Global Datastore using the Global Datastore name. The secondary cluster will no longer receive updates from the primary cluster, but will remain as a standalone cluster in that AWS region.

" + }, + "FailoverGlobalReplicationGroup":{ + "name":"FailoverGlobalReplicationGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"FailoverGlobalReplicationGroupMessage"}, + "output":{ + "shape":"FailoverGlobalReplicationGroupResult", + "resultWrapper":"FailoverGlobalReplicationGroupResult" + }, + "errors":[ + {"shape":"GlobalReplicationGroupNotFoundFault"}, + {"shape":"InvalidGlobalReplicationGroupStateFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ], + "documentation":"

Fails over the primary region of a Global Datastore to a selected secondary region.
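Editor's note: a minimal sketch; the request members are not shown in this hunk, so the parameter names below follow the ElastiCache API and the values are placeholders.

import boto3

boto3.client("elasticache").failover_global_replication_group(
    GlobalReplicationGroupId="example-global",
    PrimaryRegion="us-west-2",  # region whose cluster is promoted to primary
    PrimaryReplicationGroupId="example-secondary",
)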

" + }, + "IncreaseNodeGroupsInGlobalReplicationGroup":{ + "name":"IncreaseNodeGroupsInGlobalReplicationGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"IncreaseNodeGroupsInGlobalReplicationGroupMessage"}, + "output":{ + "shape":"IncreaseNodeGroupsInGlobalReplicationGroupResult", + "resultWrapper":"IncreaseNodeGroupsInGlobalReplicationGroupResult" + }, + "errors":[ + {"shape":"GlobalReplicationGroupNotFoundFault"}, + {"shape":"InvalidGlobalReplicationGroupStateFault"}, + {"shape":"InvalidParameterValueException"} + ], + "documentation":"

Increases the number of node groups in the Global Datastore.

" + }, "IncreaseReplicaCount":{ "name":"IncreaseReplicaCount", "http":{ @@ -745,7 +878,8 @@ {"shape":"CacheParameterGroupNotFoundFault"}, {"shape":"InvalidCacheParameterGroupStateFault"}, {"shape":"InvalidParameterValueException"}, - {"shape":"InvalidParameterCombinationException"} + {"shape":"InvalidParameterCombinationException"}, + {"shape":"InvalidGlobalReplicationGroupStateFault"} ], "documentation":"

Modifies the parameters of a cache parameter group. You can modify up to 20 parameters in a single request by submitting a list parameter name and value pairs.

" }, @@ -768,6 +902,24 @@ ], "documentation":"

Modifies an existing cache subnet group.

" }, + "ModifyGlobalReplicationGroup":{ + "name":"ModifyGlobalReplicationGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyGlobalReplicationGroupMessage"}, + "output":{ + "shape":"ModifyGlobalReplicationGroupResult", + "resultWrapper":"ModifyGlobalReplicationGroupResult" + }, + "errors":[ + {"shape":"GlobalReplicationGroupNotFoundFault"}, + {"shape":"InvalidGlobalReplicationGroupStateFault"}, + {"shape":"InvalidParameterValueException"} + ], + "documentation":"

Modifies the settings for a Global Datastore.

" + }, "ModifyReplicationGroup":{ "name":"ModifyReplicationGroup", "http":{ @@ -842,6 +994,24 @@ ], "documentation":"

Allows you to purchase a reserved cache node offering.

" }, + "RebalanceSlotsInGlobalReplicationGroup":{ + "name":"RebalanceSlotsInGlobalReplicationGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebalanceSlotsInGlobalReplicationGroupMessage"}, + "output":{ + "shape":"RebalanceSlotsInGlobalReplicationGroupResult", + "resultWrapper":"RebalanceSlotsInGlobalReplicationGroupResult" + }, + "errors":[ + {"shape":"GlobalReplicationGroupNotFoundFault"}, + {"shape":"InvalidGlobalReplicationGroupStateFault"}, + {"shape":"InvalidParameterValueException"} + ], + "documentation":"

Redistributes slots to ensure uniform distribution across existing shards in the cluster.

" + }, "RebootCacheCluster":{ "name":"RebootCacheCluster", "http":{ @@ -893,7 +1063,8 @@ {"shape":"InvalidCacheParameterGroupStateFault"}, {"shape":"CacheParameterGroupNotFoundFault"}, {"shape":"InvalidParameterValueException"}, - {"shape":"InvalidParameterCombinationException"} + {"shape":"InvalidParameterCombinationException"}, + {"shape":"InvalidGlobalReplicationGroupStateFault"} ], "documentation":"

Modifies the parameters of a cache parameter group to the engine or system default value. You can reset specific parameters by submitting a list of parameter names. To reset the entire cache parameter group, specify the ResetAllParameters and CacheParameterGroupName parameters.

" }, @@ -1014,7 +1185,7 @@ }, "ScaleDownModifications":{ "shape":"NodeTypeList", - "documentation":"

A string list, each element of which specifies a cache node type which you can use to scale your cluster or replication group.

When scaling down on a Redis cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from this list for the CacheNodeType parameter.

" + "documentation":"

A string list, each element of which specifies a cache node type which you can use to scale your cluster or replication group. When scaling down a Redis cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from this list for the CacheNodeType parameter.

" } }, "documentation":"

Represents the allowed node types you can use to modify your cluster or replication group.

" @@ -1169,7 +1340,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The name of the compute and memory capacity node type for the cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" + "documentation":"

The name of the compute and memory capacity node type for the cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" }, "Engine":{ "shape":"String", @@ -1366,7 +1537,7 @@ }, "CacheNodeStatus":{ "shape":"String", - "documentation":"

The current state of this cache node.

" + "documentation":"

The current state of this cache node, one of the following values: available, creating, rebooting, or deleting.

" }, "CacheNodeCreateTime":{ "shape":"TStamp", @@ -1389,7 +1560,7 @@ "documentation":"

The Availability Zone where this node was created and now resides.

" } }, - "documentation":"

Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached or Redis.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" + "documentation":"

Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached or Redis.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" }, "CacheNodeIdsList":{ "type":"list", @@ -1534,6 +1705,10 @@ "Description":{ "shape":"String", "documentation":"

The description for this cache parameter group.

" + }, + "IsGlobal":{ + "shape":"Boolean", + "documentation":"

Indicates whether the parameter group is associated with a Global Datastore.

" } }, "documentation":"

Represents the output of a CreateCacheParameterGroup operation.

", @@ -1984,7 +2159,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" + "documentation":"

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" }, "Engine":{ "shape":"String", @@ -2141,6 +2316,33 @@ "CacheSubnetGroup":{"shape":"CacheSubnetGroup"} } }, + "CreateGlobalReplicationGroupMessage":{ + "type":"structure", + "required":[ + "GlobalReplicationGroupIdSuffix", + "PrimaryReplicationGroupId" + ], + "members":{ + "GlobalReplicationGroupIdSuffix":{ + "shape":"String", + "documentation":"

The suffix for the name of a Global Datastore. The suffix guarantees uniqueness of the Global Datastore name across multiple regions.

" + }, + "GlobalReplicationGroupDescription":{ + "shape":"String", + "documentation":"

A description of the Global Datastore.

" + }, + "PrimaryReplicationGroupId":{ + "shape":"String", + "documentation":"

The name of the primary cluster that accepts writes and will replicate updates to the secondary cluster.

" + } + } + }, + "CreateGlobalReplicationGroupResult":{ + "type":"structure", + "members":{ + "GlobalReplicationGroup":{"shape":"GlobalReplicationGroup"} + } + }, "CreateReplicationGroupMessage":{ "type":"structure", "required":[ @@ -2156,6 +2358,10 @@ "shape":"String", "documentation":"

A user-created description for the replication group.

" }, + "GlobalReplicationGroupId":{ + "shape":"String", + "documentation":"

The name of the Global Datastore.

" + }, "PrimaryClusterId":{ "shape":"String", "documentation":"

The identifier of the cluster that serves as the primary for this replication group. This cluster must already exist and have a status of available.

This parameter is not required if NumCacheClusters, NumNodeGroups, or ReplicasPerNodeGroup is specified.

" @@ -2186,7 +2392,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" + "documentation":"

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" }, "Engine":{ "shape":"String", @@ -2262,7 +2468,7 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

The ID of the KMS key used to encrypt the disk on the cluster.

" + "documentation":"

The ID of the KMS key used to encrypt the disk in the cluster.

" } }, "documentation":"

Represents the input of a CreateReplicationGroup operation.

" @@ -2320,6 +2526,42 @@ "type":"list", "member":{"shape":"CustomerNodeEndpoint"} }, + "DecreaseNodeGroupsInGlobalReplicationGroupMessage":{ + "type":"structure", + "required":[ + "GlobalReplicationGroupId", + "NodeGroupCount", + "ApplyImmediately" + ], + "members":{ + "GlobalReplicationGroupId":{ + "shape":"String", + "documentation":"

The name of the Global Datastore.

" + }, + "NodeGroupCount":{ + "shape":"Integer", + "documentation":"

The number of node groups (shards) that results from the modification of the shard configuration.

" + }, + "GlobalNodeGroupsToRemove":{ + "shape":"GlobalNodeGroupIdList", + "documentation":"

If the value of NodeGroupCount is less than the current number of node groups (shards), then either GlobalNodeGroupsToRemove or GlobalNodeGroupsToRetain is required. GlobalNodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. ElastiCache for Redis will attempt to remove all node groups listed by GlobalNodeGroupsToRemove from the cluster.

" + }, + "GlobalNodeGroupsToRetain":{ + "shape":"GlobalNodeGroupIdList", + "documentation":"

If the value of NodeGroupCount is less than the current number of node groups (shards), then either GlobalNodeGroupsToRemove or GlobalNodeGroupsToRetain is required. GlobalNodeGroupsToRetain is a list of NodeGroupIds to retain in the cluster. ElastiCache for Redis will attempt to retain all node groups listed by GlobalNodeGroupsToRetain in the cluster.

" + }, + "ApplyImmediately":{ + "shape":"Boolean", + "documentation":"

Indicates that the shard reconfiguration process begins immediately. At present, the only permitted value for this parameter is true.
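Editor's note: a minimal sketch that shrinks a Global Datastore to two shards while retaining specific node groups (IDs are placeholders).

import boto3

boto3.client("elasticache").decrease_node_groups_in_global_replication_group(
    GlobalReplicationGroupId="example-global",
    NodeGroupCount=2,
    GlobalNodeGroupsToRetain=["0001", "0002"],
    ApplyImmediately=True,  # true is currently the only permitted value
)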

" + } + } + }, + "DecreaseNodeGroupsInGlobalReplicationGroupResult":{ + "type":"structure", + "members":{ + "GlobalReplicationGroup":{"shape":"GlobalReplicationGroup"} + } + }, "DecreaseReplicaCountMessage":{ "type":"structure", "required":[ @@ -2409,6 +2651,29 @@ }, "documentation":"

Represents the input of a DeleteCacheSubnetGroup operation.

" }, + "DeleteGlobalReplicationGroupMessage":{ + "type":"structure", + "required":[ + "GlobalReplicationGroupId", + "RetainPrimaryReplicationGroup" + ], + "members":{ + "GlobalReplicationGroupId":{ + "shape":"String", + "documentation":"

The name of the Global Datastore.

" + }, + "RetainPrimaryReplicationGroup":{ + "shape":"Boolean", + "documentation":"

If set to true, the primary replication group is retained as a standalone replication group.

" + } + } + }, + "DeleteGlobalReplicationGroupResult":{ + "type":"structure", + "members":{ + "GlobalReplicationGroup":{"shape":"GlobalReplicationGroup"} + } + }, "DeleteReplicationGroupMessage":{ "type":"structure", "required":["ReplicationGroupId"], @@ -2643,6 +2908,40 @@ }, "documentation":"

Represents the input of a DescribeEvents operation.

" }, + "DescribeGlobalReplicationGroupsMessage":{ + "type":"structure", + "members":{ + "GlobalReplicationGroupId":{ + "shape":"String", + "documentation":"

The name of the Global Datastore.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + }, + "ShowMemberInfo":{ + "shape":"BooleanOptional", + "documentation":"

Returns the list of members that comprise the Global Datastore.

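Marker and MaxRecords follow the usual ElastiCache pagination contract; a boto3 sketch of walking every page (no real identifiers assumed):

import boto3

elasticache = boto3.client("elasticache")
kwargs = {"ShowMemberInfo": True, "MaxRecords": 50}
while True:
    page = elasticache.describe_global_replication_groups(**kwargs)
    for group in page.get("GlobalReplicationGroups", []):
        print(group["GlobalReplicationGroupId"], group.get("Status"))
    if "Marker" not in page:
        break                          # no more pages
    kwargs["Marker"] = page["Marker"]  # resume after the last record returned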
" + } + } + }, + "DescribeGlobalReplicationGroupsResult":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + }, + "GlobalReplicationGroups":{ + "shape":"GlobalReplicationGroupList", + "documentation":"

Indicates the slot configuration and global identifier for each node group.

" + } + } + }, "DescribeReplicationGroupsMessage":{ "type":"structure", "members":{ @@ -2674,7 +2973,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" + "documentation":"

The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" }, "Duration":{ "shape":"String", @@ -2708,7 +3007,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" + "documentation":"

The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" }, "Duration":{ "shape":"String", @@ -2847,6 +3146,34 @@ } } }, + "DisassociateGlobalReplicationGroupMessage":{ + "type":"structure", + "required":[ + "GlobalReplicationGroupId", + "ReplicationGroupId", + "ReplicationGroupRegion" + ], + "members":{ + "GlobalReplicationGroupId":{ + "shape":"String", + "documentation":"

The name of the Global Datastore.

" + }, + "ReplicationGroupId":{ + "shape":"String", + "documentation":"

The name of the secondary cluster that you want to remove from the Global Datastore.

" + }, + "ReplicationGroupRegion":{ + "shape":"String", + "documentation":"

The AWS region of the secondary cluster that you want to remove from the Global Datastore.

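A minimal boto3 sketch; the names and region below are hypothetical placeholders:

import boto3

elasticache = boto3.client("elasticache")
elasticache.disassociate_global_replication_group(
    GlobalReplicationGroupId="sgaui-my-global-datastore",  # hypothetical
    ReplicationGroupId="my-secondary-group",               # secondary cluster to detach
    ReplicationGroupRegion="eu-west-1",
)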
" + } + } + }, + "DisassociateGlobalReplicationGroupResult":{ + "type":"structure", + "members":{ + "GlobalReplicationGroup":{"shape":"GlobalReplicationGroup"} + } + }, "Double":{"type":"double"}, "EC2SecurityGroup":{ "type":"structure", @@ -2953,6 +3280,228 @@ }, "documentation":"

Represents the output of a DescribeEvents operation.

" }, + "FailoverGlobalReplicationGroupMessage":{ + "type":"structure", + "required":[ + "GlobalReplicationGroupId", + "PrimaryRegion", + "PrimaryReplicationGroupId" + ], + "members":{ + "GlobalReplicationGroupId":{ + "shape":"String", + "documentation":"

The name of the Global Datastore.

" + }, + "PrimaryRegion":{ + "shape":"String", + "documentation":"

The AWS region of the primary cluster of the Global Datastore.

" + }, + "PrimaryReplicationGroupId":{ + "shape":"String", + "documentation":"

The name of the primary replication group.

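A minimal boto3 sketch of promoting a secondary region (hypothetical identifiers); the named replication group becomes the new primary:

import boto3

elasticache = boto3.client("elasticache")
elasticache.failover_global_replication_group(
    GlobalReplicationGroupId="sgaui-my-global-datastore",  # hypothetical
    PrimaryRegion="eu-west-1",                             # region to promote
    PrimaryReplicationGroupId="my-secondary-group",
)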
" + } + } + }, + "FailoverGlobalReplicationGroupResult":{ + "type":"structure", + "members":{ + "GlobalReplicationGroup":{"shape":"GlobalReplicationGroup"} + } + }, + "GlobalNodeGroup":{ + "type":"structure", + "members":{ + "GlobalNodeGroupId":{ + "shape":"String", + "documentation":"

The name of the global node group.

" + }, + "Slots":{ + "shape":"String", + "documentation":"

The keyspace for this node group.

" + } + }, + "documentation":"

Indicates the slot configuration and global identifier for a node group.

" + }, + "GlobalNodeGroupIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"GlobalNodeGroupId" + } + }, + "GlobalNodeGroupList":{ + "type":"list", + "member":{ + "shape":"GlobalNodeGroup", + "locationName":"GlobalNodeGroup" + } + }, + "GlobalReplicationGroup":{ + "type":"structure", + "members":{ + "GlobalReplicationGroupId":{ + "shape":"String", + "documentation":"

The name of the Global Datastore.

" + }, + "GlobalReplicationGroupDescription":{ + "shape":"String", + "documentation":"

The optional description of the Global Datastore.

" + }, + "Status":{ + "shape":"String", + "documentation":"

The status of the Global Datastore.

" + }, + "CacheNodeType":{ + "shape":"String", + "documentation":"

The cache node type of the Global Datastore.

" + }, + "Engine":{ + "shape":"String", + "documentation":"

The ElastiCache engine. For preview, it is Redis only.

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

The ElastiCache Redis engine version. For preview, it is Redis version 5.0.5 only.

" + }, + "Members":{ + "shape":"GlobalReplicationGroupMemberList", + "documentation":"

The replication groups that comprise the Global Datastore.

" + }, + "ClusterEnabled":{ + "shape":"BooleanOptional", + "documentation":"

A flag that indicates whether the Global Datastore is cluster enabled.

" + }, + "GlobalNodeGroups":{ + "shape":"GlobalNodeGroupList", + "documentation":"

Indicates the slot configuration and global identifier for each node group.

" + }, + "AuthTokenEnabled":{ + "shape":"BooleanOptional", + "documentation":"

A flag that enables using an AuthToken (password) when issuing Redis commands.

Default: false

" + }, + "TransitEncryptionEnabled":{ + "shape":"BooleanOptional", + "documentation":"

A flag that enables in-transit encryption when set to true. You cannot modify the value of TransitEncryptionEnabled after the cluster is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled to true when you create a cluster.

" + }, + "AtRestEncryptionEnabled":{ + "shape":"BooleanOptional", + "documentation":"

A flag that enables encryption at rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the replication group is created. To enable encryption at rest on a replication group you must set AtRestEncryptionEnabled to true when you create the replication group.

Required: Only available when creating a replication group in an Amazon VPC using Redis version 3.2.6, 4.x, or later.

" + } + }, + "documentation":"

Consists of a primary cluster that accepts writes and an associated secondary cluster that resides in a different AWS region. The secondary cluster accepts only reads. The primary cluster automatically replicates updates to the secondary cluster.

", + "wrapper":true + }, + "GlobalReplicationGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The Global Datastore name already exists.

", + "error":{ + "code":"GlobalReplicationGroupAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "GlobalReplicationGroupInfo":{ + "type":"structure", + "members":{ + "GlobalReplicationGroupId":{ + "shape":"String", + "documentation":"

The name of the Global Datastore.

" + }, + "GlobalReplicationGroupMemberRole":{ + "shape":"String", + "documentation":"

The role of the replication group in a Global Datastore. Can be primary or secondary.

" + } + }, + "documentation":"

The name of the Global Datastore and role of this replication group in the Global Datastore.

" + }, + "GlobalReplicationGroupList":{ + "type":"list", + "member":{ + "shape":"GlobalReplicationGroup", + "locationName":"GlobalReplicationGroup" + } + }, + "GlobalReplicationGroupMember":{ + "type":"structure", + "members":{ + "ReplicationGroupId":{ + "shape":"String", + "documentation":"

The replication group ID of the Global Datastore member.

" + }, + "ReplicationGroupRegion":{ + "shape":"String", + "documentation":"

The AWS region of the Global Datastore member.

" + }, + "Role":{ + "shape":"String", + "documentation":"

Indicates the role of the replication group, primary or secondary.

" + }, + "AutomaticFailover":{ + "shape":"AutomaticFailoverStatus", + "documentation":"

Indicates whether automatic failover is enabled for the replication group.

" + }, + "Status":{ + "shape":"String", + "documentation":"

The status of the membership of the replication group.

" + } + }, + "documentation":"

A member of a Global Datastore. It contains the replication group ID, the AWS region, and the role of the replication group.

", + "wrapper":true + }, + "GlobalReplicationGroupMemberList":{ + "type":"list", + "member":{ + "shape":"GlobalReplicationGroupMember", + "locationName":"GlobalReplicationGroupMember" + } + }, + "GlobalReplicationGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The Global Datastore does not exist.

", + "error":{ + "code":"GlobalReplicationGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "IncreaseNodeGroupsInGlobalReplicationGroupMessage":{ + "type":"structure", + "required":[ + "GlobalReplicationGroupId", + "NodeGroupCount", + "ApplyImmediately" + ], + "members":{ + "GlobalReplicationGroupId":{ + "shape":"String", + "documentation":"

The name of the Global Datastore.

" + }, + "NodeGroupCount":{ + "shape":"Integer", + "documentation":"

The number of node groups that you want to add.

" + }, + "RegionalConfigurations":{ + "shape":"RegionalConfigurationList", + "documentation":"

Describes the replication group IDs, the AWS regions where they are stored, and the shard configuration for each member of the Global Datastore.

" + }, + "ApplyImmediately":{ + "shape":"Boolean", + "documentation":"

Indicates that the process begins immediately. At present, the only permitted value for this parameter is true.

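A boto3 sketch pairing NodeGroupCount with a RegionalConfigurations entry; ReshardingConfiguration reuses the existing ElastiCache shape (NodeGroupId plus PreferredAvailabilityZones), and all identifiers are hypothetical:

import boto3

elasticache = boto3.client("elasticache")
elasticache.increase_node_groups_in_global_replication_group(
    GlobalReplicationGroupId="sgaui-my-global-datastore",  # hypothetical
    NodeGroupCount=4,                                      # target number of shards
    RegionalConfigurations=[
        {
            "ReplicationGroupId": "my-secondary-group",
            "ReplicationGroupRegion": "eu-west-1",
            "ReshardingConfiguration": [
                {"NodeGroupId": "0004", "PreferredAvailabilityZones": ["eu-west-1a"]},
            ],
        },
    ],
    ApplyImmediately=True,
)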
" + } + } + }, + "IncreaseNodeGroupsInGlobalReplicationGroupResult":{ + "type":"structure", + "members":{ + "GlobalReplicationGroup":{"shape":"GlobalReplicationGroup"} + } + }, "IncreaseReplicaCountMessage":{ "type":"structure", "required":[ @@ -3046,6 +3595,18 @@ }, "exception":true }, + "InvalidGlobalReplicationGroupStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The Global Datastore is not available.

", + "error":{ + "code":"InvalidGlobalReplicationGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "InvalidKMSKeyFault":{ "type":"structure", "members":{ @@ -3301,6 +3862,45 @@ "CacheSubnetGroup":{"shape":"CacheSubnetGroup"} } }, + "ModifyGlobalReplicationGroupMessage":{ + "type":"structure", + "required":[ + "GlobalReplicationGroupId", + "ApplyImmediately" + ], + "members":{ + "GlobalReplicationGroupId":{ + "shape":"String", + "documentation":"

The name of the Global Datastore.

" + }, + "ApplyImmediately":{ + "shape":"Boolean", + "documentation":"

If true, this parameter causes the modifications in this request and any pending modifications to be applied, asynchronously and as soon as possible, regardless of the PreferredMaintenanceWindow setting for the replication group. If false, changes to the nodes in the replication group are applied on the next maintenance reboot, or the next failure reboot, whichever occurs first.

" + }, + "CacheNodeType":{ + "shape":"String", + "documentation":"

A valid cache node type that you want to scale this Global Datastore to.

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

The upgraded version of the cache engine to be run on the clusters in the Global Datastore.

" + }, + "GlobalReplicationGroupDescription":{ + "shape":"String", + "documentation":"

A description of the Global Datastore.

" + }, + "AutomaticFailoverEnabled":{ + "shape":"BooleanOptional", + "documentation":"

Determines whether a read replica is automatically promoted to read/write primary if the existing primary encounters a failure.

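A boto3 sketch of a scale-up applied immediately (hypothetical identifiers):

import boto3

elasticache = boto3.client("elasticache")
elasticache.modify_global_replication_group(
    GlobalReplicationGroupId="sgaui-my-global-datastore",  # hypothetical
    CacheNodeType="cache.r5.xlarge",  # every member scales to this node type
    AutomaticFailoverEnabled=True,
    ApplyImmediately=True,
)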
" + } + } + }, + "ModifyGlobalReplicationGroupResult":{ + "type":"structure", + "members":{ + "GlobalReplicationGroup":{"shape":"GlobalReplicationGroup"} + } + }, "ModifyReplicationGroupMessage":{ "type":"structure", "required":["ReplicationGroupId"], @@ -3925,6 +4525,29 @@ "ReservedCacheNode":{"shape":"ReservedCacheNode"} } }, + "RebalanceSlotsInGlobalReplicationGroupMessage":{ + "type":"structure", + "required":[ + "GlobalReplicationGroupId", + "ApplyImmediately" + ], + "members":{ + "GlobalReplicationGroupId":{ + "shape":"String", + "documentation":"

The name of the Global Datastore.

" + }, + "ApplyImmediately":{ + "shape":"Boolean", + "documentation":"

If true, redistribution is applied immediately.

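A one-call boto3 sketch (hypothetical ID); the service redistributes slots uniformly across the existing node groups:

import boto3

elasticache = boto3.client("elasticache")
elasticache.rebalance_slots_in_global_replication_group(
    GlobalReplicationGroupId="sgaui-my-global-datastore",  # hypothetical
    ApplyImmediately=True,
)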
" + } + } + }, + "RebalanceSlotsInGlobalReplicationGroupResult":{ + "type":"structure", + "members":{ + "GlobalReplicationGroup":{"shape":"GlobalReplicationGroup"} + } + }, "RebootCacheClusterMessage":{ "type":"structure", "required":[ @@ -3971,6 +4594,36 @@ "locationName":"RecurringCharge" } }, + "RegionalConfiguration":{ + "type":"structure", + "required":[ + "ReplicationGroupId", + "ReplicationGroupRegion", + "ReshardingConfiguration" + ], + "members":{ + "ReplicationGroupId":{ + "shape":"String", + "documentation":"

The name of the secondary cluster.

" + }, + "ReplicationGroupRegion":{ + "shape":"String", + "documentation":"

The AWS region where the cluster is stored.

" + }, + "ReshardingConfiguration":{ + "shape":"ReshardingConfigurationList", + "documentation":"

A list of PreferredAvailabilityZones objects that specifies the configuration of a node group in the resharded cluster.

" + } + }, + "documentation":"

The configuration of a replication group in a specific AWS region.

" + }, + "RegionalConfigurationList":{ + "type":"list", + "member":{ + "shape":"RegionalConfiguration", + "locationName":"RegionalConfiguration" + } + }, "RemoveReplicasList":{ "type":"list", "member":{"shape":"String"} @@ -4011,6 +4664,10 @@ "shape":"String", "documentation":"

The user supplied description of the replication group.

" }, + "GlobalReplicationGroupInfo":{ + "shape":"GlobalReplicationGroupInfo", + "documentation":"

The name of the Global Datastore and role of this replication group in the Global Datastore.

" + }, "Status":{ "shape":"String", "documentation":"

The current state of this replication group - creating, available, modifying, deleting, create-failed, snapshotting.

" @@ -4188,7 +4845,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The cache node type for the reserved cache nodes.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" + "documentation":"

The cache node type for the reserved cache nodes.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" }, "StartTime":{ "shape":"TStamp", @@ -4300,7 +4957,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The cache node type for the reserved cache node.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" + "documentation":"

The cache node type for the reserved cache node.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" }, "Duration":{ "shape":"Integer", @@ -4638,7 +5295,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The name of the compute and memory capacity node type for the source cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" + "documentation":"

The name of the compute and memory capacity node type for the source cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" }, "Engine":{ "shape":"String", diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index ba6d7aa3..cffb6ff6 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -939,6 +939,25 @@ "us-west-2" : { } } }, + "codestar-connections" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, "cognito-identity" : { "endpoints" : { "ap-northeast-1" : { }, @@ -950,6 +969,24 @@ "eu-central-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "cognito-identity-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "cognito-identity-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "cognito-identity-fips.us-west-2.amazonaws.com" + }, "us-east-1" : { }, "us-east-2" : { }, "us-west-2" : { } @@ -966,6 +1003,24 @@ "eu-central-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "cognito-idp-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "cognito-idp-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "cognito-idp-fips.us-west-2.amazonaws.com" + }, "us-east-1" : { }, "us-east-2" : { }, "us-west-2" : { } @@ -1229,6 +1284,12 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, + "dms-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "dms-fips.us-west-1.amazonaws.com" + }, "eu-central-1" : { }, "eu-north-1" : { }, "eu-west-1" : { }, @@ -1702,6 +1763,96 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "hostname" : "fms-fips.ap-northeast-1.amazonaws.com" + }, + "fips-ap-northeast-2" : { + "credentialScope" : { + "region" : "ap-northeast-2" + }, + "hostname" : "fms-fips.ap-northeast-2.amazonaws.com" + }, + "fips-ap-south-1" : { + "credentialScope" : { + "region" : "ap-south-1" + }, + "hostname" : "fms-fips.ap-south-1.amazonaws.com" + }, + "fips-ap-southeast-1" : { + "credentialScope" : { + "region" : "ap-southeast-1" + }, + "hostname" : "fms-fips.ap-southeast-1.amazonaws.com" + }, + "fips-ap-southeast-2" : { + "credentialScope" : { + "region" : "ap-southeast-2" + }, + "hostname" : "fms-fips.ap-southeast-2.amazonaws.com" + }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "fms-fips.ca-central-1.amazonaws.com" + }, + "fips-eu-central-1" : { + "credentialScope" : { + "region" : "eu-central-1" + }, + "hostname" : "fms-fips.eu-central-1.amazonaws.com" + }, + "fips-eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "hostname" : "fms-fips.eu-west-1.amazonaws.com" + }, + "fips-eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "hostname" : "fms-fips.eu-west-2.amazonaws.com" + }, + "fips-eu-west-3" : { + "credentialScope" : { + "region" : "eu-west-3" + }, + 
"hostname" : "fms-fips.eu-west-3.amazonaws.com" + }, + "fips-sa-east-1" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "hostname" : "fms-fips.sa-east-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "fms-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "fms-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "fms-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "fms-fips.us-west-2.amazonaws.com" + }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1713,7 +1864,10 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1723,7 +1877,11 @@ "forecastquery" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1780,6 +1938,36 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "glacier-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "glacier-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "glacier-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "glacier-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "glacier-fips.us-west-2.amazonaws.com" + }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -2300,7 +2488,9 @@ }, "managedblockchain" : { "endpoints" : { + "ap-northeast-1" : { }, "ap-southeast-1" : { }, + "eu-west-1" : { }, "us-east-1" : { } } }, @@ -3610,6 +3800,30 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "sms-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "sms-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "sms-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "sms-fips.us-west-2.amazonaws.com" + }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -4442,6 +4656,12 @@ "cn-northwest-1" : { } } }, + "iotsecuredtunneling" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "kinesis" : { "endpoints" : { "cn-north-1" : { }, @@ -4784,6 +5004,18 @@ }, "athena" : { "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "athena-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "athena-fips.us-gov-west-1.amazonaws.com" + }, "us-gov-east-1" : { }, "us-gov-west-1" : { } } @@ -4930,6 +5162,12 @@ }, "dms" 
: { "endpoints" : { + "dms-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "dms.us-gov-west-1.amazonaws.com" + }, "us-gov-east-1" : { }, "us-gov-west-1" : { } } @@ -5036,8 +5274,17 @@ }, "glacier" : { "endpoints" : { - "us-gov-east-1" : { }, + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "glacier.us-gov-east-1.amazonaws.com" + }, "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "glacier.us-gov-west-1.amazonaws.com", "protocols" : [ "http", "https" ] } } @@ -5360,6 +5607,18 @@ }, "sms" : { "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "sms-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "sms-fips.us-gov-west-1.amazonaws.com" + }, "us-gov-east-1" : { }, "us-gov-west-1" : { } } @@ -5580,6 +5839,12 @@ }, "dms" : { "endpoints" : { + "dms-fips" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "hostname" : "dms.us-iso-east-1.c2s.ic.gov" + }, "us-iso-east-1" : { } } }, @@ -5840,6 +6105,12 @@ }, "dms" : { "endpoints" : { + "dms-fips" : { + "credentialScope" : { + "region" : "us-isob-east-1" + }, + "hostname" : "dms.us-isob-east-1.sc2s.sgov.gov" + }, "us-isob-east-1" : { } } }, diff --git a/botocore/data/mediaconnect/2018-11-14/service-2.json b/botocore/data/mediaconnect/2018-11-14/service-2.json index 63c04400..c55acdb3 100644 --- a/botocore/data/mediaconnect/2018-11-14/service-2.json +++ b/botocore/data/mediaconnect/2018-11-14/service-2.json @@ -28,7 +28,7 @@ "errors": [ { "shape": "AddFlowOutputs420Exception", - "documentation": "AWS Elemental MediaConnect can't complete this request because this flow already has the maximum number of allowed outputs (20). For more information, contact AWS Customer Support." + "documentation": "AWS Elemental MediaConnect can't complete this request because this flow already has the maximum number of allowed outputs (50). For more information, contact AWS Customer Support." }, { "shape": "BadRequestException", @@ -55,7 +55,49 @@ "documentation": "You have exceeded the service request rate limit for your AWS Elemental MediaConnect account." } ], - "documentation": "Adds outputs to an existing flow. You can create up to 20 outputs per flow." + "documentation": "Adds outputs to an existing flow. You can create up to 50 outputs per flow." + }, + "AddFlowSources": { + "name": "AddFlowSources", + "http": { + "method": "POST", + "requestUri": "/v1/flows/{flowArn}/source", + "responseCode": 201 + }, + "input": { + "shape": "AddFlowSourcesRequest" + }, + "output": { + "shape": "AddFlowSourcesResponse", + "documentation": "AWS Elemental MediaConnect added sources to the flow successfully." + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "The request that you submitted is not valid." + }, + { + "shape": "InternalServerErrorException", + "documentation": "AWS Elemental MediaConnect can't fulfill your request because it encountered an unexpected condition." + }, + { + "shape": "ForbiddenException", + "documentation": "You don't have the required permissions to perform this operation." + }, + { + "shape": "NotFoundException", + "documentation": "AWS Elemental MediaConnect did not find the resource that you specified in the request." 
+ }, + { + "shape": "ServiceUnavailableException", + "documentation": "AWS Elemental MediaConnect is currently unavailable. Try again later." + }, + { + "shape": "TooManyRequestsException", + "documentation": "You have exceeded the service request rate limit for your AWS Elemental MediaConnect account." + } + ], + "documentation": "Adds Sources to flow" }, "CreateFlow": { "name": "CreateFlow", @@ -97,7 +139,7 @@ "documentation": "You have exceeded the service request rate limit for your AWS Elemental MediaConnect account." } ], - "documentation": "Creates a new flow. The request must include one source. The request optionally can include outputs (up to 20) and entitlements (up to 50)." + "documentation": "Creates a new flow. The request must include one source. The request optionally can include outputs (up to 50) and entitlements (up to 50)." }, "DeleteFlow": { "name": "DeleteFlow", @@ -369,6 +411,48 @@ ], "documentation": "Removes an output from an existing flow. This request can be made only on an output that does not have an entitlement associated with it. If the output has an entitlement, you must revoke the entitlement instead. When an entitlement is revoked from a flow, the service automatically removes the associated output." }, + "RemoveFlowSource": { + "name": "RemoveFlowSource", + "http": { + "method": "DELETE", + "requestUri": "/v1/flows/{flowArn}/source/{sourceArn}", + "responseCode": 202 + }, + "input": { + "shape": "RemoveFlowSourceRequest" + }, + "output": { + "shape": "RemoveFlowSourceResponse", + "documentation": "source successfully removed from flow configuration." + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "The request that you submitted is not valid." + }, + { + "shape": "InternalServerErrorException", + "documentation": "AWS Elemental MediaConnect can't fulfill your request because it encountered an unexpected condition." + }, + { + "shape": "ForbiddenException", + "documentation": "You don't have the required permissions to perform this operation." + }, + { + "shape": "NotFoundException", + "documentation": "AWS Elemental MediaConnect did not find the resource that you specified in the request." + }, + { + "shape": "ServiceUnavailableException", + "documentation": "AWS Elemental MediaConnect is currently unavailable. Try again later." + }, + { + "shape": "TooManyRequestsException", + "documentation": "You have exceeded the service request rate limit for your AWS Elemental MediaConnect account." + } + ], + "documentation": "Removes a source from an existing flow. This request can be made only if there is more than one source on the flow." + }, "RevokeFlowEntitlement": { "name": "RevokeFlowEntitlement", "http": { @@ -547,6 +631,48 @@ ], "documentation": "Deletes specified tags from a resource." }, + "UpdateFlow": { + "name": "UpdateFlow", + "http": { + "method": "PUT", + "requestUri": "/v1/flows/{flowArn}", + "responseCode": 202 + }, + "input": { + "shape": "UpdateFlowRequest" + }, + "output": { + "shape": "UpdateFlowResponse", + "documentation": "AWS Elemental MediaConnect updated the flow successfully." + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "The request that you submitted is not valid." + }, + { + "shape": "InternalServerErrorException", + "documentation": "AWS Elemental MediaConnect can't fulfill your request because it encountered an unexpected condition." + }, + { + "shape": "ForbiddenException", + "documentation": "You don't have the required permissions to perform this operation." 
+ }, + { + "shape": "NotFoundException", + "documentation": "AWS Elemental MediaConnect did not find the resource that you specified in the request." + }, + { + "shape": "ServiceUnavailableException", + "documentation": "AWS Elemental MediaConnect is currently unavailable. Try again later." + }, + { + "shape": "TooManyRequestsException", + "documentation": "You have exceeded the service request rate limit for your AWS Elemental MediaConnect account." + } + ], + "documentation": "Updates flow" + }, "UpdateFlowEntitlement": { "name": "UpdateFlowEntitlement", "http": { @@ -729,6 +855,42 @@ } } }, + "AddFlowSourcesRequest": { + "type": "structure", + "members": { + "FlowArn": { + "shape": "__string", + "location": "uri", + "locationName": "flowArn", + "documentation": "The flow that you want to mutate." + }, + "Sources": { + "shape": "__listOfSetSourceRequest", + "locationName": "sources", + "documentation": "A list of sources that you want to add." + } + }, + "documentation": "A request to add sources to the flow.", + "required": [ + "FlowArn", + "Sources" + ] + }, + "AddFlowSourcesResponse": { + "type": "structure", + "members": { + "FlowArn": { + "shape": "__string", + "locationName": "flowArn", + "documentation": "The ARN of the flow that these sources were added to." + }, + "Sources": { + "shape": "__listOfSource", + "locationName": "sources", + "documentation": "The details of the newly added sources." + } + } + }, "AddOutputRequest": { "type": "structure", "members": { @@ -863,11 +1025,18 @@ "Source": { "shape": "SetSourceRequest", "locationName": "source" + }, + "SourceFailoverConfig": { + "shape": "FailoverConfig", + "locationName": "sourceFailoverConfig" + }, + "Sources": { + "shape": "__listOfSetSourceRequest", + "locationName": "sources" } }, - "documentation": "Creates a new flow. The request must include one source. The request optionally can include outputs (up to 20) and entitlements (up to 50).", + "documentation": "Creates a new flow. The request must include one source. The request optionally can include outputs (up to 50) and entitlements (up to 50).", "required": [ - "Source", "Name" ] }, @@ -1032,6 +1201,21 @@ "Name" ] }, + "FailoverConfig": { + "type": "structure", + "members": { + "RecoveryWindow": { + "shape": "__integer", + "locationName": "recoveryWindow", + "documentation": "Search window time to look for dash-7 packets" + }, + "State": { + "shape": "State", + "locationName": "state" + } + }, + "documentation": "The settings for source failover" + }, "Flow": { "type": "structure", "members": { @@ -1074,6 +1258,14 @@ "shape": "Source", "locationName": "source" }, + "SourceFailoverConfig": { + "shape": "FailoverConfig", + "locationName": "sourceFailoverConfig" + }, + "Sources": { + "shape": "__listOfSource", + "locationName": "sources" + }, "Status": { "shape": "Status", "locationName": "status", @@ -1522,6 +1714,42 @@ } } }, + "RemoveFlowSourceRequest": { + "type": "structure", + "members": { + "FlowArn": { + "shape": "__string", + "location": "uri", + "locationName": "flowArn", + "documentation": "The flow that you want to remove a source from." + }, + "SourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "sourceArn", + "documentation": "The ARN of the source that you want to remove." 
+ } + }, + "required": [ + "FlowArn", + "SourceArn" + ] + }, + "RemoveFlowSourceResponse": { + "type": "structure", + "members": { + "FlowArn": { + "shape": "__string", + "locationName": "flowArn", + "documentation": "The ARN of the flow that is associated with the source you removed." + }, + "SourceArn": { + "shape": "__string", + "locationName": "sourceArn", + "documentation": "The ARN of the source that was removed." + } + } + }, "ResponseError": { "type": "structure", "members": { @@ -1742,6 +1970,13 @@ } } }, + "State": { + "type": "string", + "enum": [ + "ENABLED", + "DISABLED" + ] + }, "Status": { "type": "string", "enum": [ @@ -1938,6 +2173,21 @@ }, "documentation": "Information about the encryption of the flow." }, + "UpdateFailoverConfig": { + "type": "structure", + "members": { + "RecoveryWindow": { + "shape": "__integer", + "locationName": "recoveryWindow", + "documentation": "Recovery window time to look for dash-7 packets" + }, + "State": { + "shape": "State", + "locationName": "state" + } + }, + "documentation": "The settings for source failover" + }, "UpdateFlowEntitlementRequest": { "type": "structure", "members": { @@ -2075,6 +2325,34 @@ } } }, + "UpdateFlowRequest": { + "type": "structure", + "members": { + "FlowArn": { + "shape": "__string", + "location": "uri", + "locationName": "flowArn", + "documentation": "The flow that you want to update." + }, + "SourceFailoverConfig": { + "shape": "UpdateFailoverConfig", + "locationName": "sourceFailoverConfig" + } + }, + "documentation": "A request to update flow.", + "required": [ + "FlowArn" + ] + }, + "UpdateFlowResponse": { + "type": "structure", + "members": { + "Flow": { + "shape": "Flow", + "locationName": "flow" + } + } + }, "UpdateFlowSourceRequest": { "type": "structure", "members": { @@ -2202,6 +2480,24 @@ "shape": "Output" } }, + "__listOfSetSourceRequest": { + "type": "list", + "member": { + "shape": "SetSourceRequest" + } + }, + "__listOfSource": { + "type": "list", + "member": { + "shape": "Source" + } + }, + "__listOf__integer": { + "type": "list", + "member": { + "shape": "__integer" + } + }, "__listOf__string": { "type": "list", "member": { diff --git a/botocore/data/mediaconvert/2017-08-29/service-2.json b/botocore/data/mediaconvert/2017-08-29/service-2.json index d9ec070e..a131164b 100644 --- a/botocore/data/mediaconvert/2017-08-29/service-2.json +++ b/botocore/data/mediaconvert/2017-08-29/service-2.json @@ -1672,6 +1672,131 @@ "USE_CONFIGURED" ] }, + "Av1AdaptiveQuantization": { + "type": "string", + "documentation": "Adaptive quantization. Allows intra-frame quantizers to vary to improve visual quality.", + "enum": [ + "OFF", + "LOW", + "MEDIUM", + "HIGH", + "HIGHER", + "MAX" + ] + }, + "Av1FramerateControl": { + "type": "string", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. 
Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "enum": [ + "INITIALIZE_FROM_SOURCE", + "SPECIFIED" + ] + }, + "Av1FramerateConversionAlgorithm": { + "type": "string", + "documentation": "When set to INTERPOLATE, produces smoother motion during frame rate conversion.", + "enum": [ + "DUPLICATE_DROP", + "INTERPOLATE" + ] + }, + "Av1QvbrSettings": { + "type": "structure", + "members": { + "QvbrQualityLevel": { + "shape": "__integerMin1Max10", + "locationName": "qvbrQualityLevel", + "documentation": "Required when you use QVBR rate control mode. That is, when you specify qvbrSettings within av1Settings. Specify the general target quality level for this output, from 1 to 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9. Optionally, to specify a value between whole numbers, also provide a value for the setting qvbrQualityLevelFineTune. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33." + }, + "QvbrQualityLevelFineTune": { + "shape": "__doubleMin0Max1", + "locationName": "qvbrQualityLevelFineTune", + "documentation": "Optional. Specify a value here to set the QVBR quality to a level that is between whole numbers. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33. MediaConvert rounds your QVBR quality level to the nearest third of a whole number. For example, if you set qvbrQualityLevel to 7 and you set qvbrQualityLevelFineTune to .25, your actual QVBR quality level is 7.33." + } + }, + "documentation": "Settings for quality-defined variable bitrate encoding with the AV1 codec. Required when you set Rate control mode to QVBR. Not valid when you set Rate control mode to a value other than QVBR, or when you don't define Rate control mode." + }, + "Av1RateControlMode": { + "type": "string", + "documentation": "'With AV1 outputs, for rate control mode, MediaConvert supports only quality-defined variable bitrate (QVBR). You can''t use CBR or VBR.'", + "enum": [ + "QVBR" + ] + }, + "Av1Settings": { + "type": "structure", + "members": { + "AdaptiveQuantization": { + "shape": "Av1AdaptiveQuantization", + "locationName": "adaptiveQuantization", + "documentation": "Adaptive quantization. Allows intra-frame quantizers to vary to improve visual quality." + }, + "FramerateControl": { + "shape": "Av1FramerateControl", + "locationName": "framerateControl", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." 
+ }, + "FramerateConversionAlgorithm": { + "shape": "Av1FramerateConversionAlgorithm", + "locationName": "framerateConversionAlgorithm", + "documentation": "When set to INTERPOLATE, produces smoother motion during frame rate conversion." + }, + "FramerateDenominator": { + "shape": "__integerMin1Max2147483647", + "locationName": "framerateDenominator", + "documentation": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateDenominator to specify the denominator of this fraction. In this example, use 1001 for the value of FramerateDenominator. When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976." + }, + "FramerateNumerator": { + "shape": "__integerMin1Max2147483647", + "locationName": "framerateNumerator", + "documentation": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateNumerator to specify the numerator of this fraction. In this example, use 24000 for the value of FramerateNumerator. When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976." + }, + "GopSize": { + "shape": "__doubleMin0", + "locationName": "gopSize", + "documentation": "Specify the GOP length (keyframe interval) in frames. With AV1, MediaConvert doesn't support GOP length in seconds. This value must be greater than zero and preferably equal to 1 + ((numberBFrames + 1) * x), where x is an integer value." + }, + "MaxBitrate": { + "shape": "__integerMin1000Max1152000000", + "locationName": "maxBitrate", + "documentation": "Maximum bitrate in bits/second. For example, enter five megabits per second as 5000000. Required when Rate control mode is QVBR." + }, + "NumberBFramesBetweenReferenceFrames": { + "shape": "__integerMin7Max15", + "locationName": "numberBFramesBetweenReferenceFrames", + "documentation": "Specify the number of B-frames. With AV1, MediaConvert supports only 7 or 15." + }, + "QvbrSettings": { + "shape": "Av1QvbrSettings", + "locationName": "qvbrSettings", + "documentation": "Settings for quality-defined variable bitrate encoding with the AV1 codec. Required when you set Rate control mode to QVBR. Not valid when you set Rate control mode to a value other than QVBR, or when you don't define Rate control mode." + }, + "RateControlMode": { + "shape": "Av1RateControlMode", + "locationName": "rateControlMode", + "documentation": "'With AV1 outputs, for rate control mode, MediaConvert supports only quality-defined variable bitrate (QVBR). You can''t use CBR or VBR.'" + }, + "Slices": { + "shape": "__integerMin1Max32", + "locationName": "slices", + "documentation": "Specify the number of slices per picture. This value must be 1, 2, 4, 8, 16, or 32. For progressive pictures, this value must be less than or equal to the number of macroblock rows. For interlaced pictures, this value must be less than or equal to half the number of macroblock rows." + }, + "SpatialAdaptiveQuantization": { + "shape": "Av1SpatialAdaptiveQuantization", + "locationName": "spatialAdaptiveQuantization", + "documentation": "Adjust quantization within each frame based on spatial variation of content complexity." 
+ } + }, + "documentation": "Required when you set Codec, under VideoDescription>CodecSettings to the value AV1." + }, + "Av1SpatialAdaptiveQuantization": { + "type": "string", + "documentation": "Adjust quantization within each frame based on spatial variation of content complexity.", + "enum": [ + "DISABLED", + "ENABLED" + ] + }, "AvailBlanking": { "type": "structure", "members": { @@ -2387,7 +2512,7 @@ "ColorSpaceConversion": { "shape": "ColorSpaceConversion", "locationName": "colorSpaceConversion", - "documentation": "Specify the color space you want for this output. The service supports conversion between HDR formats, between SDR formats, and from SDR to HDR. The service doesn't support conversion from HDR to SDR. SDR to HDR conversion doesn't upgrade the dynamic range. The converted video has an HDR format, but visually appears the same as an unconverted output." + "documentation": "Specify the color space you want for this output. The service supports conversion between HDR formats, between SDR formats, from SDR to HDR, and from HDR to SDR. SDR to HDR conversion doesn't upgrade the dynamic range. The converted video has an HDR format, but visually appears the same as an unconverted output. HDR to SDR conversion uses Elemental tone mapping technology to approximate the outcome of manually regrading from HDR to SDR." }, "Contrast": { "shape": "__integerMin1Max100", @@ -2433,7 +2558,7 @@ }, "ColorSpaceConversion": { "type": "string", - "documentation": "Specify the color space you want for this output. The service supports conversion between HDR formats, between SDR formats, and from SDR to HDR. The service doesn't support conversion from HDR to SDR. SDR to HDR conversion doesn't upgrade the dynamic range. The converted video has an HDR format, but visually appears the same as an unconverted output.", + "documentation": "Specify the color space you want for this output. The service supports conversion between HDR formats, between SDR formats, from SDR to HDR, and from HDR to SDR. SDR to HDR conversion doesn't upgrade the dynamic range. The converted video has an HDR format, but visually appears the same as an unconverted output. HDR to SDR conversion uses Elemental tone mapping technology to approximate the outcome of manually regrading from HDR to SDR.", "enum": [ "NONE", "FORCE_601", @@ -9094,6 +9219,7 @@ "documentation": "Type of video codec", "enum": [ "FRAME_CAPTURE", + "AV1", "H_264", "H_265", "MPEG2", @@ -9103,6 +9229,11 @@ "VideoCodecSettings": { "type": "structure", "members": { + "Av1Settings": { + "shape": "Av1Settings", + "locationName": "av1Settings", + "documentation": "Required when you set Codec, under VideoDescription>CodecSettings to the value AV1." + }, "Codec": { "shape": "VideoCodec", "locationName": "codec", @@ -9134,7 +9265,7 @@ "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value PRORES." } }, - "documentation": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. 
* FRAME_CAPTURE, FrameCaptureSettings * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings" + "documentation": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * FRAME_CAPTURE, FrameCaptureSettings * AV1, Av1Settings * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings" }, "VideoDescription": { "type": "structure", @@ -9152,7 +9283,7 @@ "CodecSettings": { "shape": "VideoCodecSettings", "locationName": "codecSettings", - "documentation": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * FRAME_CAPTURE, FrameCaptureSettings * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings" + "documentation": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * FRAME_CAPTURE, FrameCaptureSettings * AV1, Av1Settings * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings" }, "ColorMetadata": { "shape": "ColorMetadata", @@ -9697,6 +9828,11 @@ "min": 64000, "max": 640000 }, + "__integerMin7Max15": { + "type": "integer", + "min": 7, + "max": 15 + }, "__integerMin8000Max192000": { "type": "integer", "min": 8000, diff --git a/botocore/data/outposts/2019-12-03/service-2.json b/botocore/data/outposts/2019-12-03/service-2.json index d3dcb3fb..592e2036 100644 --- a/botocore/data/outposts/2019-12-03/service-2.json +++ b/botocore/data/outposts/2019-12-03/service-2.json @@ -143,14 +143,14 @@ }, "AvailabilityZone":{ "type":"string", - "documentation":"

The Availability Zone.

", + "documentation":"

The Availability Zone.

You must specify AvailabilityZone or AvailabilityZoneId.

", "max":1000, "min":1, "pattern":"[a-z\\d-]+" }, "AvailabilityZoneId":{ "type":"string", - "documentation":"

The ID of the Availability Zone.

", + "documentation":"

The ID of the Availability Zone.

You must specify AvailabilityZone or AvailabilityZoneId.

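A boto3 sketch of the either/or constraint when creating an Outpost; the name and site ID are hypothetical:

import boto3

outposts = boto3.client("outposts")
outposts.create_outpost(
    Name="my-outpost",              # hypothetical
    SiteId="os-0ab12cd34ef567890",  # hypothetical
    AvailabilityZone="us-east-1a",  # alternatively pass AvailabilityZoneId="use1-az1"
)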
", "max":255, "min":1, "pattern":"[a-z]+[0-9]+-az[0-9]+" diff --git a/botocore/data/personalize/2018-05-22/service-2.json b/botocore/data/personalize/2018-05-22/service-2.json index 32f02006..f9d3a0bb 100644 --- a/botocore/data/personalize/2018-05-22/service-2.json +++ b/botocore/data/personalize/2018-05-22/service-2.json @@ -816,6 +816,10 @@ "failureReason":{ "shape":"FailureReason", "documentation":"

If the batch inference job failed, the reason for the failure.

" + }, + "solutionVersionArn":{ + "shape":"Arn", + "documentation":"

The ARN of the solution version used by the batch inference job.

" } }, "documentation":"

A truncated version of the BatchInferenceJob datatype. The ListBatchInferenceJobs operation returns a list of batch inference job summaries.

" @@ -2130,7 +2134,7 @@ "members":{ "type":{ "shape":"HPOObjectiveType", - "documentation":"

The data type of the metric.

" + "documentation":"

The type of the metric. Valid values are Maximize and Minimize.

" }, "metricName":{ "shape":"MetricName", @@ -2871,6 +2875,10 @@ "shape":"TrainingMode", "documentation":"

The scope of training used to create the solution version. The FULL option trains the solution version based on the entirety of the input solution's training data, while the UPDATE option processes only the training data that has changed since the creation of the last solution version. Choose UPDATE when you want to start recommending items added to the dataset without retraining the model.

The UPDATE option can only be used after you've created a solution version with the FULL option and the training solution uses the native-recipe-hrnn-coldstart.

" }, + "tunedHPOParams":{ + "shape":"TunedHPOParams", + "documentation":"

If hyperparameter optimization was performed, contains the hyperparameter values of the best performing model.

" + }, "status":{ "shape":"Status", "documentation":"

The status of the solution version.

A solution version can be in one of the following states:

" @@ -2954,6 +2962,16 @@ "min":1 }, "Tunable":{"type":"boolean"}, + "TunedHPOParams":{ + "type":"structure", + "members":{ + "algorithmHyperParameters":{ + "shape":"HyperParameters", + "documentation":"

A list of the hyperparameter values of the best performing model.

" + } + }, + "documentation":"

If hyperparameter optimization (HPO) was performed, contains the hyperparameter values of the best performing model.

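A boto3 sketch of reading the tuned values back from a solution version; the ARN is a hypothetical placeholder, and tunedHPOParams is simply absent when HPO was not performed:

import boto3

personalize = boto3.client("personalize")
response = personalize.describe_solution_version(
    solutionVersionArn="arn:aws:personalize:us-east-1:123456789012:solution/my-solution/1"  # hypothetical
)
version = response["solutionVersion"]
print(version.get("tunedHPOParams", {}).get("algorithmHyperParameters", {}))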
" + }, "UpdateCampaignRequest":{ "type":"structure", "required":["campaignArn"], diff --git a/botocore/data/rds/2014-10-31/service-2.json b/botocore/data/rds/2014-10-31/service-2.json index 18989240..076c6c69 100644 --- a/botocore/data/rds/2014-10-31/service-2.json +++ b/botocore/data/rds/2014-10-31/service-2.json @@ -7389,7 +7389,7 @@ "documentation":"

An optional pagination token provided by a previous DescribeExportTasks request. If you specify this parameter, the response includes only records beyond the marker, up to the value specified by the MaxRecords parameter.

" }, "MaxRecords":{ - "shape":"String", + "shape":"MaxRecords", "documentation":"

The maximum number of records to include in the response. If more records exist than the specified value, a pagination token called a marker is included in the response. You can use the marker in a later DescribeExportTasks request to retrieve the remaining results.

Default: 100

Constraints: Minimum 20, maximum 100.

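With the shape corrected from String to MaxRecords, boto3 callers pass an integer; a short pagination sketch:

import boto3

rds = boto3.client("rds")
kwargs = {"MaxRecords": 20}  # integer, within the 20-100 constraint
while True:
    page = rds.describe_export_tasks(**kwargs)
    for task in page.get("ExportTasks", []):
        print(task["ExportTaskIdentifier"], task["Status"])
    if "Marker" not in page:
        break
    kwargs["Marker"] = page["Marker"]  # fetch the next page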
" } } diff --git a/botocore/data/s3control/2018-08-20/service-2.json b/botocore/data/s3control/2018-08-20/service-2.json index 489cc982..b235e6e3 100644 --- a/botocore/data/s3control/2018-08-20/service-2.json +++ b/botocore/data/s3control/2018-08-20/service-2.json @@ -62,6 +62,21 @@ "input":{"shape":"DeleteAccessPointPolicyRequest"}, "documentation":"

Deletes the access point policy for the specified access point.

" }, + "DeleteJobTagging":{ + "name":"DeleteJobTagging", + "http":{ + "method":"DELETE", + "requestUri":"/v20180820/jobs/{id}/tagging" + }, + "input":{"shape":"DeleteJobTaggingRequest"}, + "output":{"shape":"DeleteJobTaggingResult"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

Deletes the tags on an Amazon S3 batch operations job, if any.

" + }, "DeletePublicAccessBlock":{ "name":"DeletePublicAccessBlock", "http":{ @@ -117,6 +132,21 @@ "output":{"shape":"GetAccessPointPolicyStatusResult"}, "documentation":"

Indicates whether the specified access point currently has a policy that allows public access. For more information about public access through access points, see Managing Data Access with Amazon S3 Access Points in the Amazon Simple Storage Service Developer Guide.

" }, + "GetJobTagging":{ + "name":"GetJobTagging", + "http":{ + "method":"GET", + "requestUri":"/v20180820/jobs/{id}/tagging" + }, + "input":{"shape":"GetJobTaggingRequest"}, + "output":{"shape":"GetJobTaggingResult"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

Retrieves the tags on an Amazon S3 batch operations job.

" + }, "GetPublicAccessBlock":{ "name":"GetPublicAccessBlock", "http":{ @@ -168,6 +198,26 @@ }, "documentation":"

Associates an access policy with the specified access point. Each access point can have only one policy, so a request made to this API replaces any existing policy associated with the specified access point.

" }, + "PutJobTagging":{ + "name":"PutJobTagging", + "http":{ + "method":"PUT", + "requestUri":"/v20180820/jobs/{id}/tagging" + }, + "input":{ + "shape":"PutJobTaggingRequest", + "locationName":"PutJobTaggingRequest", + "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} + }, + "output":{"shape":"PutJobTaggingResult"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyTagsException"} + ], + "documentation":"

Replaces the set of tags on an Amazon S3 batch operations job.

" + }, "PutPublicAccessBlock":{ "name":"PutPublicAccessBlock", "http":{ @@ -353,6 +403,10 @@ "RoleArn":{ "shape":"IAMRoleArn", "documentation":"

The Amazon Resource Name (ARN) for the Identity and Access Management (IAM) role that batch operations will use to run this job's operation on each object in the manifest.

" + }, + "Tags":{ + "shape":"S3TagSet", + "documentation":"

An optional set of tags to associate with the job when it is created.

" } } }, @@ -408,6 +462,32 @@ } } }, + "DeleteJobTaggingRequest":{ + "type":"structure", + "required":[ + "AccountId", + "JobId" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID associated with the Amazon S3 batch operations job whose tags you want to delete.

", + "location":"header", + "locationName":"x-amz-account-id" + }, + "JobId":{ + "shape":"JobId", + "documentation":"

The ID for the job whose tags you want to delete.

", + "location":"uri", + "locationName":"id" + } + } + }, + "DeleteJobTaggingResult":{ + "type":"structure", + "members":{ + } + }, "DeletePublicAccessBlockRequest":{ "type":"structure", "required":["AccountId"], @@ -562,6 +642,36 @@ } } }, + "GetJobTaggingRequest":{ + "type":"structure", + "required":[ + "AccountId", + "JobId" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID associated with the Amazon S3 batch operations job whose tags you want to retrieve.

", + "location":"header", + "locationName":"x-amz-account-id" + }, + "JobId":{ + "shape":"JobId", + "documentation":"

The ID for the job whose tags you want to retrieve.

", + "location":"uri", + "locationName":"id" + } + } + }, + "GetJobTaggingResult":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"S3TagSet", + "documentation":"

The set of tags associated with the job.

" + } + } + }, "GetPublicAccessBlockOutput":{ "type":"structure", "members":{ @@ -1245,6 +1355,37 @@ } } }, + "PutJobTaggingRequest":{ + "type":"structure", + "required":[ + "AccountId", + "JobId", + "Tags" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID associated with the Amazon S3 batch operations job whose tags you want to replace.

", + "location":"header", + "locationName":"x-amz-account-id" + }, + "JobId":{ + "shape":"JobId", + "documentation":"

The ID for the job whose tags you want to replace.

", + "location":"uri", + "locationName":"id" + }, + "Tags":{ + "shape":"S3TagSet", + "documentation":"

The set of tags to associate with the job.

" + } + } + }, + "PutJobTaggingResult":{ + "type":"structure", + "members":{ + } + }, "PutPublicAccessBlockRequest":{ "type":"structure", "required":[ @@ -1661,6 +1802,13 @@ "documentation":"

", "exception":true }, + "TooManyTagsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, "UpdateJobPriorityRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/servicecatalog/2015-12-10/service-2.json b/botocore/data/servicecatalog/2015-12-10/service-2.json index 99464f96..d95463d7 100644 --- a/botocore/data/servicecatalog/2015-12-10/service-2.json +++ b/botocore/data/servicecatalog/2015-12-10/service-2.json @@ -1524,6 +1524,14 @@ "Owner":{ "shape":"AccountId", "documentation":"

The owner of the constraint.

" + }, + "ProductId":{ + "shape":"Id", + "documentation":"

The identifier of the product the constraint applies to. Note that a constraint applies to a specific instance of a product within a certain portfolio.

" + }, + "PortfolioId":{ + "shape":"Id", + "documentation":"

The identifier of the portfolio the product resides in. The constraint applies only to the instance of the product that lives within this portfolio.

" } }, "documentation":"

Information about a constraint.

" diff --git a/botocore/data/ssm/2014-11-06/service-2.json b/botocore/data/ssm/2014-11-06/service-2.json index 35d82c90..0415aaf9 100644 --- a/botocore/data/ssm/2014-11-06/service-2.json +++ b/botocore/data/ssm/2014-11-06/service-2.json @@ -5533,7 +5533,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

" + "documentation":"

The token to use when requesting the next set of items.

" } } }, @@ -11300,8 +11300,8 @@ "box":true }, "ApproveUntilDate":{ - "shape":"PatchStringDate", - "documentation":"

The cutoff date for auto approval of released patches. Any patches released on or before this date will be installed automatically.

", + "shape":"PatchStringDateTime", + "documentation":"

The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically.

", "box":true }, "EnableNonSecurity":{ @@ -11405,11 +11405,10 @@ }, "documentation":"

Information about the approval status of a patch.

" }, - "PatchStringDate":{ + "PatchStringDateTime":{ "type":"string", "max":10, - "min":1, - "pattern":"^(\\d{4}-(0[1-9]|1[0-2])-(0[1-9]|[12]\\d|3[01]))$" + "min":1 }, "PatchTitle":{"type":"string"}, "PatchUnreportedNotApplicableCount":{"type":"integer"}, @@ -11969,6 +11968,21 @@ "exception":true }, "ResourceDataSyncCreatedTime":{"type":"timestamp"}, + "ResourceDataSyncDestinationDataSharing":{ + "type":"structure", + "members":{ + "DestinationDataSharingType":{ + "shape":"ResourceDataSyncDestinationDataSharingType", + "documentation":"

The data sharing type. Only Organization is supported.

" + } + }, + "documentation":"

Synchronizes Systems Manager Inventory data from multiple AWS accounts defined in AWS Organizations to a centralized Amazon S3 bucket. Data is synchronized to individual key prefixes in the central bucket. Each key prefix represents a different AWS account ID.

" + }, + "ResourceDataSyncDestinationDataSharingType":{ + "type":"string", + "max":64, + "min":1 + }, "ResourceDataSyncIncludeFutureRegions":{"type":"boolean"}, "ResourceDataSyncInvalidConfigurationException":{ "type":"structure", @@ -12103,6 +12117,10 @@ "AWSKMSKeyARN":{ "shape":"ResourceDataSyncAWSKMSKeyARN", "documentation":"

The ARN of an encryption key for a destination in Amazon S3. Must belong to the same Region as the destination Amazon S3 bucket.

" + }, + "DestinationDataSharing":{ + "shape":"ResourceDataSyncDestinationDataSharing", + "documentation":"

Enables destination data sharing. By default, this field is null.

" } }, "documentation":"

Information about the target Amazon S3 bucket for the Resource Data Sync.

" diff --git a/botocore/handlers.py b/botocore/handlers.py index daa22886..a312f5c2 100644 --- a/botocore/handlers.py +++ b/botocore/handlers.py @@ -484,8 +484,7 @@ def parse_get_bucket_location(parsed, http_response, **kwargs): # The "parsed" passed in only has the ResponseMetadata # filled out. This handler will fill in the LocationConstraint # value. - if 'LocationConstraint' in parsed: - # Response already set - a stub? + if http_response.raw is None: return response_body = http_response.content parser = xml.etree.cElementTree.XMLParser( diff --git a/docs/source/conf.py b/docs/source/conf.py index 33225a6f..4eb7d03b 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -54,7 +54,7 @@ copyright = u'2013, Mitch Garnaat' # The short X.Y version. version = '1.15.' # The full version, including alpha/beta/rc tags. -release = '1.15.21' +release = '1.15.26' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/tests/functional/test_stub.py b/tests/functional/test_stub.py index f44b722b..752ba224 100644 --- a/tests/functional/test_stub.py +++ b/tests/functional/test_stub.py @@ -313,3 +313,21 @@ class TestStubber(unittest.TestCase): actual_response = self.client.list_objects(**expected_params) self.assertEqual(desired_response, actual_response) self.stubber.assert_no_pending_responses() + + def test_parse_get_bucket_location(self): + error_code = "NoSuchBucket" + error_message = "The specified bucket does not exist" + self.stubber.add_client_error( + 'get_bucket_location', error_code, error_message) + self.stubber.activate() + + with self.assertRaises(ClientError): + self.client.get_bucket_location(Bucket='foo') + + def test_parse_get_bucket_location_returns_response(self): + service_response = {"LocationConstraint": "us-west-2"} + self.stubber.add_response('get_bucket_location',service_response) + self.stubber.activate() + response = self.client.get_bucket_location(Bucket='foo') + self.assertEqual(response, service_response) + diff --git a/tests/unit/test_handlers.py b/tests/unit/test_handlers.py index 551d18f5..a41bacb2 100644 --- a/tests/unit/test_handlers.py +++ b/tests/unit/test_handlers.py @@ -921,13 +921,6 @@ class TestHandlers(BaseSessionTest): self.assertEqual(parsed['CommonPrefixes'][0]['Prefix'], u'\xe7\xf6s% asd\x08 c') - def test_get_bucket_location_optional(self): - # This handler should no-op if another hook (i.e. stubber) has already - # filled in response - response = {"LocationConstraint": "eu-west-1"} - handlers.parse_get_bucket_location(response, None), - self.assertEqual(response["LocationConstraint"], "eu-west-1") - def test_set_operation_specific_signer_no_auth_type(self): signing_name = 'myservice' context = {'auth_type': None}