diff --git a/PKG-INFO b/PKG-INFO index b45fd7df..0fe5b674 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: botocore -Version: 1.15.21 +Version: 1.15.26 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services diff --git a/botocore.egg-info/PKG-INFO b/botocore.egg-info/PKG-INFO index b45fd7df..0fe5b674 100644 --- a/botocore.egg-info/PKG-INFO +++ b/botocore.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: botocore -Version: 1.15.21 +Version: 1.15.26 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services diff --git a/botocore/__init__.py b/botocore/__init__.py index 113458db..88ae6c5e 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re import logging -__version__ = '1.15.21' +__version__ = '1.15.26' class NullHandler(logging.Handler): diff --git a/botocore/data/acm/2015-12-08/service-2.json b/botocore/data/acm/2015-12-08/service-2.json index a748c481..103600b7 100644 --- a/botocore/data/acm/2015-12-08/service-2.json +++ b/botocore/data/acm/2015-12-08/service-2.json @@ -86,7 +86,7 @@ {"shape":"RequestInProgressException"}, {"shape":"InvalidArnException"} ], - "documentation":"
Retrieves a certificate specified by an ARN and its certificate chain . The chain is an ordered list of certificates that contains the end entity certificate, intermediate certificates of subordinate CAs, and the root certificate in that order. The certificate and certificate chain are base64 encoded. If you want to decode the certificate to see the individual fields, you can use OpenSSL.
" + "documentation":"Retrieves an Amazon-issued certificate and its certificate chain. The chain consists of the certificate of the issuing CA and the intermediate certificates of any other subordinate CAs. All of the certificates are base64 encoded. You can use OpenSSL to decode the certificates and inspect individual fields.
" }, "ImportCertificate":{ "name":"ImportCertificate", @@ -498,7 +498,7 @@ }, "ResourceRecord":{ "shape":"ResourceRecord", - "documentation":"Contains the CNAME record that you add to your DNS database for domain validation. For more information, see Use DNS to Validate Domain Ownership.
" + "documentation":"Contains the CNAME record that you add to your DNS database for domain validation. For more information, see Use DNS to Validate Domain Ownership.
Note: The CNAME information that you need does not include the name of your domain. If you include your domain name in the DNS database CNAME record, validation fails. For example, if the name is \"_a79865eb4cd1a6ab990a45779b4e0b96.yourdomain.com\", only \"_a79865eb4cd1a6ab990a45779b4e0b96\" must be used.
" }, "ValidationMethod":{ "shape":"ValidationMethod", @@ -664,11 +664,11 @@ "members":{ "Certificate":{ "shape":"CertificateBody", - "documentation":"String that contains the ACM certificate represented by the ARN specified at input.
" + "documentation":"The ACM-issued certificate corresponding to the ARN specified as input.
" }, "CertificateChain":{ "shape":"CertificateChain", - "documentation":"The certificate chain that contains the root certificate issued by the certificate authority (CA).
" + "documentation":"Certificates forming the requested certificate's chain of trust. The chain consists of the certificate of the issuing CA and the intermediate certificates of any other subordinate CAs.
" } } }, @@ -822,7 +822,7 @@ "members":{ "message":{"shape":"String"} }, - "documentation":"An ACM limit has been exceeded.
", + "documentation":"An ACM quota has been exceeded.
", "exception":true }, "ListCertificatesRequest":{ @@ -885,7 +885,7 @@ }, "NextToken":{ "type":"string", - "max":320, + "max":10000, "min":1, "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]*" }, @@ -904,7 +904,7 @@ }, "PrivateKeyBlob":{ "type":"blob", - "max":524288, + "max":5120, "min":1, "sensitive":true }, @@ -996,7 +996,7 @@ }, "SubjectAlternativeNames":{ "shape":"DomainList", - "documentation":"Additional FQDNs to be included in the Subject Alternative Name extension of the ACM certificate. For example, add the name www.example.net to a certificate for which the DomainName
field is www.example.com if users can reach your site by using either name. The maximum number of domain names that you can add to an ACM certificate is 100. However, the initial limit is 10 domain names. If you need more than 10 names, you must request a limit increase. For more information, see Limits.
The maximum length of a SAN DNS name is 253 octets. The name is made up of multiple labels separated by periods. No label can be longer than 63 octets. Consider the following examples:
(63 octets).(63 octets).(63 octets).(61 octets)
is legal because the total length is 253 octets (63+1+63+1+63+1+61) and no label exceeds 63 octets.
(64 octets).(63 octets).(63 octets).(61 octets)
is not legal because the total length exceeds 253 octets (64+1+63+1+63+1+61) and the first label exceeds 63 octets.
(63 octets).(63 octets).(63 octets).(62 octets)
is not legal because the total length of the DNS name (63+1+63+1+63+1+62) exceeds 253 octets.
Additional FQDNs to be included in the Subject Alternative Name extension of the ACM certificate. For example, add the name www.example.net to a certificate for which the DomainName
field is www.example.com if users can reach your site by using either name. The maximum number of domain names that you can add to an ACM certificate is 100. However, the initial quota is 10 domain names. If you need more than 10 names, you must request a quota increase. For more information, see Quotas.
The maximum length of a SAN DNS name is 253 octets. The name is made up of multiple labels separated by periods. No label can be longer than 63 octets. Consider the following examples:
(63 octets).(63 octets).(63 octets).(61 octets)
is legal because the total length is 253 octets (63+1+63+1+63+1+61) and no label exceeds 63 octets.
(64 octets).(63 octets).(63 octets).(61 octets)
is not legal because the total length exceeds 253 octets (64+1+63+1+63+1+61) and the first label exceeds 63 octets.
(63 octets).(63 octets).(63 octets).(62 octets)
is not legal because the total length of the DNS name (63+1+63+1+63+1+62) exceeds 253 octets.
Calling this API causes a message to be sent to the end user with a confirmation code that is required to change the user's password. For the Username
parameter, you can use the username or user alias. If a verified phone number exists for the user, the confirmation code is sent to the phone number. Otherwise, if a verified email exists, the confirmation code is sent to the email. If neither a verified phone number nor a verified email exists, InvalidParameterException
is thrown. To use the confirmation code for resetting the password, call .
Calling this API causes a message to be sent to the end user with a confirmation code that is required to change the user's password. For the Username
parameter, you can use the username or user alias. The method used to send the confirmation code is sent according to the specified AccountRecoverySetting. For more information, see Recovering User Accounts in the Amazon Cognito Developer Guide. If neither a verified phone number nor a verified email exists, an InvalidParameterException
is thrown. To use the confirmation code for resetting the password, call .
If UserDataShared
is true
, Amazon Cognito will include user data in the events it publishes to Amazon Pinpoint analytics.
The Amazon Pinpoint analytics configuration for collecting metrics for a user pool.
" + "documentation":"The Amazon Pinpoint analytics configuration for collecting metrics for a user pool.
Cognito User Pools only supports sending events to Amazon Pinpoint projects in the US East (N. Virginia) us-east-1 Region, regardless of the region in which the user pool resides.
The endpoint ID.
" } }, - "documentation":"An Amazon Pinpoint analytics endpoint.
An endpoint uniquely identifies a mobile device, email address, or phone number that can receive messages from Amazon Pinpoint analytics.
" + "documentation":"An Amazon Pinpoint analytics endpoint.
An endpoint uniquely identifies a mobile device, email address, or phone number that can receive messages from Amazon Pinpoint analytics.
Cognito User Pools only supports sending events to Amazon Pinpoint projects in the US East (N. Virginia) us-east-1 Region, regardless of the region in which the user pool resides.
The identity provider details. The following list describes the provider detail keys for each identity provider type.
For Google, Facebook and Login with Amazon:
client_id
client_secret
authorize_scopes
For Sign in with Apple:
client_id
team_id
key_id
private_key
authorize_scopes
For OIDC providers:
client_id
client_secret
attributes_request_method
oidc_issuer
authorize_scopes
authorize_url if not available from discovery URL specified by oidc_issuer key
token_url if not available from discovery URL specified by oidc_issuer key
attributes_url if not available from discovery URL specified by oidc_issuer key
jwks_uri if not available from discovery URL specified by oidc_issuer key
authorize_scopes
For SAML providers:
MetadataFile OR MetadataURL
IDPSignOut optional
The identity provider details. The following list describes the provider detail keys for each identity provider type.
For Google, Facebook and Login with Amazon:
client_id
client_secret
authorize_scopes
For Sign in with Apple:
client_id
team_id
key_id
private_key
authorize_scopes
For OIDC providers:
client_id
client_secret
attributes_request_method
oidc_issuer
authorize_scopes
authorize_url if not available from discovery URL specified by oidc_issuer key
token_url if not available from discovery URL specified by oidc_issuer key
attributes_url if not available from discovery URL specified by oidc_issuer key
jwks_uri if not available from discovery URL specified by oidc_issuer key
authorize_scopes
For SAML providers:
MetadataFile OR MetadataURL
IDPSignout optional
The Amazon Pinpoint analytics configuration for collecting metrics for this user pool.
" + "documentation":"The Amazon Pinpoint analytics configuration for collecting metrics for this user pool.
Cognito User Pools only supports sending events to Amazon Pinpoint projects in the US East (N. Virginia) us-east-1 Region, regardless of the region in which the user pool resides.
The risk level.
" + }, + "CompromisedCredentialsDetected":{ + "shape":"WrappedBooleanType", + "documentation":"Indicates whether compromised credentials were detected during an authentication event.
" } }, "documentation":"The event risk type.
" @@ -7266,7 +7270,7 @@ }, "AnalyticsConfiguration":{ "shape":"AnalyticsConfigurationType", - "documentation":"The Amazon Pinpoint analytics configuration for collecting metrics for this user pool.
" + "documentation":"The Amazon Pinpoint analytics configuration for collecting metrics for this user pool.
Cognito User Pools only supports sending events to Amazon Pinpoint projects in the US East (N. Virginia) us-east-1 Region, regardless of the region in which the user pool resides.
The Amazon Pinpoint analytics configuration for the user pool client.
" + "documentation":"The Amazon Pinpoint analytics configuration for the user pool client.
Cognito User Pools only supports sending events to Amazon Pinpoint projects in the US East (N. Virginia) us-east-1 Region, regardless of the region in which the user pool resides.
Runs and maintains a desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount
, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see UpdateService.
In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.
Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING
state. Tasks for services that do use a load balancer are considered healthy if they're in the RUNNING
state and the container instance that they're hosted on is reported as healthy by the load balancer.
There are two service scheduler strategies available:
REPLICA
- The replica scheduling strategy places and maintains the desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service Scheduler Concepts in the Amazon Elastic Container Service Developer Guide.
DAEMON
- The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service Scheduler Concepts in the Amazon Elastic Container Service Developer Guide.
You can optionally specify a deployment configuration for your service. The deployment is triggered by changing properties, such as the task definition or the desired count of a service, with an UpdateService operation. The default value for a replica service for minimumHealthyPercent
is 100%. The default value for a daemon service for minimumHealthyPercent
is 0%.
If a service is using the ECS
deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING
state during a deployment, as a percentage of the desired number of tasks (rounded up to the nearest integer), and while any container instances are in the DRAINING
state if the service contains tasks using the EC2 launch type. This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING
state. Tasks for services that do use a load balancer are considered healthy if they're in the RUNNING
state and they're reported as healthy by the load balancer. The default value for minimum healthy percent is 100%.
If a service is using the ECS
deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING
or PENDING
state during a deployment, as a percentage of the desired number of tasks (rounded down to the nearest integer), and while any container instances are in the DRAINING
state if the service contains tasks using the EC2 launch type. This parameter enables you to define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%.
If a service is using either the CODE_DEPLOY
or EXTERNAL
deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING
state while the container instances are in the DRAINING
state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used, although they're currently visible when describing your service.
When creating a service that uses the EXTERNAL
deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS Deployment Types in the Amazon Elastic Container Service Developer Guide.
When the service scheduler launches new tasks, it determines task placement in your cluster using the following logic:
Determine which of the container instances in your cluster can support your service's task definition (for example, they have the required CPU, memory, ports, and container instance attributes).
By default, the service scheduler attempts to balance tasks across Availability Zones in this manner (although you can choose a different placement strategy) with the placementStrategy
parameter):
Sort the valid container instances, giving priority to instances that have the fewest number of running tasks for this service in their respective Availability Zone. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.
Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.
Runs and maintains a desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount
, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action.
In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.
Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING
state. Tasks for services that do use a load balancer are considered healthy if they're in the RUNNING
state and the container instance that they're hosted on is reported as healthy by the load balancer.
There are two service scheduler strategies available:
REPLICA
- The replica scheduling strategy places and maintains the desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service Scheduler Concepts in the Amazon Elastic Container Service Developer Guide.
DAEMON
- The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service Scheduler Concepts in the Amazon Elastic Container Service Developer Guide.
You can optionally specify a deployment configuration for your service. The deployment is triggered by changing properties, such as the task definition or the desired count of a service, with an UpdateService operation. The default value for a replica service for minimumHealthyPercent
is 100%. The default value for a daemon service for minimumHealthyPercent
is 0%.
If a service is using the ECS
deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING
state during a deployment, as a percentage of the desired number of tasks (rounded up to the nearest integer), and while any container instances are in the DRAINING
state if the service contains tasks using the EC2 launch type. This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING
state. Tasks for services that do use a load balancer are considered healthy if they're in the RUNNING
state and they're reported as healthy by the load balancer. The default value for minimum healthy percent is 100%.
If a service is using the ECS
deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING
or PENDING
state during a deployment, as a percentage of the desired number of tasks (rounded down to the nearest integer), and while any container instances are in the DRAINING
state if the service contains tasks using the EC2 launch type. This parameter enables you to define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%.
If a service is using either the CODE_DEPLOY
or EXTERNAL
deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING
state while the container instances are in the DRAINING
state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used, although they're currently visible when describing your service.
When creating a service that uses the EXTERNAL
deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS Deployment Types in the Amazon Elastic Container Service Developer Guide.
When the service scheduler launches new tasks, it determines task placement in your cluster using the following logic:
Determine which of the container instances in your cluster can support your service's task definition (for example, they have the required CPU, memory, ports, and container instance attributes).
By default, the service scheduler attempts to balance tasks across Availability Zones in this manner (although you can choose a different placement strategy) with the placementStrategy
parameter):
Sort the valid container instances, giving priority to instances that have the fewest number of running tasks for this service in their respective Availability Zone. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.
Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.
Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a COMPLETE
, SUCCESS
, or HEALTHY
status. If a startTimeout
value is specified for containerB and it does not reach the desired status within that time then containerA will give up and not start. This results in the task transitioning to a STOPPED
state.
For tasks using the EC2 launch type, the container instances require at least version 1.26.0 of the container agent to enable a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init
package. If your container instances are launched from version 20190301
or later, then they contain the required versions of the container agent and ecs-init
. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
For tasks using the Fargate launch type, the task or service requires platform version 1.3.0
or later.
Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a COMPLETE
, SUCCESS
, or HEALTHY
status. If a startTimeout
value is specified for containerB and it does not reach the desired status within that time then containerA will give up and not start. This results in the task transitioning to a STOPPED
state.
For tasks using the Fargate launch type, this parameter requires that the task or service uses platform version 1.3.0 or later. If this parameter is not specified, the default value of 3 minutes is used.
For tasks using the EC2 launch type, if the startTimeout
parameter is not specified, the value set for the Amazon ECS container agent configuration variable ECS_CONTAINER_START_TIMEOUT
is used by default. If neither the startTimeout
parameter or the ECS_CONTAINER_START_TIMEOUT
agent configuration variable are set, then the default values of 3 minutes for Linux containers and 8 minutes on Windows containers are used. Your container instances require at least version 1.26.0 of the container agent to enable a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init
package. If your container instances are launched from version 20190301
or later, then they contain the required versions of the container agent and ecs-init
. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own.
For tasks using the Fargate launch type, the max stopTimeout
value is 2 minutes and the task or service requires platform version 1.3.0
or later.
For tasks using the EC2 launch type, the stop timeout value for the container takes precedence over the ECS_CONTAINER_STOP_TIMEOUT
container agent configuration parameter, if used. Container instances require at least version 1.26.0 of the container agent to enable a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init
package. If your container instances are launched from version 20190301
or later, then they contain the required versions of the container agent and ecs-init
. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own.
For tasks using the Fargate launch type, the task or service requires platform version 1.3.0 or later. The max stop timeout value is 120 seconds and if the parameter is not specified, the default value of 30 seconds is used.
For tasks using the EC2 launch type, if the stopTimeout
parameter is not specified, the value set for the Amazon ECS container agent configuration variable ECS_CONTAINER_STOP_TIMEOUT
is used by default. If neither the stopTimeout
parameter or the ECS_CONTAINER_STOP_TIMEOUT
agent configuration variable are set, then the default values of 30 seconds for Linux containers and 30 seconds on Windows containers are used. Your container instances require at least version 1.26.0 of the container agent to enable a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init
package. If your container instances are launched from version 20190301
or later, then they contain the required versions of the container agent and ecs-init
. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
The period of time, in seconds, that the Amazon ECS service scheduler should ignore unhealthy Elastic Load Balancing target health checks after a task has first started. This is only valid if your service is configured to use a load balancer. If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can specify a health check grace period of up to 2,147,483,647 seconds. During that time, the ECS service scheduler ignores health check status. This grace period can prevent the ECS service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.
" + "documentation":"The period of time, in seconds, that the Amazon ECS service scheduler should ignore unhealthy Elastic Load Balancing target health checks after a task has first started. This is only used when your service is configured to use a load balancer. If your service has a load balancer defined and you don't specify a health check grace period value, the default value of 0
is used.
If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can specify a health check grace period of up to 2,147,483,647 seconds. During that time, the Amazon ECS service scheduler ignores health check status. This grace period can prevent the service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.
" }, "schedulingStrategy":{ "shape":"SchedulingStrategy", @@ -3066,11 +3066,11 @@ "members":{ "targetGroupArn":{ "shape":"String", - "documentation":"The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or task set.
A target group ARN is only specified when using an Application Load Balancer or Network Load Balancer. If you are using a Classic Load Balancer this should be omitted.
For services using the ECS
deployment controller, you can specify one or multiple target groups. For more information, see Registering Multiple Target Groups with a Service in the Amazon Elastic Container Service Developer Guide.
For services using the CODE_DEPLOY
deployment controller, you are required to define two target groups for the load balancer. For more information, see Blue/Green Deployment with CodeDeploy in the Amazon Elastic Container Service Developer Guide.
If your service's task definition uses the awsvpc
network mode (which is required for the Fargate launch type), you must choose ip
as the target type, not instance
, when creating your target groups because tasks that use the awsvpc
network mode are associated with an elastic network interface, not an Amazon EC2 instance.
The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or task set.
A target group ARN is only specified when using an Application Load Balancer or Network Load Balancer. If you are using a Classic Load Balancer the target group ARN should be omitted.
For services using the ECS
deployment controller, you can specify one or multiple target groups. For more information, see Registering Multiple Target Groups with a Service in the Amazon Elastic Container Service Developer Guide.
For services using the CODE_DEPLOY
deployment controller, you are required to define two target groups for the load balancer. For more information, see Blue/Green Deployment with CodeDeploy in the Amazon Elastic Container Service Developer Guide.
If your service's task definition uses the awsvpc
network mode (which is required for the Fargate launch type), you must choose ip
as the target type, not instance
, when creating your target groups because tasks that use the awsvpc
network mode are associated with an elastic network interface, not an Amazon EC2 instance.
The name of the load balancer to associate with the Amazon ECS service or task set.
A load balancer name is only specified when using a Classic Load Balancer. If you are using an Application Load Balancer or a Network Load Balancer this should be omitted.
" + "documentation":"The name of the load balancer to associate with the Amazon ECS service or task set.
A load balancer name is only specified when using a Classic Load Balancer. If you are using an Application Load Balancer or a Network Load Balancer the load balancer name parameter should be omitted.
" }, "containerName":{ "shape":"String", @@ -3081,7 +3081,7 @@ "documentation":"The port on the container to associate with the load balancer. This port must correspond to a containerPort
in the task definition the tasks in the service are using. For tasks that use the EC2 launch type, the container instance they are launched on must allow ingress traffic on the hostPort
of the port mapping.
Details on the load balancer or load balancers to use with a service or task set.
" + "documentation":"The load balancer configuration to use with a service or task set.
For specific notes and restrictions regarding the use of load balancers with services and task sets, see the CreateService and CreateTaskSet actions.
" }, "LoadBalancers":{ "type":"list", @@ -5140,6 +5140,14 @@ "documentation":"Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks.
" }, "networkConfiguration":{"shape":"NetworkConfiguration"}, + "placementConstraints":{ + "shape":"PlacementConstraints", + "documentation":"An array of task placement constraint objects to update the service to use. If no value is specified, the existing placement constraints for the service will remain unchanged. If this value is specified, it will override any existing placement constraints defined for the service. To remove all existing placement constraints, specify an empty array.
You can specify a maximum of 10 constraints per task (this limit includes constraints in the task definition and those specified at runtime).
" + }, + "placementStrategy":{ + "shape":"PlacementStrategies", + "documentation":"The task placement strategy objects to update the service to use. If no value is specified, the existing placement strategy for the service will remain unchanged. If this value is specified, it will override the existing placement strategy defined for the service. To remove an existing placement strategy, specify an empty object.
You can specify a maximum of five strategy rules per service.
" + }, "platformVersion":{ "shape":"String", "documentation":"The platform version on which your tasks in the service are running. A platform version is only specified for tasks using the Fargate launch type. If a platform version is not specified, the LATEST
platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.
Creates a new cache subnet group.
Use this parameter only when you are creating a cluster in an Amazon Virtual Private Cloud (Amazon VPC).
" }, + "CreateGlobalReplicationGroup":{ + "name":"CreateGlobalReplicationGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateGlobalReplicationGroupMessage"}, + "output":{ + "shape":"CreateGlobalReplicationGroupResult", + "resultWrapper":"CreateGlobalReplicationGroupResult" + }, + "errors":[ + {"shape":"ReplicationGroupNotFoundFault"}, + {"shape":"InvalidReplicationGroupStateFault"}, + {"shape":"GlobalReplicationGroupAlreadyExistsFault"}, + {"shape":"ServiceLinkedRoleNotFoundFault"}, + {"shape":"InvalidParameterValueException"} + ], + "documentation":"Global Datastore for Redis offers fully managed, fast, reliable and secure cross-region replication. Using Global Datastore for Redis, you can create cross-region read replica clusters for ElastiCache for Redis to enable low-latency reads and disaster recovery across regions. For more information, see Replication Across Regions Using Global Datastore.
The GlobalReplicationGroupId is the name of the Global Datastore.
The PrimaryReplicationGroupId represents the name of the primary cluster that accepts writes and will replicate updates to the secondary cluster.
Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group.
A Redis (cluster mode disabled) replication group is a collection of clusters, where one of the clusters is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas.
A Redis (cluster mode enabled) replication group is a collection of 1 to 90 node groups (shards). Each node group (shard) has one read/write primary node and up to 5 read-only replica nodes. Writes to the primary are asynchronously propagated to the replicas. Redis (cluster mode enabled) replication groups partition the data across node groups (shards).
When a Redis (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. You cannot alter a Redis (cluster mode enabled) replication group after it has been created. However, if you need to increase or decrease the number of node groups (console: shards), you can avail yourself of ElastiCache for Redis' enhanced backup and restore. For more information, see Restoring From a Backup with Cluster Resizing in the ElastiCache User Guide.
This operation is valid for Redis only.
Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group.
This API can be used to create a standalone regional replication group or a secondary replication group associated with a Global Datastore.
A Redis (cluster mode disabled) replication group is a collection of clusters, where one of the clusters is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas.
A Redis (cluster mode enabled) replication group is a collection of 1 to 90 node groups (shards). Each node group (shard) has one read/write primary node and up to 5 read-only replica nodes. Writes to the primary are asynchronously propagated to the replicas. Redis (cluster mode enabled) replication groups partition the data across node groups (shards).
When a Redis (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. You cannot alter a Redis (cluster mode enabled) replication group after it has been created. However, if you need to increase or decrease the number of node groups (console: shards), you can avail yourself of ElastiCache for Redis' enhanced backup and restore. For more information, see Restoring From a Backup with Cluster Resizing in the ElastiCache User Guide.
This operation is valid for Redis only.
Creates a copy of an entire cluster or replication group at a specific moment in time.
This operation is valid for Redis only.
Decreases the number of node groups in a Global Datastore
" + }, "DecreaseReplicaCount":{ "name":"DecreaseReplicaCount", "http":{ @@ -289,7 +330,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"Dynamically decreases the number of replics in a Redis (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Redis (cluster mode enabled) replication group. This operation is performed with no cluster down time.
" + "documentation":"Dynamically decreases the number of replicas in a Redis (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Redis (cluster mode enabled) replication group. This operation is performed with no cluster down time.
" }, "DeleteCacheCluster":{ "name":"DeleteCacheCluster", @@ -356,6 +397,24 @@ ], "documentation":"Deletes a cache subnet group.
You cannot delete a cache subnet group if it is associated with any clusters.
Deleting a Global Datastore is a two-step process:
First, you must DisassociateGlobalReplicationGroup to remove the secondary clusters in the Global Datastore.
Once the Global Datastore contains only the primary cluster, you can use DeleteGlobalReplicationGroup API to delete the Global Datastore while retaining the primary cluster using RetainPrimaryCluster=true.
Since the Global Datastore has only a primary cluster, you can delete the Global Datastore while retaining the primary by setting RetainPrimaryCluster=true
.
When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the selected resources; you cannot cancel or revert this operation.
This operation is valid for Redis only.
Returns events related to clusters, cache security groups, and cache parameter groups. You can obtain events specific to a particular cluster, cache security group, or cache parameter group by providing the name as a parameter.
By default, only the events occurring within the last hour are returned; however, you can retrieve up to 14 days' worth of events if necessary.
" }, + "DescribeGlobalReplicationGroups":{ + "name":"DescribeGlobalReplicationGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeGlobalReplicationGroupsMessage"}, + "output":{ + "shape":"DescribeGlobalReplicationGroupsResult", + "resultWrapper":"DescribeGlobalReplicationGroupsResult" + }, + "errors":[ + {"shape":"GlobalReplicationGroupNotFoundFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ], + "documentation":"Returns information about a particular global replication group. If no identifier is specified, returns information about all Global Datastores.
" + }, "DescribeReplicationGroups":{ "name":"DescribeReplicationGroups", "http":{ @@ -640,6 +717,62 @@ ], "documentation":"Returns details of the update actions
" }, + "DisassociateGlobalReplicationGroup":{ + "name":"DisassociateGlobalReplicationGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateGlobalReplicationGroupMessage"}, + "output":{ + "shape":"DisassociateGlobalReplicationGroupResult", + "resultWrapper":"DisassociateGlobalReplicationGroupResult" + }, + "errors":[ + {"shape":"GlobalReplicationGroupNotFoundFault"}, + {"shape":"InvalidGlobalReplicationGroupStateFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ], + "documentation":"Remove a secondary cluster from the Global Datastore using the Global Datastore name. The secondary cluster will no longer receive updates from the primary cluster, but will remain as a standalone cluster in that AWS region.
" + }, + "FailoverGlobalReplicationGroup":{ + "name":"FailoverGlobalReplicationGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"FailoverGlobalReplicationGroupMessage"}, + "output":{ + "shape":"FailoverGlobalReplicationGroupResult", + "resultWrapper":"FailoverGlobalReplicationGroupResult" + }, + "errors":[ + {"shape":"GlobalReplicationGroupNotFoundFault"}, + {"shape":"InvalidGlobalReplicationGroupStateFault"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterCombinationException"} + ], + "documentation":"Used to failover the primary region to a selected secondary region.
" + }, + "IncreaseNodeGroupsInGlobalReplicationGroup":{ + "name":"IncreaseNodeGroupsInGlobalReplicationGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"IncreaseNodeGroupsInGlobalReplicationGroupMessage"}, + "output":{ + "shape":"IncreaseNodeGroupsInGlobalReplicationGroupResult", + "resultWrapper":"IncreaseNodeGroupsInGlobalReplicationGroupResult" + }, + "errors":[ + {"shape":"GlobalReplicationGroupNotFoundFault"}, + {"shape":"InvalidGlobalReplicationGroupStateFault"}, + {"shape":"InvalidParameterValueException"} + ], + "documentation":"Increase the number of node groups in the Global Datastore
" + }, "IncreaseReplicaCount":{ "name":"IncreaseReplicaCount", "http":{ @@ -745,7 +878,8 @@ {"shape":"CacheParameterGroupNotFoundFault"}, {"shape":"InvalidCacheParameterGroupStateFault"}, {"shape":"InvalidParameterValueException"}, - {"shape":"InvalidParameterCombinationException"} + {"shape":"InvalidParameterCombinationException"}, + {"shape":"InvalidGlobalReplicationGroupStateFault"} ], "documentation":"Modifies the parameters of a cache parameter group. You can modify up to 20 parameters in a single request by submitting a list parameter name and value pairs.
" }, @@ -768,6 +902,24 @@ ], "documentation":"Modifies an existing cache subnet group.
" }, + "ModifyGlobalReplicationGroup":{ + "name":"ModifyGlobalReplicationGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyGlobalReplicationGroupMessage"}, + "output":{ + "shape":"ModifyGlobalReplicationGroupResult", + "resultWrapper":"ModifyGlobalReplicationGroupResult" + }, + "errors":[ + {"shape":"GlobalReplicationGroupNotFoundFault"}, + {"shape":"InvalidGlobalReplicationGroupStateFault"}, + {"shape":"InvalidParameterValueException"} + ], + "documentation":"Modifies the settings for a Global Datastore.
" + }, "ModifyReplicationGroup":{ "name":"ModifyReplicationGroup", "http":{ @@ -842,6 +994,24 @@ ], "documentation":"Allows you to purchase a reserved cache node offering.
" }, + "RebalanceSlotsInGlobalReplicationGroup":{ + "name":"RebalanceSlotsInGlobalReplicationGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebalanceSlotsInGlobalReplicationGroupMessage"}, + "output":{ + "shape":"RebalanceSlotsInGlobalReplicationGroupResult", + "resultWrapper":"RebalanceSlotsInGlobalReplicationGroupResult" + }, + "errors":[ + {"shape":"GlobalReplicationGroupNotFoundFault"}, + {"shape":"InvalidGlobalReplicationGroupStateFault"}, + {"shape":"InvalidParameterValueException"} + ], + "documentation":"Redistribute slots to ensure unifirom distribution across existing shards in the cluster.
" + }, "RebootCacheCluster":{ "name":"RebootCacheCluster", "http":{ @@ -893,7 +1063,8 @@ {"shape":"InvalidCacheParameterGroupStateFault"}, {"shape":"CacheParameterGroupNotFoundFault"}, {"shape":"InvalidParameterValueException"}, - {"shape":"InvalidParameterCombinationException"} + {"shape":"InvalidParameterCombinationException"}, + {"shape":"InvalidGlobalReplicationGroupStateFault"} ], "documentation":"Modifies the parameters of a cache parameter group to the engine or system default value. You can reset specific parameters by submitting a list of parameter names. To reset the entire cache parameter group, specify the ResetAllParameters
and CacheParameterGroupName
parameters.
A string list, each element of which specifies a cache node type which you can use to scale your cluster or replication group.
When scaling down on a Redis cluster or replication group using ModifyCacheCluster
or ModifyReplicationGroup
, use a value from this list for the CacheNodeType
parameter.
A string list, each element of which specifies a cache node type which you can use to scale your cluster or replication group. When scaling down a Redis cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from this list for the CacheNodeType parameter.
" } }, "documentation":"Represents the allowed node types you can use to modify your cluster or replication group.
" @@ -1169,7 +1340,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"The name of the compute and memory capacity node type for the cluster.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
Previous generation: (not recommended)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
Previous generation: (not recommended)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Redis append-only files (AOF) are not supported for T1 or T2 instances.
Redis Multi-AZ with automatic failover is not supported on T1 instances.
Redis configuration variables appendonly
and appendfsync
are not supported on Redis version 2.8.22 and later.
The name of the compute and memory capacity node type for the cluster.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T3 node types: cache.t3.micro
, cache.t3.small
, cache.t3.medium
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
Previous generation: (not recommended)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
Previous generation: (not recommended)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Redis append-only files (AOF) are not supported for T1 or T2 instances.
Redis Multi-AZ with automatic failover is not supported on T1 instances.
Redis configuration variables appendonly
and appendfsync
are not supported on Redis version 2.8.22 and later.
The current state of this cache node.
" + "documentation":"The current state of this cache node, one of the following values: available
, creating
, rebooting
, or deleting
.
The Availability Zone where this node was created and now resides.
" } }, - "documentation":"Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached or Redis.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
Previous generation: (not recommended)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
Previous generation: (not recommended)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Redis append-only files (AOF) are not supported for T1 or T2 instances.
Redis Multi-AZ with automatic failover is not supported on T1 instances.
Redis configuration variables appendonly
and appendfsync
are not supported on Redis version 2.8.22 and later.
Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached or Redis.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T3 node types: cache.t3.micro
, cache.t3.small
, cache.t3.medium
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
Previous generation: (not recommended)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
Previous generation: (not recommended)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Redis append-only files (AOF) are not supported for T1 or T2 instances.
Redis Multi-AZ with automatic failover is not supported on T1 instances.
Redis configuration variables appendonly
and appendfsync
are not supported on Redis version 2.8.22 and later.
The description for this cache parameter group.
" + }, + "IsGlobal":{ + "shape":"Boolean", + "documentation":"Indicates whether the parameter group is associated with a Global Datastore
" } }, "documentation":"Represents the output of a CreateCacheParameterGroup
operation.
The compute and memory capacity of the nodes in the node group (shard).
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
Previous generation: (not recommended)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
Previous generation: (not recommended)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Redis append-only files (AOF) are not supported for T1 or T2 instances.
Redis Multi-AZ with automatic failover is not supported on T1 instances.
Redis configuration variables appendonly
and appendfsync
are not supported on Redis version 2.8.22 and later.
The compute and memory capacity of the nodes in the node group (shard).
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T3 node types: cache.t3.micro
, cache.t3.small
, cache.t3.medium
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
Previous generation: (not recommended)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
Previous generation: (not recommended)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Redis append-only files (AOF) are not supported for T1 or T2 instances.
Redis Multi-AZ with automatic failover is not supported on T1 instances.
Redis configuration variables appendonly
and appendfsync
are not supported on Redis version 2.8.22 and later.
The suffix for name of a Global Datastore. The suffix guarantees uniqueness of the Global Datastore name across multiple regions.
" + }, + "GlobalReplicationGroupDescription":{ + "shape":"String", + "documentation":"Provides details of the Global Datastore
" + }, + "PrimaryReplicationGroupId":{ + "shape":"String", + "documentation":"The name of the primary cluster that accepts writes and will replicate updates to the secondary cluster.
" + } + } + }, + "CreateGlobalReplicationGroupResult":{ + "type":"structure", + "members":{ + "GlobalReplicationGroup":{"shape":"GlobalReplicationGroup"} + } + }, "CreateReplicationGroupMessage":{ "type":"structure", "required":[ @@ -2156,6 +2358,10 @@ "shape":"String", "documentation":"A user-created description for the replication group.
" }, + "GlobalReplicationGroupId":{ + "shape":"String", + "documentation":"The name of the Global Datastore
" + }, "PrimaryClusterId":{ "shape":"String", "documentation":"The identifier of the cluster that serves as the primary for this replication group. This cluster must already exist and have a status of available
.
This parameter is not required if NumCacheClusters
, NumNodeGroups
, or ReplicasPerNodeGroup
is specified.
The compute and memory capacity of the nodes in the node group (shard).
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
Previous generation: (not recommended)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
Previous generation: (not recommended)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Redis append-only files (AOF) are not supported for T1 or T2 instances.
Redis Multi-AZ with automatic failover is not supported on T1 instances.
Redis configuration variables appendonly
and appendfsync
are not supported on Redis version 2.8.22 and later.
The compute and memory capacity of the nodes in the node group (shard).
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T3 node types: cache.t3.micro
, cache.t3.small
, cache.t3.medium
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
Previous generation: (not recommended)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
Previous generation: (not recommended)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Redis append-only files (AOF) are not supported for T1 or T2 instances.
Redis Multi-AZ with automatic failover is not supported on T1 instances.
Redis configuration variables appendonly
and appendfsync
are not supported on Redis version 2.8.22 and later.
The ID of the KMS key used to encrypt the disk on the cluster.
" + "documentation":"The ID of the KMS key used to encrypt the disk in the cluster.
" } }, "documentation":"Represents the input of a CreateReplicationGroup
operation.
The name of the Global Datastore
" + }, + "NodeGroupCount":{ + "shape":"Integer", + "documentation":"The number of node groups (shards) that results from the modification of the shard configuration
" + }, + "GlobalNodeGroupsToRemove":{ + "shape":"GlobalNodeGroupIdList", + "documentation":"If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. NodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. ElastiCache for Redis will attempt to remove all node groups listed by NodeGroupsToRemove from the cluster.
" + }, + "GlobalNodeGroupsToRetain":{ + "shape":"GlobalNodeGroupIdList", + "documentation":"If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. NodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. ElastiCache for Redis will attempt to remove all node groups listed by NodeGroupsToRemove from the cluster.
" + }, + "ApplyImmediately":{ + "shape":"Boolean", + "documentation":"Indicates that the shard reconfiguration process begins immediately. At present, the only permitted value for this parameter is true.
" + } + } + }, + "DecreaseNodeGroupsInGlobalReplicationGroupResult":{ + "type":"structure", + "members":{ + "GlobalReplicationGroup":{"shape":"GlobalReplicationGroup"} + } + }, "DecreaseReplicaCountMessage":{ "type":"structure", "required":[ @@ -2409,6 +2651,29 @@ }, "documentation":"Represents the input of a DeleteCacheSubnetGroup
operation.
The name of the Global Datastore
" + }, + "RetainPrimaryReplicationGroup":{ + "shape":"Boolean", + "documentation":"If set to true
, the primary replication is retained as a standalone replication group.
Represents the input of a DescribeEvents
operation.
The name of the Global Datastore
" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved.
" + }, + "Marker":{ + "shape":"String", + "documentation":"An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords
.
Returns the list of members that comprise the Global Datastore.
" + } + } + }, + "DescribeGlobalReplicationGroupsResult":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. >
" + }, + "GlobalReplicationGroups":{ + "shape":"GlobalReplicationGroupList", + "documentation":"Indicates the slot configuration and global identifier for each slice group.
" + } + } + }, "DescribeReplicationGroupsMessage":{ "type":"structure", "members":{ @@ -2674,7 +2973,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
Previous generation: (not recommended)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
Previous generation: (not recommended)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Redis append-only files (AOF) are not supported for T1 or T2 instances.
Redis Multi-AZ with automatic failover is not supported on T1 instances.
Redis configuration variables appendonly
and appendfsync
are not supported on Redis version 2.8.22 and later.
The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T3 node types: cache.t3.micro
, cache.t3.small
, cache.t3.medium
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
Previous generation: (not recommended)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
Previous generation: (not recommended)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Redis append-only files (AOF) are not supported for T1 or T2 instances.
Redis Multi-AZ with automatic failover is not supported on T1 instances.
Redis configuration variables appendonly
and appendfsync
are not supported on Redis version 2.8.22 and later.
The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
Previous generation: (not recommended)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
Previous generation: (not recommended)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Redis append-only files (AOF) are not supported for T1 or T2 instances.
Redis Multi-AZ with automatic failover is not supported on T1 instances.
Redis configuration variables appendonly
and appendfsync
are not supported on Redis version 2.8.22 and later.
The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T3 node types: cache.t3.micro
, cache.t3.small
, cache.t3.medium
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
Previous generation: (not recommended)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
Previous generation: (not recommended)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Redis append-only files (AOF) are not supported for T1 or T2 instances.
Redis Multi-AZ with automatic failover is not supported on T1 instances.
Redis configuration variables appendonly
and appendfsync
are not supported on Redis version 2.8.22 and later.
The name of the Global Datastore
" + }, + "ReplicationGroupId":{ + "shape":"String", + "documentation":"The name of the secondary cluster you wish to remove from the Global Datastore
" + }, + "ReplicationGroupRegion":{ + "shape":"String", + "documentation":"The AWS region of secondary cluster you wish to remove from the Global Datastore
" + } + } + }, + "DisassociateGlobalReplicationGroupResult":{ + "type":"structure", + "members":{ + "GlobalReplicationGroup":{"shape":"GlobalReplicationGroup"} + } + }, "Double":{"type":"double"}, "EC2SecurityGroup":{ "type":"structure", @@ -2953,6 +3280,228 @@ }, "documentation":"Represents the output of a DescribeEvents
operation.
The name of the Global Datastore
" + }, + "PrimaryRegion":{ + "shape":"String", + "documentation":"The AWS region of the primary cluster of the Global Datastore
" + }, + "PrimaryReplicationGroupId":{ + "shape":"String", + "documentation":"The name of the primary replication group
" + } + } + }, + "FailoverGlobalReplicationGroupResult":{ + "type":"structure", + "members":{ + "GlobalReplicationGroup":{"shape":"GlobalReplicationGroup"} + } + }, + "GlobalNodeGroup":{ + "type":"structure", + "members":{ + "GlobalNodeGroupId":{ + "shape":"String", + "documentation":"The name of the global node group
" + }, + "Slots":{ + "shape":"String", + "documentation":"The keyspace for this node group
" + } + }, + "documentation":"Indicates the slot configuration and global identifier for a slice group.
" + }, + "GlobalNodeGroupIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"GlobalNodeGroupId" + } + }, + "GlobalNodeGroupList":{ + "type":"list", + "member":{ + "shape":"GlobalNodeGroup", + "locationName":"GlobalNodeGroup" + } + }, + "GlobalReplicationGroup":{ + "type":"structure", + "members":{ + "GlobalReplicationGroupId":{ + "shape":"String", + "documentation":"The name of the Global Datastore
" + }, + "GlobalReplicationGroupDescription":{ + "shape":"String", + "documentation":"The optional description of the Global Datastore
" + }, + "Status":{ + "shape":"String", + "documentation":"The status of the Global Datastore
" + }, + "CacheNodeType":{ + "shape":"String", + "documentation":"The cache node type of the Global Datastore
" + }, + "Engine":{ + "shape":"String", + "documentation":"The Elasticache engine. For preview, it is Redis only.
" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"The Elasticache Redis engine version. For preview, it is Redis version 5.0.5 only.
" + }, + "Members":{ + "shape":"GlobalReplicationGroupMemberList", + "documentation":"The replication groups that comprise the Global Datastore.
" + }, + "ClusterEnabled":{ + "shape":"BooleanOptional", + "documentation":"A flag that indicates whether the Global Datastore is cluster enabled.
" + }, + "GlobalNodeGroups":{ + "shape":"GlobalNodeGroupList", + "documentation":"Indicates the slot configuration and global identifier for each slice group.
" + }, + "AuthTokenEnabled":{ + "shape":"BooleanOptional", + "documentation":"A flag that enables using an AuthToken
(password) when issuing Redis commands.
Default: false
A flag that enables in-transit encryption when set to true. You cannot modify the value of TransitEncryptionEnabled
after the cluster is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled
to true when you create a cluster.
A flag that enables encryption at rest when set to true
.
You cannot modify the value of AtRestEncryptionEnabled
after the replication group is created. To enable encryption at rest on a replication group you must set AtRestEncryptionEnabled
to true
when you create the replication group.
Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6
, 4.x
or later.
Consists of a primary cluster that accepts writes and an associated secondary cluster that resides in a different AWS region. The secondary cluster accepts only reads. The primary cluster automatically replicates updates to the secondary cluster.
The GlobalReplicationGroupId represents the name of the Global Datastore, which is what you use to associate a secondary cluster.
The Global Datastore name already exists.
", + "error":{ + "code":"GlobalReplicationGroupAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "GlobalReplicationGroupInfo":{ + "type":"structure", + "members":{ + "GlobalReplicationGroupId":{ + "shape":"String", + "documentation":"The name of the Global Datastore
" + }, + "GlobalReplicationGroupMemberRole":{ + "shape":"String", + "documentation":"The role of the replication group in a Global Datastore. Can be primary or secondary.
" + } + }, + "documentation":"The name of the Global Datastore and role of this replication group in the Global Datastore.
" + }, + "GlobalReplicationGroupList":{ + "type":"list", + "member":{ + "shape":"GlobalReplicationGroup", + "locationName":"GlobalReplicationGroup" + } + }, + "GlobalReplicationGroupMember":{ + "type":"structure", + "members":{ + "ReplicationGroupId":{ + "shape":"String", + "documentation":"The replication group id of the Global Datastore member.
" + }, + "ReplicationGroupRegion":{ + "shape":"String", + "documentation":"The AWS region of the Global Datastore member.
" + }, + "Role":{ + "shape":"String", + "documentation":"Indicates the role of the replication group, primary or secondary.
" + }, + "AutomaticFailover":{ + "shape":"AutomaticFailoverStatus", + "documentation":"Indicates whether automatic failover is enabled for the replication group.
" + }, + "Status":{ + "shape":"String", + "documentation":"The status of the membership of the replication group.
" + } + }, + "documentation":"A member of a Global Datastore. It contains the Replication Group Id, the AWS region and the role of the replication group.
", + "wrapper":true + }, + "GlobalReplicationGroupMemberList":{ + "type":"list", + "member":{ + "shape":"GlobalReplicationGroupMember", + "locationName":"GlobalReplicationGroupMember" + } + }, + "GlobalReplicationGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"The Global Datastore does not exist
", + "error":{ + "code":"GlobalReplicationGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "IncreaseNodeGroupsInGlobalReplicationGroupMessage":{ + "type":"structure", + "required":[ + "GlobalReplicationGroupId", + "NodeGroupCount", + "ApplyImmediately" + ], + "members":{ + "GlobalReplicationGroupId":{ + "shape":"String", + "documentation":"The name of the Global Datastore
" + }, + "NodeGroupCount":{ + "shape":"Integer", + "documentation":"The number of node groups you wish to add
" + }, + "RegionalConfigurations":{ + "shape":"RegionalConfigurationList", + "documentation":"Describes the replication group IDs, the AWS regions where they are stored and the shard configuration for each that comprise the Global Datastore
" + }, + "ApplyImmediately":{ + "shape":"Boolean", + "documentation":"Indicates that the process begins immediately. At present, the only permitted value for this parameter is true.
" + } + } + }, + "IncreaseNodeGroupsInGlobalReplicationGroupResult":{ + "type":"structure", + "members":{ + "GlobalReplicationGroup":{"shape":"GlobalReplicationGroup"} + } + }, "IncreaseReplicaCountMessage":{ "type":"structure", "required":[ @@ -3046,6 +3595,18 @@ }, "exception":true }, + "InvalidGlobalReplicationGroupStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"The Global Datastore is not available
", + "error":{ + "code":"InvalidGlobalReplicationGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "InvalidKMSKeyFault":{ "type":"structure", "members":{ @@ -3301,6 +3862,45 @@ "CacheSubnetGroup":{"shape":"CacheSubnetGroup"} } }, + "ModifyGlobalReplicationGroupMessage":{ + "type":"structure", + "required":[ + "GlobalReplicationGroupId", + "ApplyImmediately" + ], + "members":{ + "GlobalReplicationGroupId":{ + "shape":"String", + "documentation":"The name of the Global Datastore
" + }, + "ApplyImmediately":{ + "shape":"Boolean", + "documentation":"If true, this parameter causes the modifications in this request and any pending modifications to be applied, asynchronously and as soon as possible, regardless of the PreferredMaintenanceWindow setting for the replication group. If false, changes to the nodes in the replication group are applied on the next maintenance reboot, or the next failure reboot, whichever occurs first.
" + }, + "CacheNodeType":{ + "shape":"String", + "documentation":"A valid cache node type that you want to scale this Global Datastore to.
" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"The upgraded version of the cache engine to be run on the clusters in the Global Datastore.
" + }, + "GlobalReplicationGroupDescription":{ + "shape":"String", + "documentation":"A description of the Global Datastore
" + }, + "AutomaticFailoverEnabled":{ + "shape":"BooleanOptional", + "documentation":"Determines whether a read replica is automatically promoted to read/write primary if the existing primary encounters a failure.
" + } + } + }, + "ModifyGlobalReplicationGroupResult":{ + "type":"structure", + "members":{ + "GlobalReplicationGroup":{"shape":"GlobalReplicationGroup"} + } + }, "ModifyReplicationGroupMessage":{ "type":"structure", "required":["ReplicationGroupId"], @@ -3925,6 +4525,29 @@ "ReservedCacheNode":{"shape":"ReservedCacheNode"} } }, + "RebalanceSlotsInGlobalReplicationGroupMessage":{ + "type":"structure", + "required":[ + "GlobalReplicationGroupId", + "ApplyImmediately" + ], + "members":{ + "GlobalReplicationGroupId":{ + "shape":"String", + "documentation":"The name of the Global Datastore
" + }, + "ApplyImmediately":{ + "shape":"Boolean", + "documentation":"If True
, redistribution is applied immediately.
The name of the secondary cluster
" + }, + "ReplicationGroupRegion":{ + "shape":"String", + "documentation":"The AWS region where the cluster is stored
" + }, + "ReshardingConfiguration":{ + "shape":"ReshardingConfigurationList", + "documentation":"A list of PreferredAvailabilityZones
objects that specifies the configuration of a node group in the resharded cluster.
A list of the replication groups
" + }, + "RegionalConfigurationList":{ + "type":"list", + "member":{ + "shape":"RegionalConfiguration", + "locationName":"RegionalConfiguration" + } + }, "RemoveReplicasList":{ "type":"list", "member":{"shape":"String"} @@ -4011,6 +4664,10 @@ "shape":"String", "documentation":"The user supplied description of the replication group.
" }, + "GlobalReplicationGroupInfo":{ + "shape":"GlobalReplicationGroupInfo", + "documentation":"The name of the Global Datastore and role of this replication group in the Global Datastore.
" + }, "Status":{ "shape":"String", "documentation":"The current state of this replication group - creating
, available
, modifying
, deleting
, create-failed
, snapshotting
.
The cache node type for the reserved cache nodes.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
Previous generation: (not recommended)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
Previous generation: (not recommended)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Redis append-only files (AOF) are not supported for T1 or T2 instances.
Redis Multi-AZ with automatic failover is not supported on T1 instances.
Redis configuration variables appendonly
and appendfsync
are not supported on Redis version 2.8.22 and later.
The cache node type for the reserved cache nodes.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T3 node types: cache.t3.micro
, cache.t3.small
, cache.t3.medium
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
Previous generation: (not recommended)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
Previous generation: (not recommended)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Redis append-only files (AOF) are not supported for T1 or T2 instances.
Redis Multi-AZ with automatic failover is not supported on T1 instances.
Redis configuration variables appendonly
and appendfsync
are not supported on Redis version 2.8.22 and later.
The cache node type for the reserved cache node.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
Previous generation: (not recommended)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
Previous generation: (not recommended)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Redis append-only files (AOF) are not supported for T1 or T2 instances.
Redis Multi-AZ with automatic failover is not supported on T1 instances.
Redis configuration variables appendonly
and appendfsync
are not supported on Redis version 2.8.22 and later.
The cache node type for the reserved cache node.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T3 node types: cache.t3.micro
, cache.t3.small
, cache.t3.medium
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
Previous generation: (not recommended)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
Previous generation: (not recommended)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Redis append-only files (AOF) are not supported for T1 or T2 instances.
Redis Multi-AZ with automatic failover is not supported on T1 instances.
Redis configuration variables appendonly
and appendfsync
are not supported on Redis version 2.8.22 and later.
The name of the compute and memory capacity node type for the source cluster.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
Previous generation: (not recommended)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
Previous generation: (not recommended)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Redis append-only files (AOF) are not supported for T1 or T2 instances.
Redis Multi-AZ with automatic failover is not supported on T1 instances.
Redis configuration variables appendonly
and appendfsync
are not supported on Redis version 2.8.22 and later.
The name of the compute and memory capacity node type for the source cluster.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T3 node types: cache.t3.micro
, cache.t3.small
, cache.t3.medium
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
Previous generation: (not recommended)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
Previous generation: (not recommended)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Redis append-only files (AOF) are not supported for T1 or T2 instances.
Redis Multi-AZ with automatic failover is not supported on T1 instances.
Redis configuration variables appendonly
and appendfsync
are not supported on Redis version 2.8.22 and later.
The Availability Zone.
", + "documentation":"The Availability Zone.
You must specify AvailabilityZone
or AvailabilityZoneId
.
The ID of the Availability Zone.
", + "documentation":"The ID of the Availability Zone.
You must specify AvailabilityZone
or AvailabilityZoneId
.
If the batch inference job failed, the reason for the failure.
" + }, + "solutionVersionArn":{ + "shape":"Arn", + "documentation":"The ARN of the solution version used by the batch inference job.
" } }, "documentation":"A truncated version of the BatchInferenceJob datatype. The ListBatchInferenceJobs operation returns a list of batch inference job summaries.
" @@ -2130,7 +2134,7 @@ "members":{ "type":{ "shape":"HPOObjectiveType", - "documentation":"The data type of the metric.
" + "documentation":"The type of the metric. Valid values are Maximize
and Minimize
.
The scope of training used to create the solution version. The FULL
option trains the solution version based on the entirety of the input solution's training data, while the UPDATE
option processes only the training data that has changed since the creation of the last solution version. Choose UPDATE
when you want to start recommending items added to the dataset without retraining the model.
The UPDATE
option can only be used after you've created a solution version with the FULL
option and the training solution uses the native-recipe-hrnn-coldstart.
If hyperparameter optimization was performed, contains the hyperparameter values of the best performing model.
" + }, "status":{ "shape":"Status", "documentation":"The status of the solution version.
A solution version can be in one of the following states:
CREATE PENDING
CREATE IN_PROGRESS
ACTIVE
CREATE FAILED
A list of the hyperparameter values of the best performing model.
" + } + }, + "documentation":"If hyperparameter optimization (HPO) was performed, contains the hyperparameter values of the best performing model.
" + }, "UpdateCampaignRequest":{ "type":"structure", "required":["campaignArn"], diff --git a/botocore/data/rds/2014-10-31/service-2.json b/botocore/data/rds/2014-10-31/service-2.json index 18989240..076c6c69 100644 --- a/botocore/data/rds/2014-10-31/service-2.json +++ b/botocore/data/rds/2014-10-31/service-2.json @@ -7389,7 +7389,7 @@ "documentation":" An optional pagination token provided by a previous DescribeExportTasks
request. If you specify this parameter, the response includes only records beyond the marker, up to the value specified by the MaxRecords
parameter.
The maximum number of records to include in the response. If more records exist than the specified value, a pagination token called a marker is included in the response. You can use the marker in a later DescribeExportTasks
request to retrieve the remaining results.
Default: 100
Constraints: Minimum 20, maximum 100.
" } } diff --git a/botocore/data/s3control/2018-08-20/service-2.json b/botocore/data/s3control/2018-08-20/service-2.json index 489cc982..b235e6e3 100644 --- a/botocore/data/s3control/2018-08-20/service-2.json +++ b/botocore/data/s3control/2018-08-20/service-2.json @@ -62,6 +62,21 @@ "input":{"shape":"DeleteAccessPointPolicyRequest"}, "documentation":"Deletes the access point policy for the specified access point.
" }, + "DeleteJobTagging":{ + "name":"DeleteJobTagging", + "http":{ + "method":"DELETE", + "requestUri":"/v20180820/jobs/{id}/tagging" + }, + "input":{"shape":"DeleteJobTaggingRequest"}, + "output":{"shape":"DeleteJobTaggingResult"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"} + ], + "documentation":"Delete the tags on a Amazon S3 batch operations job, if any.
" + }, "DeletePublicAccessBlock":{ "name":"DeletePublicAccessBlock", "http":{ @@ -117,6 +132,21 @@ "output":{"shape":"GetAccessPointPolicyStatusResult"}, "documentation":"Indicates whether the specified access point currently has a policy that allows public access. For more information about public access through access points, see Managing Data Access with Amazon S3 Access Points in the Amazon Simple Storage Service Developer Guide.
" }, + "GetJobTagging":{ + "name":"GetJobTagging", + "http":{ + "method":"GET", + "requestUri":"/v20180820/jobs/{id}/tagging" + }, + "input":{"shape":"GetJobTaggingRequest"}, + "output":{"shape":"GetJobTaggingResult"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"} + ], + "documentation":"Retrieve the tags on a Amazon S3 batch operations job.
" + }, "GetPublicAccessBlock":{ "name":"GetPublicAccessBlock", "http":{ @@ -168,6 +198,26 @@ }, "documentation":"Associates an access policy with the specified access point. Each access point can have only one policy, so a request made to this API replaces any existing policy associated with the specified access point.
" }, + "PutJobTagging":{ + "name":"PutJobTagging", + "http":{ + "method":"PUT", + "requestUri":"/v20180820/jobs/{id}/tagging" + }, + "input":{ + "shape":"PutJobTaggingRequest", + "locationName":"PutJobTaggingRequest", + "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} + }, + "output":{"shape":"PutJobTaggingResult"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyTagsException"} + ], + "documentation":"Replace the set of tags on a Amazon S3 batch operations job.
" + }, "PutPublicAccessBlock":{ "name":"PutPublicAccessBlock", "http":{ @@ -353,6 +403,10 @@ "RoleArn":{ "shape":"IAMRoleArn", "documentation":"The Amazon Resource Name (ARN) for the Identity and Access Management (IAM) Role that batch operations will use to execute this job's operation on each object in the manifest.
" + }, + "Tags":{ + "shape":"S3TagSet", + "documentation":"An optional set of tags to associate with the job when it is created.
" } } }, @@ -408,6 +462,32 @@ } } }, + "DeleteJobTaggingRequest":{ + "type":"structure", + "required":[ + "AccountId", + "JobId" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"The account ID for the Amazon Web Services account associated with the Amazon S3 batch operations job you want to remove tags from.
", + "location":"header", + "locationName":"x-amz-account-id" + }, + "JobId":{ + "shape":"JobId", + "documentation":"The ID for the job whose tags you want to delete.
", + "location":"uri", + "locationName":"id" + } + } + }, + "DeleteJobTaggingResult":{ + "type":"structure", + "members":{ + } + }, "DeletePublicAccessBlockRequest":{ "type":"structure", "required":["AccountId"], @@ -562,6 +642,36 @@ } } }, + "GetJobTaggingRequest":{ + "type":"structure", + "required":[ + "AccountId", + "JobId" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"The account ID for the Amazon Web Services account associated with the Amazon S3 batch operations job you want to retrieve tags for.
", + "location":"header", + "locationName":"x-amz-account-id" + }, + "JobId":{ + "shape":"JobId", + "documentation":"The ID for the job whose tags you want to retrieve.
", + "location":"uri", + "locationName":"id" + } + } + }, + "GetJobTaggingResult":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"S3TagSet", + "documentation":"The set of tags associated with the job.
" + } + } + }, "GetPublicAccessBlockOutput":{ "type":"structure", "members":{ @@ -1245,6 +1355,37 @@ } } }, + "PutJobTaggingRequest":{ + "type":"structure", + "required":[ + "AccountId", + "JobId", + "Tags" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"The account ID for the Amazon Web Services account associated with the Amazon S3 batch operations job you want to replace tags on.
", + "location":"header", + "locationName":"x-amz-account-id" + }, + "JobId":{ + "shape":"JobId", + "documentation":"The ID for the job whose tags you want to replace.
", + "location":"uri", + "locationName":"id" + }, + "Tags":{ + "shape":"S3TagSet", + "documentation":"The set of tags to associate with the job.
" + } + } + }, + "PutJobTaggingResult":{ + "type":"structure", + "members":{ + } + }, "PutPublicAccessBlockRequest":{ "type":"structure", "required":[ @@ -1661,6 +1802,13 @@ "documentation":"", "exception":true }, + "TooManyTagsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, "UpdateJobPriorityRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/servicecatalog/2015-12-10/service-2.json b/botocore/data/servicecatalog/2015-12-10/service-2.json index 99464f96..d95463d7 100644 --- a/botocore/data/servicecatalog/2015-12-10/service-2.json +++ b/botocore/data/servicecatalog/2015-12-10/service-2.json @@ -1524,6 +1524,14 @@ "Owner":{ "shape":"AccountId", "documentation":"The owner of the constraint.
" + }, + "ProductId":{ + "shape":"Id", + "documentation":"The identifier of the product the constraint applies to. Note that a constraint applies to a specific instance of a product within a certain portfolio.
" + }, + "PortfolioId":{ + "shape":"Id", + "documentation":"The identifier of the portfolio the product resides in. The constraint applies only to the instance of the product that lives within this portfolio.
" } }, "documentation":"Information about a constraint.
" diff --git a/botocore/data/ssm/2014-11-06/service-2.json b/botocore/data/ssm/2014-11-06/service-2.json index 35d82c90..0415aaf9 100644 --- a/botocore/data/ssm/2014-11-06/service-2.json +++ b/botocore/data/ssm/2014-11-06/service-2.json @@ -5533,7 +5533,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.
" + "documentation":"The token to use when requesting the next set of items.
" } } }, @@ -11300,8 +11300,8 @@ "box":true }, "ApproveUntilDate":{ - "shape":"PatchStringDate", - "documentation":"The cutoff date for auto approval of released patches. Any patches released on or before this date will be installed automatically
", + "shape":"PatchStringDateTime", + "documentation":"Example API
", "box":true }, "EnableNonSecurity":{ @@ -11405,11 +11405,10 @@ }, "documentation":"Information about the approval status of a patch.
" }, - "PatchStringDate":{ + "PatchStringDateTime":{ "type":"string", "max":10, - "min":1, - "pattern":"^(\\d{4}-(0[1-9]|1[0-2])-(0[1-9]|[12]\\d|3[01]))$" + "min":1 }, "PatchTitle":{"type":"string"}, "PatchUnreportedNotApplicableCount":{"type":"integer"}, @@ -11969,6 +11968,21 @@ "exception":true }, "ResourceDataSyncCreatedTime":{"type":"timestamp"}, + "ResourceDataSyncDestinationDataSharing":{ + "type":"structure", + "members":{ + "DestinationDataSharingType":{ + "shape":"ResourceDataSyncDestinationDataSharingType", + "documentation":"The sharing data type. Only Organization
is supported.
Synchronize Systems Manager Inventory data from multiple AWS accounts defined in AWS Organizations to a centralized Amazon S3 bucket. Data is synchronized to individual key prefixes in the central bucket. Each key prefix represents a different AWS account ID.
" + }, + "ResourceDataSyncDestinationDataSharingType":{ + "type":"string", + "max":64, + "min":1 + }, "ResourceDataSyncIncludeFutureRegions":{"type":"boolean"}, "ResourceDataSyncInvalidConfigurationException":{ "type":"structure", @@ -12103,6 +12117,10 @@ "AWSKMSKeyARN":{ "shape":"ResourceDataSyncAWSKMSKeyARN", "documentation":"The ARN of an encryption key for a destination in Amazon S3. Must belong to the same Region as the destination Amazon S3 bucket.
" + }, + "DestinationDataSharing":{ + "shape":"ResourceDataSyncDestinationDataSharing", + "documentation":"Enables destination data sharing. By default, this field is null
.
Information about the target Amazon S3 bucket for the Resource Data Sync.
" diff --git a/botocore/handlers.py b/botocore/handlers.py index daa22886..a312f5c2 100644 --- a/botocore/handlers.py +++ b/botocore/handlers.py @@ -484,8 +484,7 @@ def parse_get_bucket_location(parsed, http_response, **kwargs): # The "parsed" passed in only has the ResponseMetadata # filled out. This handler will fill in the LocationConstraint # value. - if 'LocationConstraint' in parsed: - # Response already set - a stub? + if http_response.raw is None: return response_body = http_response.content parser = xml.etree.cElementTree.XMLParser( diff --git a/docs/source/conf.py b/docs/source/conf.py index 33225a6f..4eb7d03b 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -54,7 +54,7 @@ copyright = u'2013, Mitch Garnaat' # The short X.Y version. version = '1.15.' # The full version, including alpha/beta/rc tags. -release = '1.15.21' +release = '1.15.26' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/tests/functional/test_stub.py b/tests/functional/test_stub.py index f44b722b..752ba224 100644 --- a/tests/functional/test_stub.py +++ b/tests/functional/test_stub.py @@ -313,3 +313,21 @@ class TestStubber(unittest.TestCase): actual_response = self.client.list_objects(**expected_params) self.assertEqual(desired_response, actual_response) self.stubber.assert_no_pending_responses() + + def test_parse_get_bucket_location(self): + error_code = "NoSuchBucket" + error_message = "The specified bucket does not exist" + self.stubber.add_client_error( + 'get_bucket_location', error_code, error_message) + self.stubber.activate() + + with self.assertRaises(ClientError): + self.client.get_bucket_location(Bucket='foo') + + def test_parse_get_bucket_location_returns_response(self): + service_response = {"LocationConstraint": "us-west-2"} + self.stubber.add_response('get_bucket_location',service_response) + self.stubber.activate() + response = self.client.get_bucket_location(Bucket='foo') + 
self.assertEqual(response, service_response) + diff --git a/tests/unit/test_handlers.py b/tests/unit/test_handlers.py index 551d18f5..a41bacb2 100644 --- a/tests/unit/test_handlers.py +++ b/tests/unit/test_handlers.py @@ -921,13 +921,6 @@ class TestHandlers(BaseSessionTest): self.assertEqual(parsed['CommonPrefixes'][0]['Prefix'], u'\xe7\xf6s% asd\x08 c') - def test_get_bucket_location_optional(self): - # This handler should no-op if another hook (i.e. stubber) has already - # filled in response - response = {"LocationConstraint": "eu-west-1"} - handlers.parse_get_bucket_location(response, None), - self.assertEqual(response["LocationConstraint"], "eu-west-1") - def test_set_operation_specific_signer_no_auth_type(self): signing_name = 'myservice' context = {'auth_type': None}