Update upstream source from tag 'upstream/1.15.26+repack'

Update to upstream version '1.15.26+repack'
with Debian dir c57535e050
TANIGUCHI Takaki 2020-03-22 21:13:01 +09:00
commit 2f2b47b83c
21 changed files with 1651 additions and 71 deletions


@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: botocore
Version: 1.15.21
Version: 1.15.26
Summary: Low-level, data-driven core of boto 3.
Home-page: https://github.com/boto/botocore
Author: Amazon Web Services


@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: botocore
Version: 1.15.21
Version: 1.15.26
Summary: Low-level, data-driven core of boto 3.
Home-page: https://github.com/boto/botocore
Author: Amazon Web Services


@@ -16,7 +16,7 @@ import os
import re
import logging
__version__ = '1.15.21'
__version__ = '1.15.26'
class NullHandler(logging.Handler):
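
The __version__ bump above is the only code change in this file. A minimal sketch, assuming the updated package is installed, to confirm the module reports the version matching the repacked upstream tag:

    import botocore

    # The upstream tag 1.15.26+repack should ship module version 1.15.26.
    assert botocore.__version__ == "1.15.26", botocore.__version__
    print("botocore", botocore.__version__)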


@@ -86,7 +86,7 @@
{"shape":"RequestInProgressException"},
{"shape":"InvalidArnException"}
],
"documentation":"<p>Retrieves a certificate specified by an ARN and its certificate chain . The chain is an ordered list of certificates that contains the end entity certificate, intermediate certificates of subordinate CAs, and the root certificate in that order. The certificate and certificate chain are base64 encoded. If you want to decode the certificate to see the individual fields, you can use OpenSSL.</p>"
"documentation":"<p>Retrieves an Amazon-issued certificate and its certificate chain. The chain consists of the certificate of the issuing CA and the intermediate certificates of any other subordinate CAs. All of the certificates are base64 encoded. You can use <a href=\"https://wiki.openssl.org/index.php/Command_Line_Utilities\">OpenSSL</a> to decode the certificates and inspect individual fields.</p>"
},
"ImportCertificate":{
"name":"ImportCertificate",
@@ -498,7 +498,7 @@
},
"ResourceRecord":{
"shape":"ResourceRecord",
"documentation":"<p>Contains the CNAME record that you add to your DNS database for domain validation. For more information, see <a href=\"https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-validate-dns.html\">Use DNS to Validate Domain Ownership</a>.</p>"
"documentation":"<p>Contains the CNAME record that you add to your DNS database for domain validation. For more information, see <a href=\"https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-validate-dns.html\">Use DNS to Validate Domain Ownership</a>.</p> <p>Note: The CNAME information that you need does not include the name of your domain. If you include&#x2028; your domain name in the DNS database CNAME record, validation fails.&#x2028; For example, if the name is \"_a79865eb4cd1a6ab990a45779b4e0b96.yourdomain.com\", only \"_a79865eb4cd1a6ab990a45779b4e0b96\" must be used.</p>"
},
"ValidationMethod":{
"shape":"ValidationMethod",
@@ -664,11 +664,11 @@
"members":{
"Certificate":{
"shape":"CertificateBody",
"documentation":"<p>String that contains the ACM certificate represented by the ARN specified at input.</p>"
"documentation":"<p>The ACM-issued certificate corresponding to the ARN specified as input.</p>"
},
"CertificateChain":{
"shape":"CertificateChain",
"documentation":"<p>The certificate chain that contains the root certificate issued by the certificate authority (CA).</p>"
"documentation":"<p>Certificates forming the requested certificate's chain of trust. The chain consists of the certificate of the issuing CA and the intermediate certificates of any other subordinate CAs. </p>"
}
}
},
@@ -822,7 +822,7 @@
"members":{
"message":{"shape":"String"}
},
"documentation":"<p>An ACM limit has been exceeded.</p>",
"documentation":"<p>An ACM quota has been exceeded.</p>",
"exception":true
},
"ListCertificatesRequest":{
@@ -885,7 +885,7 @@
},
"NextToken":{
"type":"string",
"max":320,
"max":10000,
"min":1,
"pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]*"
},
@@ -904,7 +904,7 @@
},
"PrivateKeyBlob":{
"type":"blob",
"max":524288,
"max":5120,
"min":1,
"sensitive":true
},
@@ -996,7 +996,7 @@
},
"SubjectAlternativeNames":{
"shape":"DomainList",
"documentation":"<p>Additional FQDNs to be included in the Subject Alternative Name extension of the ACM certificate. For example, add the name www.example.net to a certificate for which the <code>DomainName</code> field is www.example.com if users can reach your site by using either name. The maximum number of domain names that you can add to an ACM certificate is 100. However, the initial limit is 10 domain names. If you need more than 10 names, you must request a limit increase. For more information, see <a href=\"https://docs.aws.amazon.com/acm/latest/userguide/acm-limits.html\">Limits</a>.</p> <p> The maximum length of a SAN DNS name is 253 octets. The name is made up of multiple labels separated by periods. No label can be longer than 63 octets. Consider the following examples: </p> <ul> <li> <p> <code>(63 octets).(63 octets).(63 octets).(61 octets)</code> is legal because the total length is 253 octets (63+1+63+1+63+1+61) and no label exceeds 63 octets.</p> </li> <li> <p> <code>(64 octets).(63 octets).(63 octets).(61 octets)</code> is not legal because the total length exceeds 253 octets (64+1+63+1+63+1+61) and the first label exceeds 63 octets.</p> </li> <li> <p> <code>(63 octets).(63 octets).(63 octets).(62 octets)</code> is not legal because the total length of the DNS name (63+1+63+1+63+1+62) exceeds 253 octets.</p> </li> </ul>"
"documentation":"<p>Additional FQDNs to be included in the Subject Alternative Name extension of the ACM certificate. For example, add the name www.example.net to a certificate for which the <code>DomainName</code> field is www.example.com if users can reach your site by using either name. The maximum number of domain names that you can add to an ACM certificate is 100. However, the initial quota is 10 domain names. If you need more than 10 names, you must request a quota increase. For more information, see <a href=\"https://docs.aws.amazon.com/acm/latest/userguide/acm-limits.html\">Quotas</a>.</p> <p> The maximum length of a SAN DNS name is 253 octets. The name is made up of multiple labels separated by periods. No label can be longer than 63 octets. Consider the following examples: </p> <ul> <li> <p> <code>(63 octets).(63 octets).(63 octets).(61 octets)</code> is legal because the total length is 253 octets (63+1+63+1+63+1+61) and no label exceeds 63 octets.</p> </li> <li> <p> <code>(64 octets).(63 octets).(63 octets).(61 octets)</code> is not legal because the total length exceeds 253 octets (64+1+63+1+63+1+61) and the first label exceeds 63 octets.</p> </li> <li> <p> <code>(63 octets).(63 octets).(63 octets).(62 octets)</code> is not legal because the total length of the DNS name (63+1+63+1+63+1+62) exceeds 253 octets.</p> </li> </ul>"
},
"IdempotencyToken":{
"shape":"IdempotencyToken",

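The reworded GetCertificate documentation in this ACM model now states that the response carries the Amazon-issued certificate plus a chain consisting of the issuing CA and any intermediate CA certificates, all PEM/base64 encoded, and points at OpenSSL for decoding. A hedged sketch of that flow using boto3 with the cryptography library in place of the OpenSSL CLI; the region and ARN are placeholders:

    import boto3
    from cryptography import x509
    from cryptography.hazmat.backends import default_backend

    acm = boto3.client("acm", region_name="us-east-1")
    resp = acm.get_certificate(
        CertificateArn="arn:aws:acm:us-east-1:123456789012:certificate/example"  # placeholder ARN
    )

    # 'Certificate' is the end-entity PEM; 'CertificateChain' holds the issuing
    # CA and any intermediate CA certificates as concatenated PEM blocks.
    leaf = x509.load_pem_x509_certificate(resp["Certificate"].encode(), default_backend())
    print(leaf.subject.rfc4514_string(), leaf.not_valid_after)

The DNS-validation note added in the same file applies when you create the validation record: use only the token label (for example "_a79865eb4cd1a6ab990a45779b4e0b96"), not the full domain name, as the CNAME record name.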

@@ -1109,7 +1109,7 @@
{"shape":"UserNotConfirmedException"},
{"shape":"InternalErrorException"}
],
"documentation":"<p>Calling this API causes a message to be sent to the end user with a confirmation code that is required to change the user's password. For the <code>Username</code> parameter, you can use the username or user alias. If a verified phone number exists for the user, the confirmation code is sent to the phone number. Otherwise, if a verified email exists, the confirmation code is sent to the email. If neither a verified phone number nor a verified email exists, <code>InvalidParameterException</code> is thrown. To use the confirmation code for resetting the password, call .</p>",
"documentation":"<p>Calling this API causes a message to be sent to the end user with a confirmation code that is required to change the user's password. For the <code>Username</code> parameter, you can use the username or user alias. The method used to send the confirmation code is sent according to the specified AccountRecoverySetting. For more information, see <a href=\"\">Recovering User Accounts</a> in the <i>Amazon Cognito Developer Guide</i>. If neither a verified phone number nor a verified email exists, an <code>InvalidParameterException</code> is thrown. To use the confirmation code for resetting the password, call .</p>",
"authtype":"none"
},
"GetCSVHeader":{
@@ -3033,7 +3033,7 @@
"documentation":"<p>If <code>UserDataShared</code> is <code>true</code>, Amazon Cognito will include user data in the events it publishes to Amazon Pinpoint analytics.</p>"
}
},
"documentation":"<p>The Amazon Pinpoint analytics configuration for collecting metrics for a user pool.</p>"
"documentation":"<p>The Amazon Pinpoint analytics configuration for collecting metrics for a user pool.</p> <note> <p>Cognito User Pools only supports sending events to Amazon Pinpoint projects in the US East (N. Virginia) us-east-1 Region, regardless of the region in which the user pool resides.</p> </note>"
},
"AnalyticsMetadataType":{
"type":"structure",
@@ -3043,7 +3043,7 @@
"documentation":"<p>The endpoint ID.</p>"
}
},
"documentation":"<p>An Amazon Pinpoint analytics endpoint.</p> <p>An endpoint uniquely identifies a mobile device, email address, or phone number that can receive messages from Amazon Pinpoint analytics.</p>"
"documentation":"<p>An Amazon Pinpoint analytics endpoint.</p> <p>An endpoint uniquely identifies a mobile device, email address, or phone number that can receive messages from Amazon Pinpoint analytics.</p> <note> <p>Cognito User Pools only supports sending events to Amazon Pinpoint projects in the US East (N. Virginia) us-east-1 Region, regardless of the region in which the user pool resides.</p> </note>"
},
"ArnType":{
"type":"string",
@@ -3685,7 +3685,7 @@
},
"ProviderDetails":{
"shape":"ProviderDetailsType",
"documentation":"<p>The identity provider details. The following list describes the provider detail keys for each identity provider type.</p> <ul> <li> <p>For Google, Facebook and Login with Amazon:</p> <ul> <li> <p>client_id</p> </li> <li> <p>client_secret</p> </li> <li> <p>authorize_scopes</p> </li> </ul> </li> <li> <p>For Sign in with Apple:</p> <ul> <li> <p>client_id</p> </li> <li> <p>team_id</p> </li> <li> <p>key_id</p> </li> <li> <p>private_key</p> </li> <li> <p>authorize_scopes</p> </li> </ul> </li> <li> <p>For OIDC providers:</p> <ul> <li> <p>client_id</p> </li> <li> <p>client_secret</p> </li> <li> <p>attributes_request_method</p> </li> <li> <p>oidc_issuer</p> </li> <li> <p>authorize_scopes</p> </li> <li> <p>authorize_url <i>if not available from discovery URL specified by oidc_issuer key</i> </p> </li> <li> <p>token_url <i>if not available from discovery URL specified by oidc_issuer key</i> </p> </li> <li> <p>attributes_url <i>if not available from discovery URL specified by oidc_issuer key</i> </p> </li> <li> <p>jwks_uri <i>if not available from discovery URL specified by oidc_issuer key</i> </p> </li> <li> <p>authorize_scopes</p> </li> </ul> </li> <li> <p>For SAML providers:</p> <ul> <li> <p>MetadataFile OR MetadataURL</p> </li> <li> <p>IDPSignOut <i>optional</i> </p> </li> </ul> </li> </ul>"
"documentation":"<p>The identity provider details. The following list describes the provider detail keys for each identity provider type.</p> <ul> <li> <p>For Google, Facebook and Login with Amazon:</p> <ul> <li> <p>client_id</p> </li> <li> <p>client_secret</p> </li> <li> <p>authorize_scopes</p> </li> </ul> </li> <li> <p>For Sign in with Apple:</p> <ul> <li> <p>client_id</p> </li> <li> <p>team_id</p> </li> <li> <p>key_id</p> </li> <li> <p>private_key</p> </li> <li> <p>authorize_scopes</p> </li> </ul> </li> <li> <p>For OIDC providers:</p> <ul> <li> <p>client_id</p> </li> <li> <p>client_secret</p> </li> <li> <p>attributes_request_method</p> </li> <li> <p>oidc_issuer</p> </li> <li> <p>authorize_scopes</p> </li> <li> <p>authorize_url <i>if not available from discovery URL specified by oidc_issuer key</i> </p> </li> <li> <p>token_url <i>if not available from discovery URL specified by oidc_issuer key</i> </p> </li> <li> <p>attributes_url <i>if not available from discovery URL specified by oidc_issuer key</i> </p> </li> <li> <p>jwks_uri <i>if not available from discovery URL specified by oidc_issuer key</i> </p> </li> <li> <p>authorize_scopes</p> </li> </ul> </li> <li> <p>For SAML providers:</p> <ul> <li> <p>MetadataFile OR MetadataURL</p> </li> <li> <p>IDPSignout <i>optional</i> </p> </li> </ul> </li> </ul>"
},
"AttributeMapping":{
"shape":"AttributeMappingType",
@@ -3841,7 +3841,7 @@
},
"AnalyticsConfiguration":{
"shape":"AnalyticsConfigurationType",
"documentation":"<p>The Amazon Pinpoint analytics configuration for collecting metrics for this user pool.</p>"
"documentation":"<p>The Amazon Pinpoint analytics configuration for collecting metrics for this user pool.</p> <note> <p>Cognito User Pools only supports sending events to Amazon Pinpoint projects in the US East (N. Virginia) us-east-1 Region, regardless of the region in which the user pool resides.</p> </note>"
},
"PreventUserExistenceErrors":{
"shape":"PreventUserExistenceErrorTypes",
@@ -4655,6 +4655,10 @@
"RiskLevel":{
"shape":"RiskLevelType",
"documentation":"<p>The risk level.</p>"
},
"CompromisedCredentialsDetected":{
"shape":"WrappedBooleanType",
"documentation":"<p>Indicates whether compromised credentials were detected during an authentication event.</p>"
}
},
"documentation":"<p>The event risk type.</p>"
@@ -7266,7 +7270,7 @@
},
"AnalyticsConfiguration":{
"shape":"AnalyticsConfigurationType",
"documentation":"<p>The Amazon Pinpoint analytics configuration for collecting metrics for this user pool.</p>"
"documentation":"<p>The Amazon Pinpoint analytics configuration for collecting metrics for this user pool.</p> <note> <p>Cognito User Pools only supports sending events to Amazon Pinpoint projects in the US East (N. Virginia) us-east-1 Region, regardless of the region in which the user pool resides.</p> </note>"
},
"PreventUserExistenceErrors":{
"shape":"PreventUserExistenceErrorTypes",
@@ -7665,7 +7669,7 @@
},
"AnalyticsConfiguration":{
"shape":"AnalyticsConfigurationType",
"documentation":"<p>The Amazon Pinpoint analytics configuration for the user pool client.</p>"
"documentation":"<p>The Amazon Pinpoint analytics configuration for the user pool client.</p> <note> <p>Cognito User Pools only supports sending events to Amazon Pinpoint projects in the US East (N. Virginia) us-east-1 Region, regardless of the region in which the user pool resides.</p> </note>"
},
"PreventUserExistenceErrors":{
"shape":"PreventUserExistenceErrorTypes",

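The revised ForgotPassword documentation above says the delivery channel for the confirmation code now follows the user pool's AccountRecoverySetting, and the truncated "call ." refers to the follow-up ConfirmForgotPassword call. A minimal sketch of the two calls; the client ID, username, code, and password are placeholders, and SecretHash would be needed if the app client has a client secret:

    import boto3

    idp = boto3.client("cognito-idp", region_name="us-east-1")

    # Step 1: trigger delivery of the confirmation code; the channel is chosen
    # according to the pool's AccountRecoverySetting.
    idp.forgot_password(ClientId="example-app-client-id", Username="alice")

    # Step 2: once the user receives the code, reset the password with it.
    idp.confirm_forgot_password(
        ClientId="example-app-client-id",
        Username="alice",
        ConfirmationCode="123456",
        Password="N3w-Passw0rd!",
    )
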
File diff suppressed because one or more lines are too long


@@ -83,6 +83,12 @@
"limit_key": "MaxRecords",
"output_token": "Marker",
"result_key": "UpdateActions"
},
"DescribeGlobalReplicationGroups": {
"input_token": "Marker",
"limit_key": "MaxRecords",
"output_token": "Marker",
"result_key": "GlobalReplicationGroups"
}
}
}
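
The new paginator entry above wires DescribeGlobalReplicationGroups into botocore's standard Marker/MaxRecords pagination. A sketch of iterating all Global Datastores through boto3, assuming a boto3/botocore release that includes this model update:

    import boto3

    elasticache = boto3.client("elasticache", region_name="us-east-1")
    paginator = elasticache.get_paginator("describe_global_replication_groups")

    # The paginator handles the Marker token; ShowMemberInfo adds member clusters.
    for page in paginator.paginate(ShowMemberInfo=True):
        for gdg in page["GlobalReplicationGroups"]:
            print(gdg["GlobalReplicationGroupId"], gdg.get("Status"))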


@@ -210,6 +210,26 @@
],
"documentation":"<p>Creates a new cache subnet group.</p> <p>Use this parameter only when you are creating a cluster in an Amazon Virtual Private Cloud (Amazon VPC).</p>"
},
"CreateGlobalReplicationGroup":{
"name":"CreateGlobalReplicationGroup",
"http":{
"method":"POST",
"requestUri":"/"
},
"input":{"shape":"CreateGlobalReplicationGroupMessage"},
"output":{
"shape":"CreateGlobalReplicationGroupResult",
"resultWrapper":"CreateGlobalReplicationGroupResult"
},
"errors":[
{"shape":"ReplicationGroupNotFoundFault"},
{"shape":"InvalidReplicationGroupStateFault"},
{"shape":"GlobalReplicationGroupAlreadyExistsFault"},
{"shape":"ServiceLinkedRoleNotFoundFault"},
{"shape":"InvalidParameterValueException"}
],
"documentation":"<p>Global Datastore for Redis offers fully managed, fast, reliable and secure cross-region replication. Using Global Datastore for Redis, you can create cross-region read replica clusters for ElastiCache for Redis to enable low-latency reads and disaster recovery across regions. For more information, see <a href=\"/AmazonElastiCache/latest/red-ug/Redis-Global-Clusters.html\">Replication Across Regions Using Global Datastore</a>. </p> <ul> <li> <p>The <b>GlobalReplicationGroupId</b> is the name of the Global Datastore.</p> </li> <li> <p>The <b>PrimaryReplicationGroupId</b> represents the name of the primary cluster that accepts writes and will replicate updates to the secondary cluster.</p> </li> </ul>"
},
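
The CreateGlobalReplicationGroup operation added above takes a name suffix plus the primary (writer) replication group; per the updated CreateReplicationGroup note later in this file, a secondary is then created in another Region with GlobalReplicationGroupId set. A hedged sketch, with replication group names and Regions as placeholders:

    import boto3

    primary = boto3.client("elasticache", region_name="us-east-1")
    resp = primary.create_global_replication_group(
        GlobalReplicationGroupIdSuffix="my-datastore",        # placeholder suffix
        GlobalReplicationGroupDescription="cross-region replication",
        PrimaryReplicationGroupId="primary-redis-rg",         # existing primary cluster
    )
    gdg_id = resp["GlobalReplicationGroup"]["GlobalReplicationGroupId"]

    # Secondary replication group in another Region, attached via
    # GlobalReplicationGroupId; most settings are inherited from the Global Datastore.
    secondary = boto3.client("elasticache", region_name="eu-west-1")
    secondary.create_replication_group(
        ReplicationGroupId="secondary-redis-rg",
        ReplicationGroupDescription="read replica of the global datastore",
        GlobalReplicationGroupId=gdg_id,
    )
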
"CreateReplicationGroup":{
"name":"CreateReplicationGroup",
"http":{
@@ -235,10 +255,12 @@
{"shape":"InvalidVPCNetworkStateFault"},
{"shape":"TagQuotaPerResourceExceeded"},
{"shape":"NodeGroupsPerReplicationGroupQuotaExceededFault"},
{"shape":"GlobalReplicationGroupNotFoundFault"},
{"shape":"InvalidGlobalReplicationGroupStateFault"},
{"shape":"InvalidParameterValueException"},
{"shape":"InvalidParameterCombinationException"}
],
"documentation":"<p>Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group.</p> <p>A Redis (cluster mode disabled) replication group is a collection of clusters, where one of the clusters is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas.</p> <p>A Redis (cluster mode enabled) replication group is a collection of 1 to 90 node groups (shards). Each node group (shard) has one read/write primary node and up to 5 read-only replica nodes. Writes to the primary are asynchronously propagated to the replicas. Redis (cluster mode enabled) replication groups partition the data across node groups (shards).</p> <p>When a Redis (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. You cannot alter a Redis (cluster mode enabled) replication group after it has been created. However, if you need to increase or decrease the number of node groups (console: shards), you can avail yourself of ElastiCache for Redis' enhanced backup and restore. For more information, see <a href=\"https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-restoring.html\">Restoring From a Backup with Cluster Resizing</a> in the <i>ElastiCache User Guide</i>.</p> <note> <p>This operation is valid for Redis only.</p> </note>"
"documentation":"<p>Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group.</p> <p>This API can be used to create a standalone regional replication group or a secondary replication group associated with a Global Datastore.</p> <p>A Redis (cluster mode disabled) replication group is a collection of clusters, where one of the clusters is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas.</p> <p>A Redis (cluster mode enabled) replication group is a collection of 1 to 90 node groups (shards). Each node group (shard) has one read/write primary node and up to 5 read-only replica nodes. Writes to the primary are asynchronously propagated to the replicas. Redis (cluster mode enabled) replication groups partition the data across node groups (shards).</p> <p>When a Redis (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. You cannot alter a Redis (cluster mode enabled) replication group after it has been created. However, if you need to increase or decrease the number of node groups (console: shards), you can avail yourself of ElastiCache for Redis' enhanced backup and restore. For more information, see <a href=\"https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-restoring.html\">Restoring From a Backup with Cluster Resizing</a> in the <i>ElastiCache User Guide</i>.</p> <note> <p>This operation is valid for Redis only.</p> </note>"
},
"CreateSnapshot":{
"name":"CreateSnapshot",
@@ -264,6 +286,25 @@
],
"documentation":"<p>Creates a copy of an entire cluster or replication group at a specific moment in time.</p> <note> <p>This operation is valid for Redis only.</p> </note>"
},
"DecreaseNodeGroupsInGlobalReplicationGroup":{
"name":"DecreaseNodeGroupsInGlobalReplicationGroup",
"http":{
"method":"POST",
"requestUri":"/"
},
"input":{"shape":"DecreaseNodeGroupsInGlobalReplicationGroupMessage"},
"output":{
"shape":"DecreaseNodeGroupsInGlobalReplicationGroupResult",
"resultWrapper":"DecreaseNodeGroupsInGlobalReplicationGroupResult"
},
"errors":[
{"shape":"GlobalReplicationGroupNotFoundFault"},
{"shape":"InvalidGlobalReplicationGroupStateFault"},
{"shape":"InvalidParameterValueException"},
{"shape":"InvalidParameterCombinationException"}
],
"documentation":"<p>Decreases the number of node groups in a Global Datastore</p>"
},
"DecreaseReplicaCount":{
"name":"DecreaseReplicaCount",
"http":{
@@ -289,7 +330,7 @@
{"shape":"InvalidParameterValueException"},
{"shape":"InvalidParameterCombinationException"}
],
"documentation":"<p>Dynamically decreases the number of replics in a Redis (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Redis (cluster mode enabled) replication group. This operation is performed with no cluster down time.</p>"
"documentation":"<p>Dynamically decreases the number of replicas in a Redis (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Redis (cluster mode enabled) replication group. This operation is performed with no cluster down time.</p>"
},
"DeleteCacheCluster":{
"name":"DeleteCacheCluster",
@@ -356,6 +397,24 @@
],
"documentation":"<p>Deletes a cache subnet group.</p> <note> <p>You cannot delete a cache subnet group if it is associated with any clusters.</p> </note>"
},
"DeleteGlobalReplicationGroup":{
"name":"DeleteGlobalReplicationGroup",
"http":{
"method":"POST",
"requestUri":"/"
},
"input":{"shape":"DeleteGlobalReplicationGroupMessage"},
"output":{
"shape":"DeleteGlobalReplicationGroupResult",
"resultWrapper":"DeleteGlobalReplicationGroupResult"
},
"errors":[
{"shape":"GlobalReplicationGroupNotFoundFault"},
{"shape":"InvalidGlobalReplicationGroupStateFault"},
{"shape":"InvalidParameterValueException"}
],
"documentation":"<p>Deleting a Global Datastore is a two-step process: </p> <ul> <li> <p>First, you must <a>DisassociateGlobalReplicationGroup</a> to remove the secondary clusters in the Global Datastore.</p> </li> <li> <p>Once the Global Datastore contains only the primary cluster, you can use DeleteGlobalReplicationGroup API to delete the Global Datastore while retainining the primary cluster using Retain…= true.</p> </li> </ul> <p>Since the Global Datastore has only a primary cluster, you can delete the Global Datastore while retaining the primary by setting <code>RetainPrimaryCluster=true</code>.</p> <p>When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the selected resources; you cannot cancel or revert this operation.</p> <note> <p>This operation is valid for Redis only.</p> </note>"
},
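
The DeleteGlobalReplicationGroup documentation added above describes a two-step teardown: first disassociate each secondary cluster, then delete the Global Datastore while retaining the primary. A sketch of that sequence; the datastore name, replication group names, and Regions are placeholders:

    import boto3

    elasticache = boto3.client("elasticache", region_name="us-east-1")

    # Step 1: detach the secondary cluster; it keeps running as a standalone
    # replication group in its own Region.
    elasticache.disassociate_global_replication_group(
        GlobalReplicationGroupId="example-global-datastore",  # placeholder name
        ReplicationGroupId="secondary-redis-rg",
        ReplicationGroupRegion="eu-west-1",
    )

    # Step 2: once only the primary remains (wait for the disassociation to
    # finish), delete the Global Datastore but keep the primary as a
    # standalone replication group.
    elasticache.delete_global_replication_group(
        GlobalReplicationGroupId="example-global-datastore",
        RetainPrimaryReplicationGroup=True,
    )
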
"DeleteReplicationGroup":{
"name":"DeleteReplicationGroup",
"http":{
@@ -532,6 +591,24 @@
],
"documentation":"<p>Returns events related to clusters, cache security groups, and cache parameter groups. You can obtain events specific to a particular cluster, cache security group, or cache parameter group by providing the name as a parameter.</p> <p>By default, only the events occurring within the last hour are returned; however, you can retrieve up to 14 days' worth of events if necessary.</p>"
},
"DescribeGlobalReplicationGroups":{
"name":"DescribeGlobalReplicationGroups",
"http":{
"method":"POST",
"requestUri":"/"
},
"input":{"shape":"DescribeGlobalReplicationGroupsMessage"},
"output":{
"shape":"DescribeGlobalReplicationGroupsResult",
"resultWrapper":"DescribeGlobalReplicationGroupsResult"
},
"errors":[
{"shape":"GlobalReplicationGroupNotFoundFault"},
{"shape":"InvalidParameterValueException"},
{"shape":"InvalidParameterCombinationException"}
],
"documentation":"<p>Returns information about a particular global replication group. If no identifier is specified, returns information about all Global Datastores. </p>"
},
"DescribeReplicationGroups":{
"name":"DescribeReplicationGroups",
"http":{
@@ -640,6 +717,62 @@
],
"documentation":"<p>Returns details of the update actions </p>"
},
"DisassociateGlobalReplicationGroup":{
"name":"DisassociateGlobalReplicationGroup",
"http":{
"method":"POST",
"requestUri":"/"
},
"input":{"shape":"DisassociateGlobalReplicationGroupMessage"},
"output":{
"shape":"DisassociateGlobalReplicationGroupResult",
"resultWrapper":"DisassociateGlobalReplicationGroupResult"
},
"errors":[
{"shape":"GlobalReplicationGroupNotFoundFault"},
{"shape":"InvalidGlobalReplicationGroupStateFault"},
{"shape":"InvalidParameterValueException"},
{"shape":"InvalidParameterCombinationException"}
],
"documentation":"<p>Remove a secondary cluster from the Global Datastore using the Global Datastore name. The secondary cluster will no longer receive updates from the primary cluster, but will remain as a standalone cluster in that AWS region.</p>"
},
"FailoverGlobalReplicationGroup":{
"name":"FailoverGlobalReplicationGroup",
"http":{
"method":"POST",
"requestUri":"/"
},
"input":{"shape":"FailoverGlobalReplicationGroupMessage"},
"output":{
"shape":"FailoverGlobalReplicationGroupResult",
"resultWrapper":"FailoverGlobalReplicationGroupResult"
},
"errors":[
{"shape":"GlobalReplicationGroupNotFoundFault"},
{"shape":"InvalidGlobalReplicationGroupStateFault"},
{"shape":"InvalidParameterValueException"},
{"shape":"InvalidParameterCombinationException"}
],
"documentation":"<p>Used to failover the primary region to a selected secondary region.</p>"
},
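
FailoverGlobalReplicationGroup, added above, promotes a secondary Region to primary. A minimal call sketch, with the datastore name, target Region, and target replication group as placeholders:

    import boto3

    elasticache = boto3.client("elasticache", region_name="us-east-1")
    elasticache.failover_global_replication_group(
        GlobalReplicationGroupId="example-global-datastore",
        PrimaryRegion="eu-west-1",                  # Region to promote to primary
        PrimaryReplicationGroupId="secondary-redis-rg",
    )
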
"IncreaseNodeGroupsInGlobalReplicationGroup":{
"name":"IncreaseNodeGroupsInGlobalReplicationGroup",
"http":{
"method":"POST",
"requestUri":"/"
},
"input":{"shape":"IncreaseNodeGroupsInGlobalReplicationGroupMessage"},
"output":{
"shape":"IncreaseNodeGroupsInGlobalReplicationGroupResult",
"resultWrapper":"IncreaseNodeGroupsInGlobalReplicationGroupResult"
},
"errors":[
{"shape":"GlobalReplicationGroupNotFoundFault"},
{"shape":"InvalidGlobalReplicationGroupStateFault"},
{"shape":"InvalidParameterValueException"}
],
"documentation":"<p>Increase the number of node groups in the Global Datastore</p>"
},
"IncreaseReplicaCount":{
"name":"IncreaseReplicaCount",
"http":{
@@ -745,7 +878,8 @@
{"shape":"CacheParameterGroupNotFoundFault"},
{"shape":"InvalidCacheParameterGroupStateFault"},
{"shape":"InvalidParameterValueException"},
{"shape":"InvalidParameterCombinationException"}
{"shape":"InvalidParameterCombinationException"},
{"shape":"InvalidGlobalReplicationGroupStateFault"}
],
"documentation":"<p>Modifies the parameters of a cache parameter group. You can modify up to 20 parameters in a single request by submitting a list parameter name and value pairs.</p>"
},
@@ -768,6 +902,24 @@
],
"documentation":"<p>Modifies an existing cache subnet group.</p>"
},
"ModifyGlobalReplicationGroup":{
"name":"ModifyGlobalReplicationGroup",
"http":{
"method":"POST",
"requestUri":"/"
},
"input":{"shape":"ModifyGlobalReplicationGroupMessage"},
"output":{
"shape":"ModifyGlobalReplicationGroupResult",
"resultWrapper":"ModifyGlobalReplicationGroupResult"
},
"errors":[
{"shape":"GlobalReplicationGroupNotFoundFault"},
{"shape":"InvalidGlobalReplicationGroupStateFault"},
{"shape":"InvalidParameterValueException"}
],
"documentation":"<p>Modifies the settings for a Global Datastore.</p>"
},
"ModifyReplicationGroup":{
"name":"ModifyReplicationGroup",
"http":{
@@ -842,6 +994,24 @@
],
"documentation":"<p>Allows you to purchase a reserved cache node offering.</p>"
},
"RebalanceSlotsInGlobalReplicationGroup":{
"name":"RebalanceSlotsInGlobalReplicationGroup",
"http":{
"method":"POST",
"requestUri":"/"
},
"input":{"shape":"RebalanceSlotsInGlobalReplicationGroupMessage"},
"output":{
"shape":"RebalanceSlotsInGlobalReplicationGroupResult",
"resultWrapper":"RebalanceSlotsInGlobalReplicationGroupResult"
},
"errors":[
{"shape":"GlobalReplicationGroupNotFoundFault"},
{"shape":"InvalidGlobalReplicationGroupStateFault"},
{"shape":"InvalidParameterValueException"}
],
"documentation":"<p>Redistribute slots to ensure unifirom distribution across existing shards in the cluster.</p>"
},
"RebootCacheCluster":{
"name":"RebootCacheCluster",
"http":{
@@ -893,7 +1063,8 @@
{"shape":"InvalidCacheParameterGroupStateFault"},
{"shape":"CacheParameterGroupNotFoundFault"},
{"shape":"InvalidParameterValueException"},
{"shape":"InvalidParameterCombinationException"}
{"shape":"InvalidParameterCombinationException"},
{"shape":"InvalidGlobalReplicationGroupStateFault"}
],
"documentation":"<p>Modifies the parameters of a cache parameter group to the engine or system default value. You can reset specific parameters by submitting a list of parameter names. To reset the entire cache parameter group, specify the <code>ResetAllParameters</code> and <code>CacheParameterGroupName</code> parameters.</p>"
},
@@ -1014,7 +1185,7 @@
},
"ScaleDownModifications":{
"shape":"NodeTypeList",
"documentation":"<p>A string list, each element of which specifies a cache node type which you can use to scale your cluster or replication group.</p> <p>When scaling down on a Redis cluster or replication group using <code>ModifyCacheCluster</code> or <code>ModifyReplicationGroup</code>, use a value from this list for the <code>CacheNodeType</code> parameter.</p>"
"documentation":"<p>A string list, each element of which specifies a cache node type which you can use to scale your cluster or replication group. When scaling down a Redis cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from this list for the CacheNodeType parameter. </p>"
}
},
"documentation":"<p>Represents the allowed node types you can use to modify your cluster or replication group.</p>"
@@ -1169,7 +1340,7 @@
},
"CacheNodeType":{
"shape":"String",
"documentation":"<p>The name of the compute and memory capacity node type for the cluster.</p> <p>The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.</p> <ul> <li> <p>General purpose:</p> <ul> <li> <p>Current generation: </p> <p> <b>M5 node types:</b> <code>cache.m5.large</code>, <code>cache.m5.xlarge</code>, <code>cache.m5.2xlarge</code>, <code>cache.m5.4xlarge</code>, <code>cache.m5.12xlarge</code>, <code>cache.m5.24xlarge</code> </p> <p> <b>M4 node types:</b> <code>cache.m4.large</code>, <code>cache.m4.xlarge</code>, <code>cache.m4.2xlarge</code>, <code>cache.m4.4xlarge</code>, <code>cache.m4.10xlarge</code> </p> <p> <b>T2 node types:</b> <code>cache.t2.micro</code>, <code>cache.t2.small</code>, <code>cache.t2.medium</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>T1 node types:</b> <code>cache.t1.micro</code> </p> <p> <b>M1 node types:</b> <code>cache.m1.small</code>, <code>cache.m1.medium</code>, <code>cache.m1.large</code>, <code>cache.m1.xlarge</code> </p> <p> <b>M3 node types:</b> <code>cache.m3.medium</code>, <code>cache.m3.large</code>, <code>cache.m3.xlarge</code>, <code>cache.m3.2xlarge</code> </p> </li> </ul> </li> <li> <p>Compute optimized:</p> <ul> <li> <p>Previous generation: (not recommended)</p> <p> <b>C1 node types:</b> <code>cache.c1.xlarge</code> </p> </li> </ul> </li> <li> <p>Memory optimized:</p> <ul> <li> <p>Current generation: </p> <p> <b>R5 node types:</b> <code>cache.r5.large</code>, <code>cache.r5.xlarge</code>, <code>cache.r5.2xlarge</code>, <code>cache.r5.4xlarge</code>, <code>cache.r5.12xlarge</code>, <code>cache.r5.24xlarge</code> </p> <p> <b>R4 node types:</b> <code>cache.r4.large</code>, <code>cache.r4.xlarge</code>, <code>cache.r4.2xlarge</code>, <code>cache.r4.4xlarge</code>, <code>cache.r4.8xlarge</code>, <code>cache.r4.16xlarge</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>M2 node types:</b> <code>cache.m2.xlarge</code>, <code>cache.m2.2xlarge</code>, <code>cache.m2.4xlarge</code> </p> <p> <b>R3 node types:</b> <code>cache.r3.large</code>, <code>cache.r3.xlarge</code>, <code>cache.r3.2xlarge</code>, <code>cache.r3.4xlarge</code>, <code>cache.r3.8xlarge</code> </p> </li> </ul> </li> </ul> <p> <b>Additional node type info</b> </p> <ul> <li> <p>All current generation instance types are created in Amazon VPC by default.</p> </li> <li> <p>Redis append-only files (AOF) are not supported for T1 or T2 instances.</p> </li> <li> <p>Redis Multi-AZ with automatic failover is not supported on T1 instances.</p> </li> <li> <p>Redis configuration variables <code>appendonly</code> and <code>appendfsync</code> are not supported on Redis version 2.8.22 and later.</p> </li> </ul>"
"documentation":"<p>The name of the compute and memory capacity node type for the cluster.</p> <p>The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.</p> <ul> <li> <p>General purpose:</p> <ul> <li> <p>Current generation: </p> <p> <b>M5 node types:</b> <code>cache.m5.large</code>, <code>cache.m5.xlarge</code>, <code>cache.m5.2xlarge</code>, <code>cache.m5.4xlarge</code>, <code>cache.m5.12xlarge</code>, <code>cache.m5.24xlarge</code> </p> <p> <b>M4 node types:</b> <code>cache.m4.large</code>, <code>cache.m4.xlarge</code>, <code>cache.m4.2xlarge</code>, <code>cache.m4.4xlarge</code>, <code>cache.m4.10xlarge</code> </p> <p> <b>T3 node types:</b> <code>cache.t3.micro</code>, <code>cache.t3.small</code>, <code>cache.t3.medium</code> </p> <p> <b>T2 node types:</b> <code>cache.t2.micro</code>, <code>cache.t2.small</code>, <code>cache.t2.medium</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>T1 node types:</b> <code>cache.t1.micro</code> </p> <p> <b>M1 node types:</b> <code>cache.m1.small</code>, <code>cache.m1.medium</code>, <code>cache.m1.large</code>, <code>cache.m1.xlarge</code> </p> <p> <b>M3 node types:</b> <code>cache.m3.medium</code>, <code>cache.m3.large</code>, <code>cache.m3.xlarge</code>, <code>cache.m3.2xlarge</code> </p> </li> </ul> </li> <li> <p>Compute optimized:</p> <ul> <li> <p>Previous generation: (not recommended)</p> <p> <b>C1 node types:</b> <code>cache.c1.xlarge</code> </p> </li> </ul> </li> <li> <p>Memory optimized:</p> <ul> <li> <p>Current generation: </p> <p> <b>R5 node types:</b> <code>cache.r5.large</code>, <code>cache.r5.xlarge</code>, <code>cache.r5.2xlarge</code>, <code>cache.r5.4xlarge</code>, <code>cache.r5.12xlarge</code>, <code>cache.r5.24xlarge</code> </p> <p> <b>R4 node types:</b> <code>cache.r4.large</code>, <code>cache.r4.xlarge</code>, <code>cache.r4.2xlarge</code>, <code>cache.r4.4xlarge</code>, <code>cache.r4.8xlarge</code>, <code>cache.r4.16xlarge</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>M2 node types:</b> <code>cache.m2.xlarge</code>, <code>cache.m2.2xlarge</code>, <code>cache.m2.4xlarge</code> </p> <p> <b>R3 node types:</b> <code>cache.r3.large</code>, <code>cache.r3.xlarge</code>, <code>cache.r3.2xlarge</code>, <code>cache.r3.4xlarge</code>, <code>cache.r3.8xlarge</code> </p> </li> </ul> </li> </ul> <p> <b>Additional node type info</b> </p> <ul> <li> <p>All current generation instance types are created in Amazon VPC by default.</p> </li> <li> <p>Redis append-only files (AOF) are not supported for T1 or T2 instances.</p> </li> <li> <p>Redis Multi-AZ with automatic failover is not supported on T1 instances.</p> </li> <li> <p>Redis configuration variables <code>appendonly</code> and <code>appendfsync</code> are not supported on Redis version 2.8.22 and later.</p> </li> </ul>"
},
"Engine":{
"shape":"String",
@@ -1366,7 +1537,7 @@
},
"CacheNodeStatus":{
"shape":"String",
"documentation":"<p>The current state of this cache node.</p>"
"documentation":"<p>The current state of this cache node, one of the following values: <code>available</code>, <code>creating</code>, <code>rebooting</code>, or <code>deleting</code>.</p>"
},
"CacheNodeCreateTime":{
"shape":"TStamp",
@@ -1389,7 +1560,7 @@
"documentation":"<p>The Availability Zone where this node was created and now resides.</p>"
}
},
"documentation":"<p>Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached or Redis.</p> <p>The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.</p> <ul> <li> <p>General purpose:</p> <ul> <li> <p>Current generation: </p> <p> <b>M5 node types:</b> <code>cache.m5.large</code>, <code>cache.m5.xlarge</code>, <code>cache.m5.2xlarge</code>, <code>cache.m5.4xlarge</code>, <code>cache.m5.12xlarge</code>, <code>cache.m5.24xlarge</code> </p> <p> <b>M4 node types:</b> <code>cache.m4.large</code>, <code>cache.m4.xlarge</code>, <code>cache.m4.2xlarge</code>, <code>cache.m4.4xlarge</code>, <code>cache.m4.10xlarge</code> </p> <p> <b>T2 node types:</b> <code>cache.t2.micro</code>, <code>cache.t2.small</code>, <code>cache.t2.medium</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>T1 node types:</b> <code>cache.t1.micro</code> </p> <p> <b>M1 node types:</b> <code>cache.m1.small</code>, <code>cache.m1.medium</code>, <code>cache.m1.large</code>, <code>cache.m1.xlarge</code> </p> <p> <b>M3 node types:</b> <code>cache.m3.medium</code>, <code>cache.m3.large</code>, <code>cache.m3.xlarge</code>, <code>cache.m3.2xlarge</code> </p> </li> </ul> </li> <li> <p>Compute optimized:</p> <ul> <li> <p>Previous generation: (not recommended)</p> <p> <b>C1 node types:</b> <code>cache.c1.xlarge</code> </p> </li> </ul> </li> <li> <p>Memory optimized:</p> <ul> <li> <p>Current generation: </p> <p> <b>R5 node types:</b> <code>cache.r5.large</code>, <code>cache.r5.xlarge</code>, <code>cache.r5.2xlarge</code>, <code>cache.r5.4xlarge</code>, <code>cache.r5.12xlarge</code>, <code>cache.r5.24xlarge</code> </p> <p> <b>R4 node types:</b> <code>cache.r4.large</code>, <code>cache.r4.xlarge</code>, <code>cache.r4.2xlarge</code>, <code>cache.r4.4xlarge</code>, <code>cache.r4.8xlarge</code>, <code>cache.r4.16xlarge</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>M2 node types:</b> <code>cache.m2.xlarge</code>, <code>cache.m2.2xlarge</code>, <code>cache.m2.4xlarge</code> </p> <p> <b>R3 node types:</b> <code>cache.r3.large</code>, <code>cache.r3.xlarge</code>, <code>cache.r3.2xlarge</code>, <code>cache.r3.4xlarge</code>, <code>cache.r3.8xlarge</code> </p> </li> </ul> </li> </ul> <p> <b>Additional node type info</b> </p> <ul> <li> <p>All current generation instance types are created in Amazon VPC by default.</p> </li> <li> <p>Redis append-only files (AOF) are not supported for T1 or T2 instances.</p> </li> <li> <p>Redis Multi-AZ with automatic failover is not supported on T1 instances.</p> </li> <li> <p>Redis configuration variables <code>appendonly</code> and <code>appendfsync</code> are not supported on Redis version 2.8.22 and later.</p> </li> </ul>"
"documentation":"<p>Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached or Redis.</p> <p>The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.</p> <ul> <li> <p>General purpose:</p> <ul> <li> <p>Current generation: </p> <p> <b>M5 node types:</b> <code>cache.m5.large</code>, <code>cache.m5.xlarge</code>, <code>cache.m5.2xlarge</code>, <code>cache.m5.4xlarge</code>, <code>cache.m5.12xlarge</code>, <code>cache.m5.24xlarge</code> </p> <p> <b>M4 node types:</b> <code>cache.m4.large</code>, <code>cache.m4.xlarge</code>, <code>cache.m4.2xlarge</code>, <code>cache.m4.4xlarge</code>, <code>cache.m4.10xlarge</code> </p> <p> <b>T3 node types:</b> <code>cache.t3.micro</code>, <code>cache.t3.small</code>, <code>cache.t3.medium</code> </p> <p> <b>T2 node types:</b> <code>cache.t2.micro</code>, <code>cache.t2.small</code>, <code>cache.t2.medium</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>T1 node types:</b> <code>cache.t1.micro</code> </p> <p> <b>M1 node types:</b> <code>cache.m1.small</code>, <code>cache.m1.medium</code>, <code>cache.m1.large</code>, <code>cache.m1.xlarge</code> </p> <p> <b>M3 node types:</b> <code>cache.m3.medium</code>, <code>cache.m3.large</code>, <code>cache.m3.xlarge</code>, <code>cache.m3.2xlarge</code> </p> </li> </ul> </li> <li> <p>Compute optimized:</p> <ul> <li> <p>Previous generation: (not recommended)</p> <p> <b>C1 node types:</b> <code>cache.c1.xlarge</code> </p> </li> </ul> </li> <li> <p>Memory optimized:</p> <ul> <li> <p>Current generation: </p> <p> <b>R5 node types:</b> <code>cache.r5.large</code>, <code>cache.r5.xlarge</code>, <code>cache.r5.2xlarge</code>, <code>cache.r5.4xlarge</code>, <code>cache.r5.12xlarge</code>, <code>cache.r5.24xlarge</code> </p> <p> <b>R4 node types:</b> <code>cache.r4.large</code>, <code>cache.r4.xlarge</code>, <code>cache.r4.2xlarge</code>, <code>cache.r4.4xlarge</code>, <code>cache.r4.8xlarge</code>, <code>cache.r4.16xlarge</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>M2 node types:</b> <code>cache.m2.xlarge</code>, <code>cache.m2.2xlarge</code>, <code>cache.m2.4xlarge</code> </p> <p> <b>R3 node types:</b> <code>cache.r3.large</code>, <code>cache.r3.xlarge</code>, <code>cache.r3.2xlarge</code>, <code>cache.r3.4xlarge</code>, <code>cache.r3.8xlarge</code> </p> </li> </ul> </li> </ul> <p> <b>Additional node type info</b> </p> <ul> <li> <p>All current generation instance types are created in Amazon VPC by default.</p> </li> <li> <p>Redis append-only files (AOF) are not supported for T1 or T2 instances.</p> </li> <li> <p>Redis Multi-AZ with automatic failover is not supported on T1 instances.</p> </li> <li> <p>Redis configuration variables <code>appendonly</code> and <code>appendfsync</code> are not supported on Redis version 2.8.22 and later.</p> </li> </ul>"
},
"CacheNodeIdsList":{
"type":"list",
@@ -1534,6 +1705,10 @@
"Description":{
"shape":"String",
"documentation":"<p>The description for this cache parameter group.</p>"
},
"IsGlobal":{
"shape":"Boolean",
"documentation":"<p>Indicates whether the parameter group is associated with a Global Datastore</p>"
}
},
"documentation":"<p>Represents the output of a <code>CreateCacheParameterGroup</code> operation.</p>",
@@ -1984,7 +2159,7 @@
},
"CacheNodeType":{
"shape":"String",
"documentation":"<p>The compute and memory capacity of the nodes in the node group (shard).</p> <p>The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.</p> <ul> <li> <p>General purpose:</p> <ul> <li> <p>Current generation: </p> <p> <b>M5 node types:</b> <code>cache.m5.large</code>, <code>cache.m5.xlarge</code>, <code>cache.m5.2xlarge</code>, <code>cache.m5.4xlarge</code>, <code>cache.m5.12xlarge</code>, <code>cache.m5.24xlarge</code> </p> <p> <b>M4 node types:</b> <code>cache.m4.large</code>, <code>cache.m4.xlarge</code>, <code>cache.m4.2xlarge</code>, <code>cache.m4.4xlarge</code>, <code>cache.m4.10xlarge</code> </p> <p> <b>T2 node types:</b> <code>cache.t2.micro</code>, <code>cache.t2.small</code>, <code>cache.t2.medium</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>T1 node types:</b> <code>cache.t1.micro</code> </p> <p> <b>M1 node types:</b> <code>cache.m1.small</code>, <code>cache.m1.medium</code>, <code>cache.m1.large</code>, <code>cache.m1.xlarge</code> </p> <p> <b>M3 node types:</b> <code>cache.m3.medium</code>, <code>cache.m3.large</code>, <code>cache.m3.xlarge</code>, <code>cache.m3.2xlarge</code> </p> </li> </ul> </li> <li> <p>Compute optimized:</p> <ul> <li> <p>Previous generation: (not recommended)</p> <p> <b>C1 node types:</b> <code>cache.c1.xlarge</code> </p> </li> </ul> </li> <li> <p>Memory optimized:</p> <ul> <li> <p>Current generation: </p> <p> <b>R5 node types:</b> <code>cache.r5.large</code>, <code>cache.r5.xlarge</code>, <code>cache.r5.2xlarge</code>, <code>cache.r5.4xlarge</code>, <code>cache.r5.12xlarge</code>, <code>cache.r5.24xlarge</code> </p> <p> <b>R4 node types:</b> <code>cache.r4.large</code>, <code>cache.r4.xlarge</code>, <code>cache.r4.2xlarge</code>, <code>cache.r4.4xlarge</code>, <code>cache.r4.8xlarge</code>, <code>cache.r4.16xlarge</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>M2 node types:</b> <code>cache.m2.xlarge</code>, <code>cache.m2.2xlarge</code>, <code>cache.m2.4xlarge</code> </p> <p> <b>R3 node types:</b> <code>cache.r3.large</code>, <code>cache.r3.xlarge</code>, <code>cache.r3.2xlarge</code>, <code>cache.r3.4xlarge</code>, <code>cache.r3.8xlarge</code> </p> </li> </ul> </li> </ul> <p> <b>Additional node type info</b> </p> <ul> <li> <p>All current generation instance types are created in Amazon VPC by default.</p> </li> <li> <p>Redis append-only files (AOF) are not supported for T1 or T2 instances.</p> </li> <li> <p>Redis Multi-AZ with automatic failover is not supported on T1 instances.</p> </li> <li> <p>Redis configuration variables <code>appendonly</code> and <code>appendfsync</code> are not supported on Redis version 2.8.22 and later.</p> </li> </ul>"
"documentation":"<p>The compute and memory capacity of the nodes in the node group (shard).</p> <p>The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.</p> <ul> <li> <p>General purpose:</p> <ul> <li> <p>Current generation: </p> <p> <b>M5 node types:</b> <code>cache.m5.large</code>, <code>cache.m5.xlarge</code>, <code>cache.m5.2xlarge</code>, <code>cache.m5.4xlarge</code>, <code>cache.m5.12xlarge</code>, <code>cache.m5.24xlarge</code> </p> <p> <b>M4 node types:</b> <code>cache.m4.large</code>, <code>cache.m4.xlarge</code>, <code>cache.m4.2xlarge</code>, <code>cache.m4.4xlarge</code>, <code>cache.m4.10xlarge</code> </p> <p> <b>T3 node types:</b> <code>cache.t3.micro</code>, <code>cache.t3.small</code>, <code>cache.t3.medium</code> </p> <p> <b>T2 node types:</b> <code>cache.t2.micro</code>, <code>cache.t2.small</code>, <code>cache.t2.medium</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>T1 node types:</b> <code>cache.t1.micro</code> </p> <p> <b>M1 node types:</b> <code>cache.m1.small</code>, <code>cache.m1.medium</code>, <code>cache.m1.large</code>, <code>cache.m1.xlarge</code> </p> <p> <b>M3 node types:</b> <code>cache.m3.medium</code>, <code>cache.m3.large</code>, <code>cache.m3.xlarge</code>, <code>cache.m3.2xlarge</code> </p> </li> </ul> </li> <li> <p>Compute optimized:</p> <ul> <li> <p>Previous generation: (not recommended)</p> <p> <b>C1 node types:</b> <code>cache.c1.xlarge</code> </p> </li> </ul> </li> <li> <p>Memory optimized:</p> <ul> <li> <p>Current generation: </p> <p> <b>R5 node types:</b> <code>cache.r5.large</code>, <code>cache.r5.xlarge</code>, <code>cache.r5.2xlarge</code>, <code>cache.r5.4xlarge</code>, <code>cache.r5.12xlarge</code>, <code>cache.r5.24xlarge</code> </p> <p> <b>R4 node types:</b> <code>cache.r4.large</code>, <code>cache.r4.xlarge</code>, <code>cache.r4.2xlarge</code>, <code>cache.r4.4xlarge</code>, <code>cache.r4.8xlarge</code>, <code>cache.r4.16xlarge</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>M2 node types:</b> <code>cache.m2.xlarge</code>, <code>cache.m2.2xlarge</code>, <code>cache.m2.4xlarge</code> </p> <p> <b>R3 node types:</b> <code>cache.r3.large</code>, <code>cache.r3.xlarge</code>, <code>cache.r3.2xlarge</code>, <code>cache.r3.4xlarge</code>, <code>cache.r3.8xlarge</code> </p> </li> </ul> </li> </ul> <p> <b>Additional node type info</b> </p> <ul> <li> <p>All current generation instance types are created in Amazon VPC by default.</p> </li> <li> <p>Redis append-only files (AOF) are not supported for T1 or T2 instances.</p> </li> <li> <p>Redis Multi-AZ with automatic failover is not supported on T1 instances.</p> </li> <li> <p>Redis configuration variables <code>appendonly</code> and <code>appendfsync</code> are not supported on Redis version 2.8.22 and later.</p> </li> </ul>"
},
"Engine":{
"shape":"String",
@@ -2141,6 +2316,33 @@
"CacheSubnetGroup":{"shape":"CacheSubnetGroup"}
}
},
"CreateGlobalReplicationGroupMessage":{
"type":"structure",
"required":[
"GlobalReplicationGroupIdSuffix",
"PrimaryReplicationGroupId"
],
"members":{
"GlobalReplicationGroupIdSuffix":{
"shape":"String",
"documentation":"<p>The suffix for name of a Global Datastore. The suffix guarantees uniqueness of the Global Datastore name across multiple regions.</p>"
},
"GlobalReplicationGroupDescription":{
"shape":"String",
"documentation":"<p>Provides details of the Global Datastore</p>"
},
"PrimaryReplicationGroupId":{
"shape":"String",
"documentation":"<p>The name of the primary cluster that accepts writes and will replicate updates to the secondary cluster.</p>"
}
}
},
"CreateGlobalReplicationGroupResult":{
"type":"structure",
"members":{
"GlobalReplicationGroup":{"shape":"GlobalReplicationGroup"}
}
},
"CreateReplicationGroupMessage":{
"type":"structure",
"required":[
@@ -2156,6 +2358,10 @@
"shape":"String",
"documentation":"<p>A user-created description for the replication group.</p>"
},
"GlobalReplicationGroupId":{
"shape":"String",
"documentation":"<p>The name of the Global Datastore</p>"
},
"PrimaryClusterId":{
"shape":"String",
"documentation":"<p>The identifier of the cluster that serves as the primary for this replication group. This cluster must already exist and have a status of <code>available</code>.</p> <p>This parameter is not required if <code>NumCacheClusters</code>, <code>NumNodeGroups</code>, or <code>ReplicasPerNodeGroup</code> is specified.</p>"
@@ -2186,7 +2392,7 @@
},
"CacheNodeType":{
"shape":"String",
"documentation":"<p>The compute and memory capacity of the nodes in the node group (shard).</p> <p>The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.</p> <ul> <li> <p>General purpose:</p> <ul> <li> <p>Current generation: </p> <p> <b>M5 node types:</b> <code>cache.m5.large</code>, <code>cache.m5.xlarge</code>, <code>cache.m5.2xlarge</code>, <code>cache.m5.4xlarge</code>, <code>cache.m5.12xlarge</code>, <code>cache.m5.24xlarge</code> </p> <p> <b>M4 node types:</b> <code>cache.m4.large</code>, <code>cache.m4.xlarge</code>, <code>cache.m4.2xlarge</code>, <code>cache.m4.4xlarge</code>, <code>cache.m4.10xlarge</code> </p> <p> <b>T2 node types:</b> <code>cache.t2.micro</code>, <code>cache.t2.small</code>, <code>cache.t2.medium</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>T1 node types:</b> <code>cache.t1.micro</code> </p> <p> <b>M1 node types:</b> <code>cache.m1.small</code>, <code>cache.m1.medium</code>, <code>cache.m1.large</code>, <code>cache.m1.xlarge</code> </p> <p> <b>M3 node types:</b> <code>cache.m3.medium</code>, <code>cache.m3.large</code>, <code>cache.m3.xlarge</code>, <code>cache.m3.2xlarge</code> </p> </li> </ul> </li> <li> <p>Compute optimized:</p> <ul> <li> <p>Previous generation: (not recommended)</p> <p> <b>C1 node types:</b> <code>cache.c1.xlarge</code> </p> </li> </ul> </li> <li> <p>Memory optimized:</p> <ul> <li> <p>Current generation: </p> <p> <b>R5 node types:</b> <code>cache.r5.large</code>, <code>cache.r5.xlarge</code>, <code>cache.r5.2xlarge</code>, <code>cache.r5.4xlarge</code>, <code>cache.r5.12xlarge</code>, <code>cache.r5.24xlarge</code> </p> <p> <b>R4 node types:</b> <code>cache.r4.large</code>, <code>cache.r4.xlarge</code>, <code>cache.r4.2xlarge</code>, <code>cache.r4.4xlarge</code>, <code>cache.r4.8xlarge</code>, <code>cache.r4.16xlarge</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>M2 node types:</b> <code>cache.m2.xlarge</code>, <code>cache.m2.2xlarge</code>, <code>cache.m2.4xlarge</code> </p> <p> <b>R3 node types:</b> <code>cache.r3.large</code>, <code>cache.r3.xlarge</code>, <code>cache.r3.2xlarge</code>, <code>cache.r3.4xlarge</code>, <code>cache.r3.8xlarge</code> </p> </li> </ul> </li> </ul> <p> <b>Additional node type info</b> </p> <ul> <li> <p>All current generation instance types are created in Amazon VPC by default.</p> </li> <li> <p>Redis append-only files (AOF) are not supported for T1 or T2 instances.</p> </li> <li> <p>Redis Multi-AZ with automatic failover is not supported on T1 instances.</p> </li> <li> <p>Redis configuration variables <code>appendonly</code> and <code>appendfsync</code> are not supported on Redis version 2.8.22 and later.</p> </li> </ul>"
"documentation":"<p>The compute and memory capacity of the nodes in the node group (shard).</p> <p>The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.</p> <ul> <li> <p>General purpose:</p> <ul> <li> <p>Current generation: </p> <p> <b>M5 node types:</b> <code>cache.m5.large</code>, <code>cache.m5.xlarge</code>, <code>cache.m5.2xlarge</code>, <code>cache.m5.4xlarge</code>, <code>cache.m5.12xlarge</code>, <code>cache.m5.24xlarge</code> </p> <p> <b>M4 node types:</b> <code>cache.m4.large</code>, <code>cache.m4.xlarge</code>, <code>cache.m4.2xlarge</code>, <code>cache.m4.4xlarge</code>, <code>cache.m4.10xlarge</code> </p> <p> <b>T3 node types:</b> <code>cache.t3.micro</code>, <code>cache.t3.small</code>, <code>cache.t3.medium</code> </p> <p> <b>T2 node types:</b> <code>cache.t2.micro</code>, <code>cache.t2.small</code>, <code>cache.t2.medium</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>T1 node types:</b> <code>cache.t1.micro</code> </p> <p> <b>M1 node types:</b> <code>cache.m1.small</code>, <code>cache.m1.medium</code>, <code>cache.m1.large</code>, <code>cache.m1.xlarge</code> </p> <p> <b>M3 node types:</b> <code>cache.m3.medium</code>, <code>cache.m3.large</code>, <code>cache.m3.xlarge</code>, <code>cache.m3.2xlarge</code> </p> </li> </ul> </li> <li> <p>Compute optimized:</p> <ul> <li> <p>Previous generation: (not recommended)</p> <p> <b>C1 node types:</b> <code>cache.c1.xlarge</code> </p> </li> </ul> </li> <li> <p>Memory optimized:</p> <ul> <li> <p>Current generation: </p> <p> <b>R5 node types:</b> <code>cache.r5.large</code>, <code>cache.r5.xlarge</code>, <code>cache.r5.2xlarge</code>, <code>cache.r5.4xlarge</code>, <code>cache.r5.12xlarge</code>, <code>cache.r5.24xlarge</code> </p> <p> <b>R4 node types:</b> <code>cache.r4.large</code>, <code>cache.r4.xlarge</code>, <code>cache.r4.2xlarge</code>, <code>cache.r4.4xlarge</code>, <code>cache.r4.8xlarge</code>, <code>cache.r4.16xlarge</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>M2 node types:</b> <code>cache.m2.xlarge</code>, <code>cache.m2.2xlarge</code>, <code>cache.m2.4xlarge</code> </p> <p> <b>R3 node types:</b> <code>cache.r3.large</code>, <code>cache.r3.xlarge</code>, <code>cache.r3.2xlarge</code>, <code>cache.r3.4xlarge</code>, <code>cache.r3.8xlarge</code> </p> </li> </ul> </li> </ul> <p> <b>Additional node type info</b> </p> <ul> <li> <p>All current generation instance types are created in Amazon VPC by default.</p> </li> <li> <p>Redis append-only files (AOF) are not supported for T1 or T2 instances.</p> </li> <li> <p>Redis Multi-AZ with automatic failover is not supported on T1 instances.</p> </li> <li> <p>Redis configuration variables <code>appendonly</code> and <code>appendfsync</code> are not supported on Redis version 2.8.22 and later.</p> </li> </ul>"
},
"Engine":{
"shape":"String",
@@ -2262,7 +2468,7 @@
},
"KmsKeyId":{
"shape":"String",
"documentation":"<p>The ID of the KMS key used to encrypt the disk on the cluster.</p>"
"documentation":"<p>The ID of the KMS key used to encrypt the disk in the cluster.</p>"
}
},
"documentation":"<p>Represents the input of a <code>CreateReplicationGroup</code> operation.</p>"
@@ -2320,6 +2526,42 @@
"type":"list",
"member":{"shape":"CustomerNodeEndpoint"}
},
"DecreaseNodeGroupsInGlobalReplicationGroupMessage":{
"type":"structure",
"required":[
"GlobalReplicationGroupId",
"NodeGroupCount",
"ApplyImmediately"
],
"members":{
"GlobalReplicationGroupId":{
"shape":"String",
"documentation":"<p>The name of the Global Datastore</p>"
},
"NodeGroupCount":{
"shape":"Integer",
"documentation":"<p>The number of node groups (shards) that results from the modification of the shard configuration</p>"
},
"GlobalNodeGroupsToRemove":{
"shape":"GlobalNodeGroupIdList",
"documentation":"<p>If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. NodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. ElastiCache for Redis will attempt to remove all node groups listed by NodeGroupsToRemove from the cluster. </p>"
},
"GlobalNodeGroupsToRetain":{
"shape":"GlobalNodeGroupIdList",
"documentation":"<p>If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. NodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. ElastiCache for Redis will attempt to remove all node groups listed by NodeGroupsToRemove from the cluster. </p>"
},
"ApplyImmediately":{
"shape":"Boolean",
"documentation":"<p>Indicates that the shard reconfiguration process begins immediately. At present, the only permitted value for this parameter is true. </p>"
}
}
},
"DecreaseNodeGroupsInGlobalReplicationGroupResult":{
"type":"structure",
"members":{
"GlobalReplicationGroup":{"shape":"GlobalReplicationGroup"}
}
},
"DecreaseReplicaCountMessage":{
"type":"structure",
"required":[
@ -2409,6 +2651,29 @@
},
"documentation":"<p>Represents the input of a <code>DeleteCacheSubnetGroup</code> operation.</p>"
},
"DeleteGlobalReplicationGroupMessage":{
"type":"structure",
"required":[
"GlobalReplicationGroupId",
"RetainPrimaryReplicationGroup"
],
"members":{
"GlobalReplicationGroupId":{
"shape":"String",
"documentation":"<p>The name of the Global Datastore</p>"
},
"RetainPrimaryReplicationGroup":{
"shape":"Boolean",
"documentation":"<p>If set to <code>true</code>, the primary replication is retained as a standalone replication group. </p>"
}
}
},
"DeleteGlobalReplicationGroupResult":{
"type":"structure",
"members":{
"GlobalReplicationGroup":{"shape":"GlobalReplicationGroup"}
}
},
"DeleteReplicationGroupMessage":{
"type":"structure",
"required":["ReplicationGroupId"],
@ -2643,6 +2908,40 @@
},
"documentation":"<p>Represents the input of a <code>DescribeEvents</code> operation.</p>"
},
"DescribeGlobalReplicationGroupsMessage":{
"type":"structure",
"members":{
"GlobalReplicationGroupId":{
"shape":"String",
"documentation":"<p>The name of the Global Datastore</p>"
},
"MaxRecords":{
"shape":"IntegerOptional",
"documentation":"<p>The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved. </p>"
},
"Marker":{
"shape":"String",
"documentation":"<p>An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. </p>"
},
"ShowMemberInfo":{
"shape":"BooleanOptional",
"documentation":"<p>Returns the list of members that comprise the Global Datastore.</p>"
}
}
},
"DescribeGlobalReplicationGroupsResult":{
"type":"structure",
"members":{
"Marker":{
"shape":"String",
"documentation":"<p>An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. &gt;</p>"
},
"GlobalReplicationGroups":{
"shape":"GlobalReplicationGroupList",
"documentation":"<p>Indicates the slot configuration and global identifier for each slice group.</p>"
}
}
},
"DescribeReplicationGroupsMessage":{
"type":"structure",
"members":{
@ -2674,7 +2973,7 @@
},
"CacheNodeType":{
"shape":"String",
"documentation":"<p>The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type.</p> <p>The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.</p> <ul> <li> <p>General purpose:</p> <ul> <li> <p>Current generation: </p> <p> <b>M5 node types:</b> <code>cache.m5.large</code>, <code>cache.m5.xlarge</code>, <code>cache.m5.2xlarge</code>, <code>cache.m5.4xlarge</code>, <code>cache.m5.12xlarge</code>, <code>cache.m5.24xlarge</code> </p> <p> <b>M4 node types:</b> <code>cache.m4.large</code>, <code>cache.m4.xlarge</code>, <code>cache.m4.2xlarge</code>, <code>cache.m4.4xlarge</code>, <code>cache.m4.10xlarge</code> </p> <p> <b>T2 node types:</b> <code>cache.t2.micro</code>, <code>cache.t2.small</code>, <code>cache.t2.medium</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>T1 node types:</b> <code>cache.t1.micro</code> </p> <p> <b>M1 node types:</b> <code>cache.m1.small</code>, <code>cache.m1.medium</code>, <code>cache.m1.large</code>, <code>cache.m1.xlarge</code> </p> <p> <b>M3 node types:</b> <code>cache.m3.medium</code>, <code>cache.m3.large</code>, <code>cache.m3.xlarge</code>, <code>cache.m3.2xlarge</code> </p> </li> </ul> </li> <li> <p>Compute optimized:</p> <ul> <li> <p>Previous generation: (not recommended)</p> <p> <b>C1 node types:</b> <code>cache.c1.xlarge</code> </p> </li> </ul> </li> <li> <p>Memory optimized:</p> <ul> <li> <p>Current generation: </p> <p> <b>R5 node types:</b> <code>cache.r5.large</code>, <code>cache.r5.xlarge</code>, <code>cache.r5.2xlarge</code>, <code>cache.r5.4xlarge</code>, <code>cache.r5.12xlarge</code>, <code>cache.r5.24xlarge</code> </p> <p> <b>R4 node types:</b> <code>cache.r4.large</code>, <code>cache.r4.xlarge</code>, <code>cache.r4.2xlarge</code>, <code>cache.r4.4xlarge</code>, <code>cache.r4.8xlarge</code>, <code>cache.r4.16xlarge</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>M2 node types:</b> <code>cache.m2.xlarge</code>, <code>cache.m2.2xlarge</code>, <code>cache.m2.4xlarge</code> </p> <p> <b>R3 node types:</b> <code>cache.r3.large</code>, <code>cache.r3.xlarge</code>, <code>cache.r3.2xlarge</code>, <code>cache.r3.4xlarge</code>, <code>cache.r3.8xlarge</code> </p> </li> </ul> </li> </ul> <p> <b>Additional node type info</b> </p> <ul> <li> <p>All current generation instance types are created in Amazon VPC by default.</p> </li> <li> <p>Redis append-only files (AOF) are not supported for T1 or T2 instances.</p> </li> <li> <p>Redis Multi-AZ with automatic failover is not supported on T1 instances.</p> </li> <li> <p>Redis configuration variables <code>appendonly</code> and <code>appendfsync</code> are not supported on Redis version 2.8.22 and later.</p> </li> </ul>"
"documentation":"<p>The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type.</p> <p>The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.</p> <ul> <li> <p>General purpose:</p> <ul> <li> <p>Current generation: </p> <p> <b>M5 node types:</b> <code>cache.m5.large</code>, <code>cache.m5.xlarge</code>, <code>cache.m5.2xlarge</code>, <code>cache.m5.4xlarge</code>, <code>cache.m5.12xlarge</code>, <code>cache.m5.24xlarge</code> </p> <p> <b>M4 node types:</b> <code>cache.m4.large</code>, <code>cache.m4.xlarge</code>, <code>cache.m4.2xlarge</code>, <code>cache.m4.4xlarge</code>, <code>cache.m4.10xlarge</code> </p> <p> <b>T3 node types:</b> <code>cache.t3.micro</code>, <code>cache.t3.small</code>, <code>cache.t3.medium</code> </p> <p> <b>T2 node types:</b> <code>cache.t2.micro</code>, <code>cache.t2.small</code>, <code>cache.t2.medium</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>T1 node types:</b> <code>cache.t1.micro</code> </p> <p> <b>M1 node types:</b> <code>cache.m1.small</code>, <code>cache.m1.medium</code>, <code>cache.m1.large</code>, <code>cache.m1.xlarge</code> </p> <p> <b>M3 node types:</b> <code>cache.m3.medium</code>, <code>cache.m3.large</code>, <code>cache.m3.xlarge</code>, <code>cache.m3.2xlarge</code> </p> </li> </ul> </li> <li> <p>Compute optimized:</p> <ul> <li> <p>Previous generation: (not recommended)</p> <p> <b>C1 node types:</b> <code>cache.c1.xlarge</code> </p> </li> </ul> </li> <li> <p>Memory optimized:</p> <ul> <li> <p>Current generation: </p> <p> <b>R5 node types:</b> <code>cache.r5.large</code>, <code>cache.r5.xlarge</code>, <code>cache.r5.2xlarge</code>, <code>cache.r5.4xlarge</code>, <code>cache.r5.12xlarge</code>, <code>cache.r5.24xlarge</code> </p> <p> <b>R4 node types:</b> <code>cache.r4.large</code>, <code>cache.r4.xlarge</code>, <code>cache.r4.2xlarge</code>, <code>cache.r4.4xlarge</code>, <code>cache.r4.8xlarge</code>, <code>cache.r4.16xlarge</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>M2 node types:</b> <code>cache.m2.xlarge</code>, <code>cache.m2.2xlarge</code>, <code>cache.m2.4xlarge</code> </p> <p> <b>R3 node types:</b> <code>cache.r3.large</code>, <code>cache.r3.xlarge</code>, <code>cache.r3.2xlarge</code>, <code>cache.r3.4xlarge</code>, <code>cache.r3.8xlarge</code> </p> </li> </ul> </li> </ul> <p> <b>Additional node type info</b> </p> <ul> <li> <p>All current generation instance types are created in Amazon VPC by default.</p> </li> <li> <p>Redis append-only files (AOF) are not supported for T1 or T2 instances.</p> </li> <li> <p>Redis Multi-AZ with automatic failover is not supported on T1 instances.</p> </li> <li> <p>Redis configuration variables <code>appendonly</code> and <code>appendfsync</code> are not supported on Redis version 2.8.22 and later.</p> </li> </ul>"
},
"Duration":{
"shape":"String",
@ -2708,7 +3007,7 @@
},
"CacheNodeType":{
"shape":"String",
"documentation":"<p>The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type.</p> <p>The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.</p> <ul> <li> <p>General purpose:</p> <ul> <li> <p>Current generation: </p> <p> <b>M5 node types:</b> <code>cache.m5.large</code>, <code>cache.m5.xlarge</code>, <code>cache.m5.2xlarge</code>, <code>cache.m5.4xlarge</code>, <code>cache.m5.12xlarge</code>, <code>cache.m5.24xlarge</code> </p> <p> <b>M4 node types:</b> <code>cache.m4.large</code>, <code>cache.m4.xlarge</code>, <code>cache.m4.2xlarge</code>, <code>cache.m4.4xlarge</code>, <code>cache.m4.10xlarge</code> </p> <p> <b>T2 node types:</b> <code>cache.t2.micro</code>, <code>cache.t2.small</code>, <code>cache.t2.medium</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>T1 node types:</b> <code>cache.t1.micro</code> </p> <p> <b>M1 node types:</b> <code>cache.m1.small</code>, <code>cache.m1.medium</code>, <code>cache.m1.large</code>, <code>cache.m1.xlarge</code> </p> <p> <b>M3 node types:</b> <code>cache.m3.medium</code>, <code>cache.m3.large</code>, <code>cache.m3.xlarge</code>, <code>cache.m3.2xlarge</code> </p> </li> </ul> </li> <li> <p>Compute optimized:</p> <ul> <li> <p>Previous generation: (not recommended)</p> <p> <b>C1 node types:</b> <code>cache.c1.xlarge</code> </p> </li> </ul> </li> <li> <p>Memory optimized:</p> <ul> <li> <p>Current generation: </p> <p> <b>R5 node types:</b> <code>cache.r5.large</code>, <code>cache.r5.xlarge</code>, <code>cache.r5.2xlarge</code>, <code>cache.r5.4xlarge</code>, <code>cache.r5.12xlarge</code>, <code>cache.r5.24xlarge</code> </p> <p> <b>R4 node types:</b> <code>cache.r4.large</code>, <code>cache.r4.xlarge</code>, <code>cache.r4.2xlarge</code>, <code>cache.r4.4xlarge</code>, <code>cache.r4.8xlarge</code>, <code>cache.r4.16xlarge</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>M2 node types:</b> <code>cache.m2.xlarge</code>, <code>cache.m2.2xlarge</code>, <code>cache.m2.4xlarge</code> </p> <p> <b>R3 node types:</b> <code>cache.r3.large</code>, <code>cache.r3.xlarge</code>, <code>cache.r3.2xlarge</code>, <code>cache.r3.4xlarge</code>, <code>cache.r3.8xlarge</code> </p> </li> </ul> </li> </ul> <p> <b>Additional node type info</b> </p> <ul> <li> <p>All current generation instance types are created in Amazon VPC by default.</p> </li> <li> <p>Redis append-only files (AOF) are not supported for T1 or T2 instances.</p> </li> <li> <p>Redis Multi-AZ with automatic failover is not supported on T1 instances.</p> </li> <li> <p>Redis configuration variables <code>appendonly</code> and <code>appendfsync</code> are not supported on Redis version 2.8.22 and later.</p> </li> </ul>"
"documentation":"<p>The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type.</p> <p>The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.</p> <ul> <li> <p>General purpose:</p> <ul> <li> <p>Current generation: </p> <p> <b>M5 node types:</b> <code>cache.m5.large</code>, <code>cache.m5.xlarge</code>, <code>cache.m5.2xlarge</code>, <code>cache.m5.4xlarge</code>, <code>cache.m5.12xlarge</code>, <code>cache.m5.24xlarge</code> </p> <p> <b>M4 node types:</b> <code>cache.m4.large</code>, <code>cache.m4.xlarge</code>, <code>cache.m4.2xlarge</code>, <code>cache.m4.4xlarge</code>, <code>cache.m4.10xlarge</code> </p> <p> <b>T3 node types:</b> <code>cache.t3.micro</code>, <code>cache.t3.small</code>, <code>cache.t3.medium</code> </p> <p> <b>T2 node types:</b> <code>cache.t2.micro</code>, <code>cache.t2.small</code>, <code>cache.t2.medium</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>T1 node types:</b> <code>cache.t1.micro</code> </p> <p> <b>M1 node types:</b> <code>cache.m1.small</code>, <code>cache.m1.medium</code>, <code>cache.m1.large</code>, <code>cache.m1.xlarge</code> </p> <p> <b>M3 node types:</b> <code>cache.m3.medium</code>, <code>cache.m3.large</code>, <code>cache.m3.xlarge</code>, <code>cache.m3.2xlarge</code> </p> </li> </ul> </li> <li> <p>Compute optimized:</p> <ul> <li> <p>Previous generation: (not recommended)</p> <p> <b>C1 node types:</b> <code>cache.c1.xlarge</code> </p> </li> </ul> </li> <li> <p>Memory optimized:</p> <ul> <li> <p>Current generation: </p> <p> <b>R5 node types:</b> <code>cache.r5.large</code>, <code>cache.r5.xlarge</code>, <code>cache.r5.2xlarge</code>, <code>cache.r5.4xlarge</code>, <code>cache.r5.12xlarge</code>, <code>cache.r5.24xlarge</code> </p> <p> <b>R4 node types:</b> <code>cache.r4.large</code>, <code>cache.r4.xlarge</code>, <code>cache.r4.2xlarge</code>, <code>cache.r4.4xlarge</code>, <code>cache.r4.8xlarge</code>, <code>cache.r4.16xlarge</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>M2 node types:</b> <code>cache.m2.xlarge</code>, <code>cache.m2.2xlarge</code>, <code>cache.m2.4xlarge</code> </p> <p> <b>R3 node types:</b> <code>cache.r3.large</code>, <code>cache.r3.xlarge</code>, <code>cache.r3.2xlarge</code>, <code>cache.r3.4xlarge</code>, <code>cache.r3.8xlarge</code> </p> </li> </ul> </li> </ul> <p> <b>Additional node type info</b> </p> <ul> <li> <p>All current generation instance types are created in Amazon VPC by default.</p> </li> <li> <p>Redis append-only files (AOF) are not supported for T1 or T2 instances.</p> </li> <li> <p>Redis Multi-AZ with automatic failover is not supported on T1 instances.</p> </li> <li> <p>Redis configuration variables <code>appendonly</code> and <code>appendfsync</code> are not supported on Redis version 2.8.22 and later.</p> </li> </ul>"
},
"Duration":{
"shape":"String",
@ -2847,6 +3146,34 @@
}
}
},
"DisassociateGlobalReplicationGroupMessage":{
"type":"structure",
"required":[
"GlobalReplicationGroupId",
"ReplicationGroupId",
"ReplicationGroupRegion"
],
"members":{
"GlobalReplicationGroupId":{
"shape":"String",
"documentation":"<p>The name of the Global Datastore</p>"
},
"ReplicationGroupId":{
"shape":"String",
"documentation":"<p>The name of the secondary cluster you wish to remove from the Global Datastore</p>"
},
"ReplicationGroupRegion":{
"shape":"String",
"documentation":"<p>The AWS region of secondary cluster you wish to remove from the Global Datastore</p>"
}
}
},
"DisassociateGlobalReplicationGroupResult":{
"type":"structure",
"members":{
"GlobalReplicationGroup":{"shape":"GlobalReplicationGroup"}
}
},
"Double":{"type":"double"},
"EC2SecurityGroup":{
"type":"structure",
@ -2953,6 +3280,228 @@
},
"documentation":"<p>Represents the output of a <code>DescribeEvents</code> operation.</p>"
},
"FailoverGlobalReplicationGroupMessage":{
"type":"structure",
"required":[
"GlobalReplicationGroupId",
"PrimaryRegion",
"PrimaryReplicationGroupId"
],
"members":{
"GlobalReplicationGroupId":{
"shape":"String",
"documentation":"<p>The name of the Global Datastore</p>"
},
"PrimaryRegion":{
"shape":"String",
"documentation":"<p>The AWS region of the primary cluster of the Global Datastore</p>"
},
"PrimaryReplicationGroupId":{
"shape":"String",
"documentation":"<p>The name of the primary replication group</p>"
}
}
},
"FailoverGlobalReplicationGroupResult":{
"type":"structure",
"members":{
"GlobalReplicationGroup":{"shape":"GlobalReplicationGroup"}
}
},
"GlobalNodeGroup":{
"type":"structure",
"members":{
"GlobalNodeGroupId":{
"shape":"String",
"documentation":"<p>The name of the global node group</p>"
},
"Slots":{
"shape":"String",
"documentation":"<p>The keyspace for this node group</p>"
}
},
"documentation":"<p>Indicates the slot configuration and global identifier for a slice group.</p>"
},
"GlobalNodeGroupIdList":{
"type":"list",
"member":{
"shape":"String",
"locationName":"GlobalNodeGroupId"
}
},
"GlobalNodeGroupList":{
"type":"list",
"member":{
"shape":"GlobalNodeGroup",
"locationName":"GlobalNodeGroup"
}
},
"GlobalReplicationGroup":{
"type":"structure",
"members":{
"GlobalReplicationGroupId":{
"shape":"String",
"documentation":"<p>The name of the Global Datastore</p>"
},
"GlobalReplicationGroupDescription":{
"shape":"String",
"documentation":"<p>The optional description of the Global Datastore</p>"
},
"Status":{
"shape":"String",
"documentation":"<p>The status of the Global Datastore</p>"
},
"CacheNodeType":{
"shape":"String",
"documentation":"<p>The cache node type of the Global Datastore</p>"
},
"Engine":{
"shape":"String",
"documentation":"<p>The Elasticache engine. For preview, it is Redis only.</p>"
},
"EngineVersion":{
"shape":"String",
"documentation":"<p>The Elasticache Redis engine version. For preview, it is Redis version 5.0.5 only.</p>"
},
"Members":{
"shape":"GlobalReplicationGroupMemberList",
"documentation":"<p>The replication groups that comprise the Global Datastore.</p>"
},
"ClusterEnabled":{
"shape":"BooleanOptional",
"documentation":"<p>A flag that indicates whether the Global Datastore is cluster enabled.</p>"
},
"GlobalNodeGroups":{
"shape":"GlobalNodeGroupList",
"documentation":"<p>Indicates the slot configuration and global identifier for each slice group.</p>"
},
"AuthTokenEnabled":{
"shape":"BooleanOptional",
"documentation":"<p>A flag that enables using an <code>AuthToken</code> (password) when issuing Redis commands.</p> <p>Default: <code>false</code> </p>"
},
"TransitEncryptionEnabled":{
"shape":"BooleanOptional",
"documentation":"<p>A flag that enables in-transit encryption when set to true. You cannot modify the value of <code>TransitEncryptionEnabled</code> after the cluster is created. To enable in-transit encryption on a cluster you must set <code>TransitEncryptionEnabled</code> to true when you create a cluster. </p>"
},
"AtRestEncryptionEnabled":{
"shape":"BooleanOptional",
"documentation":"<p>A flag that enables encryption at rest when set to <code>true</code>.</p> <p>You cannot modify the value of <code>AtRestEncryptionEnabled</code> after the replication group is created. To enable encryption at rest on a replication group you must set <code>AtRestEncryptionEnabled</code> to <code>true</code> when you create the replication group. </p> <p> <b>Required:</b> Only available when creating a replication group in an Amazon VPC using redis version <code>3.2.6</code>, <code>4.x</code> or later.</p>"
}
},
"documentation":"<p>Consists of a primary cluster that accepts writes and an associated secondary cluster that resides in a different AWS region. The secondary cluster accepts only reads. The primary cluster automatically replicates updates to the secondary cluster.</p> <ul> <li> <p>The <b>GlobalReplicationGroupId</b> represents the name of the Global Datastore, which is what you use to associate a secondary cluster.</p> </li> </ul>",
"wrapper":true
},
"GlobalReplicationGroupAlreadyExistsFault":{
"type":"structure",
"members":{
},
"documentation":"<p>The Global Datastore name already exists.</p>",
"error":{
"code":"GlobalReplicationGroupAlreadyExistsFault",
"httpStatusCode":400,
"senderFault":true
},
"exception":true
},
"GlobalReplicationGroupInfo":{
"type":"structure",
"members":{
"GlobalReplicationGroupId":{
"shape":"String",
"documentation":"<p>The name of the Global Datastore</p>"
},
"GlobalReplicationGroupMemberRole":{
"shape":"String",
"documentation":"<p>The role of the replication group in a Global Datastore. Can be primary or secondary.</p>"
}
},
"documentation":"<p>The name of the Global Datastore and role of this replication group in the Global Datastore.</p>"
},
"GlobalReplicationGroupList":{
"type":"list",
"member":{
"shape":"GlobalReplicationGroup",
"locationName":"GlobalReplicationGroup"
}
},
"GlobalReplicationGroupMember":{
"type":"structure",
"members":{
"ReplicationGroupId":{
"shape":"String",
"documentation":"<p>The replication group id of the Global Datastore member.</p>"
},
"ReplicationGroupRegion":{
"shape":"String",
"documentation":"<p>The AWS region of the Global Datastore member.</p>"
},
"Role":{
"shape":"String",
"documentation":"<p>Indicates the role of the replication group, primary or secondary.</p>"
},
"AutomaticFailover":{
"shape":"AutomaticFailoverStatus",
"documentation":"<p>Indicates whether automatic failover is enabled for the replication group.</p>"
},
"Status":{
"shape":"String",
"documentation":"<p>The status of the membership of the replication group.</p>"
}
},
"documentation":"<p>A member of a Global Datastore. It contains the Replication Group Id, the AWS region and the role of the replication group. </p>",
"wrapper":true
},
"GlobalReplicationGroupMemberList":{
"type":"list",
"member":{
"shape":"GlobalReplicationGroupMember",
"locationName":"GlobalReplicationGroupMember"
}
},
"GlobalReplicationGroupNotFoundFault":{
"type":"structure",
"members":{
},
"documentation":"<p>The Global Datastore does not exist</p>",
"error":{
"code":"GlobalReplicationGroupNotFoundFault",
"httpStatusCode":404,
"senderFault":true
},
"exception":true
},
"IncreaseNodeGroupsInGlobalReplicationGroupMessage":{
"type":"structure",
"required":[
"GlobalReplicationGroupId",
"NodeGroupCount",
"ApplyImmediately"
],
"members":{
"GlobalReplicationGroupId":{
"shape":"String",
"documentation":"<p>The name of the Global Datastore</p>"
},
"NodeGroupCount":{
"shape":"Integer",
"documentation":"<p>The number of node groups you wish to add</p>"
},
"RegionalConfigurations":{
"shape":"RegionalConfigurationList",
"documentation":"<p>Describes the replication group IDs, the AWS regions where they are stored and the shard configuration for each that comprise the Global Datastore</p>"
},
"ApplyImmediately":{
"shape":"Boolean",
"documentation":"<p>Indicates that the process begins immediately. At present, the only permitted value for this parameter is true.</p>"
}
}
},
"IncreaseNodeGroupsInGlobalReplicationGroupResult":{
"type":"structure",
"members":{
"GlobalReplicationGroup":{"shape":"GlobalReplicationGroup"}
}
},
"IncreaseReplicaCountMessage":{
"type":"structure",
"required":[
@ -3046,6 +3595,18 @@
},
"exception":true
},
"InvalidGlobalReplicationGroupStateFault":{
"type":"structure",
"members":{
},
"documentation":"<p>The Global Datastore is not available</p>",
"error":{
"code":"InvalidGlobalReplicationGroupState",
"httpStatusCode":400,
"senderFault":true
},
"exception":true
},
"InvalidKMSKeyFault":{
"type":"structure",
"members":{
@ -3301,6 +3862,45 @@
"CacheSubnetGroup":{"shape":"CacheSubnetGroup"}
}
},
"ModifyGlobalReplicationGroupMessage":{
"type":"structure",
"required":[
"GlobalReplicationGroupId",
"ApplyImmediately"
],
"members":{
"GlobalReplicationGroupId":{
"shape":"String",
"documentation":"<p>The name of the Global Datastore</p>"
},
"ApplyImmediately":{
"shape":"Boolean",
"documentation":"<p>If true, this parameter causes the modifications in this request and any pending modifications to be applied, asynchronously and as soon as possible, regardless of the PreferredMaintenanceWindow setting for the replication group. If false, changes to the nodes in the replication group are applied on the next maintenance reboot, or the next failure reboot, whichever occurs first. </p>"
},
"CacheNodeType":{
"shape":"String",
"documentation":"<p>A valid cache node type that you want to scale this Global Datastore to.</p>"
},
"EngineVersion":{
"shape":"String",
"documentation":"<p>The upgraded version of the cache engine to be run on the clusters in the Global Datastore. </p>"
},
"GlobalReplicationGroupDescription":{
"shape":"String",
"documentation":"<p>A description of the Global Datastore</p>"
},
"AutomaticFailoverEnabled":{
"shape":"BooleanOptional",
"documentation":"<p>Determines whether a read replica is automatically promoted to read/write primary if the existing primary encounters a failure. </p>"
}
}
},
"ModifyGlobalReplicationGroupResult":{
"type":"structure",
"members":{
"GlobalReplicationGroup":{"shape":"GlobalReplicationGroup"}
}
},
"ModifyReplicationGroupMessage":{
"type":"structure",
"required":["ReplicationGroupId"],
@ -3925,6 +4525,29 @@
"ReservedCacheNode":{"shape":"ReservedCacheNode"}
}
},
"RebalanceSlotsInGlobalReplicationGroupMessage":{
"type":"structure",
"required":[
"GlobalReplicationGroupId",
"ApplyImmediately"
],
"members":{
"GlobalReplicationGroupId":{
"shape":"String",
"documentation":"<p>The name of the Global Datastore</p>"
},
"ApplyImmediately":{
"shape":"Boolean",
"documentation":"<p>If <code>True</code>, redistribution is applied immediately.</p>"
}
}
},
"RebalanceSlotsInGlobalReplicationGroupResult":{
"type":"structure",
"members":{
"GlobalReplicationGroup":{"shape":"GlobalReplicationGroup"}
}
},
"RebootCacheClusterMessage":{
"type":"structure",
"required":[
@ -3971,6 +4594,36 @@
"locationName":"RecurringCharge"
}
},
"RegionalConfiguration":{
"type":"structure",
"required":[
"ReplicationGroupId",
"ReplicationGroupRegion",
"ReshardingConfiguration"
],
"members":{
"ReplicationGroupId":{
"shape":"String",
"documentation":"<p>The name of the secondary cluster</p>"
},
"ReplicationGroupRegion":{
"shape":"String",
"documentation":"<p>The AWS region where the cluster is stored</p>"
},
"ReshardingConfiguration":{
"shape":"ReshardingConfigurationList",
"documentation":"<p>A list of <code>PreferredAvailabilityZones</code> objects that specifies the configuration of a node group in the resharded cluster. </p>"
}
},
"documentation":"<p>A list of the replication groups </p>"
},
"RegionalConfigurationList":{
"type":"list",
"member":{
"shape":"RegionalConfiguration",
"locationName":"RegionalConfiguration"
}
},
"RemoveReplicasList":{
"type":"list",
"member":{"shape":"String"}
@ -4011,6 +4664,10 @@
"shape":"String",
"documentation":"<p>The user supplied description of the replication group.</p>"
},
"GlobalReplicationGroupInfo":{
"shape":"GlobalReplicationGroupInfo",
"documentation":"<p>The name of the Global Datastore and role of this replication group in the Global Datastore.</p>"
},
"Status":{
"shape":"String",
"documentation":"<p>The current state of this replication group - <code>creating</code>, <code>available</code>, <code>modifying</code>, <code>deleting</code>, <code>create-failed</code>, <code>snapshotting</code>.</p>"
@ -4188,7 +4845,7 @@
},
"CacheNodeType":{
"shape":"String",
"documentation":"<p>The cache node type for the reserved cache nodes.</p> <p>The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.</p> <ul> <li> <p>General purpose:</p> <ul> <li> <p>Current generation: </p> <p> <b>M5 node types:</b> <code>cache.m5.large</code>, <code>cache.m5.xlarge</code>, <code>cache.m5.2xlarge</code>, <code>cache.m5.4xlarge</code>, <code>cache.m5.12xlarge</code>, <code>cache.m5.24xlarge</code> </p> <p> <b>M4 node types:</b> <code>cache.m4.large</code>, <code>cache.m4.xlarge</code>, <code>cache.m4.2xlarge</code>, <code>cache.m4.4xlarge</code>, <code>cache.m4.10xlarge</code> </p> <p> <b>T2 node types:</b> <code>cache.t2.micro</code>, <code>cache.t2.small</code>, <code>cache.t2.medium</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>T1 node types:</b> <code>cache.t1.micro</code> </p> <p> <b>M1 node types:</b> <code>cache.m1.small</code>, <code>cache.m1.medium</code>, <code>cache.m1.large</code>, <code>cache.m1.xlarge</code> </p> <p> <b>M3 node types:</b> <code>cache.m3.medium</code>, <code>cache.m3.large</code>, <code>cache.m3.xlarge</code>, <code>cache.m3.2xlarge</code> </p> </li> </ul> </li> <li> <p>Compute optimized:</p> <ul> <li> <p>Previous generation: (not recommended)</p> <p> <b>C1 node types:</b> <code>cache.c1.xlarge</code> </p> </li> </ul> </li> <li> <p>Memory optimized:</p> <ul> <li> <p>Current generation: </p> <p> <b>R5 node types:</b> <code>cache.r5.large</code>, <code>cache.r5.xlarge</code>, <code>cache.r5.2xlarge</code>, <code>cache.r5.4xlarge</code>, <code>cache.r5.12xlarge</code>, <code>cache.r5.24xlarge</code> </p> <p> <b>R4 node types:</b> <code>cache.r4.large</code>, <code>cache.r4.xlarge</code>, <code>cache.r4.2xlarge</code>, <code>cache.r4.4xlarge</code>, <code>cache.r4.8xlarge</code>, <code>cache.r4.16xlarge</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>M2 node types:</b> <code>cache.m2.xlarge</code>, <code>cache.m2.2xlarge</code>, <code>cache.m2.4xlarge</code> </p> <p> <b>R3 node types:</b> <code>cache.r3.large</code>, <code>cache.r3.xlarge</code>, <code>cache.r3.2xlarge</code>, <code>cache.r3.4xlarge</code>, <code>cache.r3.8xlarge</code> </p> </li> </ul> </li> </ul> <p> <b>Additional node type info</b> </p> <ul> <li> <p>All current generation instance types are created in Amazon VPC by default.</p> </li> <li> <p>Redis append-only files (AOF) are not supported for T1 or T2 instances.</p> </li> <li> <p>Redis Multi-AZ with automatic failover is not supported on T1 instances.</p> </li> <li> <p>Redis configuration variables <code>appendonly</code> and <code>appendfsync</code> are not supported on Redis version 2.8.22 and later.</p> </li> </ul>"
"documentation":"<p>The cache node type for the reserved cache nodes.</p> <p>The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.</p> <ul> <li> <p>General purpose:</p> <ul> <li> <p>Current generation: </p> <p> <b>M5 node types:</b> <code>cache.m5.large</code>, <code>cache.m5.xlarge</code>, <code>cache.m5.2xlarge</code>, <code>cache.m5.4xlarge</code>, <code>cache.m5.12xlarge</code>, <code>cache.m5.24xlarge</code> </p> <p> <b>M4 node types:</b> <code>cache.m4.large</code>, <code>cache.m4.xlarge</code>, <code>cache.m4.2xlarge</code>, <code>cache.m4.4xlarge</code>, <code>cache.m4.10xlarge</code> </p> <p> <b>T3 node types:</b> <code>cache.t3.micro</code>, <code>cache.t3.small</code>, <code>cache.t3.medium</code> </p> <p> <b>T2 node types:</b> <code>cache.t2.micro</code>, <code>cache.t2.small</code>, <code>cache.t2.medium</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>T1 node types:</b> <code>cache.t1.micro</code> </p> <p> <b>M1 node types:</b> <code>cache.m1.small</code>, <code>cache.m1.medium</code>, <code>cache.m1.large</code>, <code>cache.m1.xlarge</code> </p> <p> <b>M3 node types:</b> <code>cache.m3.medium</code>, <code>cache.m3.large</code>, <code>cache.m3.xlarge</code>, <code>cache.m3.2xlarge</code> </p> </li> </ul> </li> <li> <p>Compute optimized:</p> <ul> <li> <p>Previous generation: (not recommended)</p> <p> <b>C1 node types:</b> <code>cache.c1.xlarge</code> </p> </li> </ul> </li> <li> <p>Memory optimized:</p> <ul> <li> <p>Current generation: </p> <p> <b>R5 node types:</b> <code>cache.r5.large</code>, <code>cache.r5.xlarge</code>, <code>cache.r5.2xlarge</code>, <code>cache.r5.4xlarge</code>, <code>cache.r5.12xlarge</code>, <code>cache.r5.24xlarge</code> </p> <p> <b>R4 node types:</b> <code>cache.r4.large</code>, <code>cache.r4.xlarge</code>, <code>cache.r4.2xlarge</code>, <code>cache.r4.4xlarge</code>, <code>cache.r4.8xlarge</code>, <code>cache.r4.16xlarge</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>M2 node types:</b> <code>cache.m2.xlarge</code>, <code>cache.m2.2xlarge</code>, <code>cache.m2.4xlarge</code> </p> <p> <b>R3 node types:</b> <code>cache.r3.large</code>, <code>cache.r3.xlarge</code>, <code>cache.r3.2xlarge</code>, <code>cache.r3.4xlarge</code>, <code>cache.r3.8xlarge</code> </p> </li> </ul> </li> </ul> <p> <b>Additional node type info</b> </p> <ul> <li> <p>All current generation instance types are created in Amazon VPC by default.</p> </li> <li> <p>Redis append-only files (AOF) are not supported for T1 or T2 instances.</p> </li> <li> <p>Redis Multi-AZ with automatic failover is not supported on T1 instances.</p> </li> <li> <p>Redis configuration variables <code>appendonly</code> and <code>appendfsync</code> are not supported on Redis version 2.8.22 and later.</p> </li> </ul>"
},
"StartTime":{
"shape":"TStamp",
@ -4300,7 +4957,7 @@
},
"CacheNodeType":{
"shape":"String",
"documentation":"<p>The cache node type for the reserved cache node.</p> <p>The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.</p> <ul> <li> <p>General purpose:</p> <ul> <li> <p>Current generation: </p> <p> <b>M5 node types:</b> <code>cache.m5.large</code>, <code>cache.m5.xlarge</code>, <code>cache.m5.2xlarge</code>, <code>cache.m5.4xlarge</code>, <code>cache.m5.12xlarge</code>, <code>cache.m5.24xlarge</code> </p> <p> <b>M4 node types:</b> <code>cache.m4.large</code>, <code>cache.m4.xlarge</code>, <code>cache.m4.2xlarge</code>, <code>cache.m4.4xlarge</code>, <code>cache.m4.10xlarge</code> </p> <p> <b>T2 node types:</b> <code>cache.t2.micro</code>, <code>cache.t2.small</code>, <code>cache.t2.medium</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>T1 node types:</b> <code>cache.t1.micro</code> </p> <p> <b>M1 node types:</b> <code>cache.m1.small</code>, <code>cache.m1.medium</code>, <code>cache.m1.large</code>, <code>cache.m1.xlarge</code> </p> <p> <b>M3 node types:</b> <code>cache.m3.medium</code>, <code>cache.m3.large</code>, <code>cache.m3.xlarge</code>, <code>cache.m3.2xlarge</code> </p> </li> </ul> </li> <li> <p>Compute optimized:</p> <ul> <li> <p>Previous generation: (not recommended)</p> <p> <b>C1 node types:</b> <code>cache.c1.xlarge</code> </p> </li> </ul> </li> <li> <p>Memory optimized:</p> <ul> <li> <p>Current generation: </p> <p> <b>R5 node types:</b> <code>cache.r5.large</code>, <code>cache.r5.xlarge</code>, <code>cache.r5.2xlarge</code>, <code>cache.r5.4xlarge</code>, <code>cache.r5.12xlarge</code>, <code>cache.r5.24xlarge</code> </p> <p> <b>R4 node types:</b> <code>cache.r4.large</code>, <code>cache.r4.xlarge</code>, <code>cache.r4.2xlarge</code>, <code>cache.r4.4xlarge</code>, <code>cache.r4.8xlarge</code>, <code>cache.r4.16xlarge</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>M2 node types:</b> <code>cache.m2.xlarge</code>, <code>cache.m2.2xlarge</code>, <code>cache.m2.4xlarge</code> </p> <p> <b>R3 node types:</b> <code>cache.r3.large</code>, <code>cache.r3.xlarge</code>, <code>cache.r3.2xlarge</code>, <code>cache.r3.4xlarge</code>, <code>cache.r3.8xlarge</code> </p> </li> </ul> </li> </ul> <p> <b>Additional node type info</b> </p> <ul> <li> <p>All current generation instance types are created in Amazon VPC by default.</p> </li> <li> <p>Redis append-only files (AOF) are not supported for T1 or T2 instances.</p> </li> <li> <p>Redis Multi-AZ with automatic failover is not supported on T1 instances.</p> </li> <li> <p>Redis configuration variables <code>appendonly</code> and <code>appendfsync</code> are not supported on Redis version 2.8.22 and later.</p> </li> </ul>"
"documentation":"<p>The cache node type for the reserved cache node.</p> <p>The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.</p> <ul> <li> <p>General purpose:</p> <ul> <li> <p>Current generation: </p> <p> <b>M5 node types:</b> <code>cache.m5.large</code>, <code>cache.m5.xlarge</code>, <code>cache.m5.2xlarge</code>, <code>cache.m5.4xlarge</code>, <code>cache.m5.12xlarge</code>, <code>cache.m5.24xlarge</code> </p> <p> <b>M4 node types:</b> <code>cache.m4.large</code>, <code>cache.m4.xlarge</code>, <code>cache.m4.2xlarge</code>, <code>cache.m4.4xlarge</code>, <code>cache.m4.10xlarge</code> </p> <p> <b>T3 node types:</b> <code>cache.t3.micro</code>, <code>cache.t3.small</code>, <code>cache.t3.medium</code> </p> <p> <b>T2 node types:</b> <code>cache.t2.micro</code>, <code>cache.t2.small</code>, <code>cache.t2.medium</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>T1 node types:</b> <code>cache.t1.micro</code> </p> <p> <b>M1 node types:</b> <code>cache.m1.small</code>, <code>cache.m1.medium</code>, <code>cache.m1.large</code>, <code>cache.m1.xlarge</code> </p> <p> <b>M3 node types:</b> <code>cache.m3.medium</code>, <code>cache.m3.large</code>, <code>cache.m3.xlarge</code>, <code>cache.m3.2xlarge</code> </p> </li> </ul> </li> <li> <p>Compute optimized:</p> <ul> <li> <p>Previous generation: (not recommended)</p> <p> <b>C1 node types:</b> <code>cache.c1.xlarge</code> </p> </li> </ul> </li> <li> <p>Memory optimized:</p> <ul> <li> <p>Current generation: </p> <p> <b>R5 node types:</b> <code>cache.r5.large</code>, <code>cache.r5.xlarge</code>, <code>cache.r5.2xlarge</code>, <code>cache.r5.4xlarge</code>, <code>cache.r5.12xlarge</code>, <code>cache.r5.24xlarge</code> </p> <p> <b>R4 node types:</b> <code>cache.r4.large</code>, <code>cache.r4.xlarge</code>, <code>cache.r4.2xlarge</code>, <code>cache.r4.4xlarge</code>, <code>cache.r4.8xlarge</code>, <code>cache.r4.16xlarge</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>M2 node types:</b> <code>cache.m2.xlarge</code>, <code>cache.m2.2xlarge</code>, <code>cache.m2.4xlarge</code> </p> <p> <b>R3 node types:</b> <code>cache.r3.large</code>, <code>cache.r3.xlarge</code>, <code>cache.r3.2xlarge</code>, <code>cache.r3.4xlarge</code>, <code>cache.r3.8xlarge</code> </p> </li> </ul> </li> </ul> <p> <b>Additional node type info</b> </p> <ul> <li> <p>All current generation instance types are created in Amazon VPC by default.</p> </li> <li> <p>Redis append-only files (AOF) are not supported for T1 or T2 instances.</p> </li> <li> <p>Redis Multi-AZ with automatic failover is not supported on T1 instances.</p> </li> <li> <p>Redis configuration variables <code>appendonly</code> and <code>appendfsync</code> are not supported on Redis version 2.8.22 and later.</p> </li> </ul>"
},
"Duration":{
"shape":"Integer",
@ -4638,7 +5295,7 @@
},
"CacheNodeType":{
"shape":"String",
"documentation":"<p>The name of the compute and memory capacity node type for the source cluster.</p> <p>The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.</p> <ul> <li> <p>General purpose:</p> <ul> <li> <p>Current generation: </p> <p> <b>M5 node types:</b> <code>cache.m5.large</code>, <code>cache.m5.xlarge</code>, <code>cache.m5.2xlarge</code>, <code>cache.m5.4xlarge</code>, <code>cache.m5.12xlarge</code>, <code>cache.m5.24xlarge</code> </p> <p> <b>M4 node types:</b> <code>cache.m4.large</code>, <code>cache.m4.xlarge</code>, <code>cache.m4.2xlarge</code>, <code>cache.m4.4xlarge</code>, <code>cache.m4.10xlarge</code> </p> <p> <b>T2 node types:</b> <code>cache.t2.micro</code>, <code>cache.t2.small</code>, <code>cache.t2.medium</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>T1 node types:</b> <code>cache.t1.micro</code> </p> <p> <b>M1 node types:</b> <code>cache.m1.small</code>, <code>cache.m1.medium</code>, <code>cache.m1.large</code>, <code>cache.m1.xlarge</code> </p> <p> <b>M3 node types:</b> <code>cache.m3.medium</code>, <code>cache.m3.large</code>, <code>cache.m3.xlarge</code>, <code>cache.m3.2xlarge</code> </p> </li> </ul> </li> <li> <p>Compute optimized:</p> <ul> <li> <p>Previous generation: (not recommended)</p> <p> <b>C1 node types:</b> <code>cache.c1.xlarge</code> </p> </li> </ul> </li> <li> <p>Memory optimized:</p> <ul> <li> <p>Current generation: </p> <p> <b>R5 node types:</b> <code>cache.r5.large</code>, <code>cache.r5.xlarge</code>, <code>cache.r5.2xlarge</code>, <code>cache.r5.4xlarge</code>, <code>cache.r5.12xlarge</code>, <code>cache.r5.24xlarge</code> </p> <p> <b>R4 node types:</b> <code>cache.r4.large</code>, <code>cache.r4.xlarge</code>, <code>cache.r4.2xlarge</code>, <code>cache.r4.4xlarge</code>, <code>cache.r4.8xlarge</code>, <code>cache.r4.16xlarge</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>M2 node types:</b> <code>cache.m2.xlarge</code>, <code>cache.m2.2xlarge</code>, <code>cache.m2.4xlarge</code> </p> <p> <b>R3 node types:</b> <code>cache.r3.large</code>, <code>cache.r3.xlarge</code>, <code>cache.r3.2xlarge</code>, <code>cache.r3.4xlarge</code>, <code>cache.r3.8xlarge</code> </p> </li> </ul> </li> </ul> <p> <b>Additional node type info</b> </p> <ul> <li> <p>All current generation instance types are created in Amazon VPC by default.</p> </li> <li> <p>Redis append-only files (AOF) are not supported for T1 or T2 instances.</p> </li> <li> <p>Redis Multi-AZ with automatic failover is not supported on T1 instances.</p> </li> <li> <p>Redis configuration variables <code>appendonly</code> and <code>appendfsync</code> are not supported on Redis version 2.8.22 and later.</p> </li> </ul>"
"documentation":"<p>The name of the compute and memory capacity node type for the source cluster.</p> <p>The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.</p> <ul> <li> <p>General purpose:</p> <ul> <li> <p>Current generation: </p> <p> <b>M5 node types:</b> <code>cache.m5.large</code>, <code>cache.m5.xlarge</code>, <code>cache.m5.2xlarge</code>, <code>cache.m5.4xlarge</code>, <code>cache.m5.12xlarge</code>, <code>cache.m5.24xlarge</code> </p> <p> <b>M4 node types:</b> <code>cache.m4.large</code>, <code>cache.m4.xlarge</code>, <code>cache.m4.2xlarge</code>, <code>cache.m4.4xlarge</code>, <code>cache.m4.10xlarge</code> </p> <p> <b>T3 node types:</b> <code>cache.t3.micro</code>, <code>cache.t3.small</code>, <code>cache.t3.medium</code> </p> <p> <b>T2 node types:</b> <code>cache.t2.micro</code>, <code>cache.t2.small</code>, <code>cache.t2.medium</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>T1 node types:</b> <code>cache.t1.micro</code> </p> <p> <b>M1 node types:</b> <code>cache.m1.small</code>, <code>cache.m1.medium</code>, <code>cache.m1.large</code>, <code>cache.m1.xlarge</code> </p> <p> <b>M3 node types:</b> <code>cache.m3.medium</code>, <code>cache.m3.large</code>, <code>cache.m3.xlarge</code>, <code>cache.m3.2xlarge</code> </p> </li> </ul> </li> <li> <p>Compute optimized:</p> <ul> <li> <p>Previous generation: (not recommended)</p> <p> <b>C1 node types:</b> <code>cache.c1.xlarge</code> </p> </li> </ul> </li> <li> <p>Memory optimized:</p> <ul> <li> <p>Current generation: </p> <p> <b>R5 node types:</b> <code>cache.r5.large</code>, <code>cache.r5.xlarge</code>, <code>cache.r5.2xlarge</code>, <code>cache.r5.4xlarge</code>, <code>cache.r5.12xlarge</code>, <code>cache.r5.24xlarge</code> </p> <p> <b>R4 node types:</b> <code>cache.r4.large</code>, <code>cache.r4.xlarge</code>, <code>cache.r4.2xlarge</code>, <code>cache.r4.4xlarge</code>, <code>cache.r4.8xlarge</code>, <code>cache.r4.16xlarge</code> </p> </li> <li> <p>Previous generation: (not recommended)</p> <p> <b>M2 node types:</b> <code>cache.m2.xlarge</code>, <code>cache.m2.2xlarge</code>, <code>cache.m2.4xlarge</code> </p> <p> <b>R3 node types:</b> <code>cache.r3.large</code>, <code>cache.r3.xlarge</code>, <code>cache.r3.2xlarge</code>, <code>cache.r3.4xlarge</code>, <code>cache.r3.8xlarge</code> </p> </li> </ul> </li> </ul> <p> <b>Additional node type info</b> </p> <ul> <li> <p>All current generation instance types are created in Amazon VPC by default.</p> </li> <li> <p>Redis append-only files (AOF) are not supported for T1 or T2 instances.</p> </li> <li> <p>Redis Multi-AZ with automatic failover is not supported on T1 instances.</p> </li> <li> <p>Redis configuration variables <code>appendonly</code> and <code>appendfsync</code> are not supported on Redis version 2.8.22 and later.</p> </li> </ul>"
},
"Engine":{
"shape":"String",

View file

@ -939,6 +939,25 @@
"us-west-2" : { }
}
},
"codestar-connections" : {
"endpoints" : {
"ap-northeast-1" : { },
"ap-northeast-2" : { },
"ap-south-1" : { },
"ap-southeast-1" : { },
"ap-southeast-2" : { },
"ca-central-1" : { },
"eu-central-1" : { },
"eu-west-1" : { },
"eu-west-2" : { },
"eu-west-3" : { },
"sa-east-1" : { },
"us-east-1" : { },
"us-east-2" : { },
"us-west-1" : { },
"us-west-2" : { }
}
},
"cognito-identity" : {
"endpoints" : {
"ap-northeast-1" : { },
@ -950,6 +969,24 @@
"eu-central-1" : { },
"eu-west-1" : { },
"eu-west-2" : { },
"fips-us-east-1" : {
"credentialScope" : {
"region" : "us-east-1"
},
"hostname" : "cognito-identity-fips.us-east-1.amazonaws.com"
},
"fips-us-east-2" : {
"credentialScope" : {
"region" : "us-east-2"
},
"hostname" : "cognito-identity-fips.us-east-2.amazonaws.com"
},
"fips-us-west-2" : {
"credentialScope" : {
"region" : "us-west-2"
},
"hostname" : "cognito-identity-fips.us-west-2.amazonaws.com"
},
"us-east-1" : { },
"us-east-2" : { },
"us-west-2" : { }
@ -966,6 +1003,24 @@
"eu-central-1" : { },
"eu-west-1" : { },
"eu-west-2" : { },
"fips-us-east-1" : {
"credentialScope" : {
"region" : "us-east-1"
},
"hostname" : "cognito-idp-fips.us-east-1.amazonaws.com"
},
"fips-us-east-2" : {
"credentialScope" : {
"region" : "us-east-2"
},
"hostname" : "cognito-idp-fips.us-east-2.amazonaws.com"
},
"fips-us-west-2" : {
"credentialScope" : {
"region" : "us-west-2"
},
"hostname" : "cognito-idp-fips.us-west-2.amazonaws.com"
},
"us-east-1" : { },
"us-east-2" : { },
"us-west-2" : { }
@ -1229,6 +1284,12 @@
"ap-southeast-1" : { },
"ap-southeast-2" : { },
"ca-central-1" : { },
"dms-fips" : {
"credentialScope" : {
"region" : "us-west-1"
},
"hostname" : "dms-fips.us-west-1.amazonaws.com"
},
"eu-central-1" : { },
"eu-north-1" : { },
"eu-west-1" : { },
@ -1702,6 +1763,96 @@
"eu-west-1" : { },
"eu-west-2" : { },
"eu-west-3" : { },
"fips-ap-northeast-1" : {
"credentialScope" : {
"region" : "ap-northeast-1"
},
"hostname" : "fms-fips.ap-northeast-1.amazonaws.com"
},
"fips-ap-northeast-2" : {
"credentialScope" : {
"region" : "ap-northeast-2"
},
"hostname" : "fms-fips.ap-northeast-2.amazonaws.com"
},
"fips-ap-south-1" : {
"credentialScope" : {
"region" : "ap-south-1"
},
"hostname" : "fms-fips.ap-south-1.amazonaws.com"
},
"fips-ap-southeast-1" : {
"credentialScope" : {
"region" : "ap-southeast-1"
},
"hostname" : "fms-fips.ap-southeast-1.amazonaws.com"
},
"fips-ap-southeast-2" : {
"credentialScope" : {
"region" : "ap-southeast-2"
},
"hostname" : "fms-fips.ap-southeast-2.amazonaws.com"
},
"fips-ca-central-1" : {
"credentialScope" : {
"region" : "ca-central-1"
},
"hostname" : "fms-fips.ca-central-1.amazonaws.com"
},
"fips-eu-central-1" : {
"credentialScope" : {
"region" : "eu-central-1"
},
"hostname" : "fms-fips.eu-central-1.amazonaws.com"
},
"fips-eu-west-1" : {
"credentialScope" : {
"region" : "eu-west-1"
},
"hostname" : "fms-fips.eu-west-1.amazonaws.com"
},
"fips-eu-west-2" : {
"credentialScope" : {
"region" : "eu-west-2"
},
"hostname" : "fms-fips.eu-west-2.amazonaws.com"
},
"fips-eu-west-3" : {
"credentialScope" : {
"region" : "eu-west-3"
},
"hostname" : "fms-fips.eu-west-3.amazonaws.com"
},
"fips-sa-east-1" : {
"credentialScope" : {
"region" : "sa-east-1"
},
"hostname" : "fms-fips.sa-east-1.amazonaws.com"
},
"fips-us-east-1" : {
"credentialScope" : {
"region" : "us-east-1"
},
"hostname" : "fms-fips.us-east-1.amazonaws.com"
},
"fips-us-east-2" : {
"credentialScope" : {
"region" : "us-east-2"
},
"hostname" : "fms-fips.us-east-2.amazonaws.com"
},
"fips-us-west-1" : {
"credentialScope" : {
"region" : "us-west-1"
},
"hostname" : "fms-fips.us-west-1.amazonaws.com"
},
"fips-us-west-2" : {
"credentialScope" : {
"region" : "us-west-2"
},
"hostname" : "fms-fips.us-west-2.amazonaws.com"
},
"sa-east-1" : { },
"us-east-1" : { },
"us-east-2" : { },
@ -1713,7 +1864,10 @@
"endpoints" : {
"ap-northeast-1" : { },
"ap-northeast-2" : { },
"ap-south-1" : { },
"ap-southeast-1" : { },
"ap-southeast-2" : { },
"eu-central-1" : { },
"eu-west-1" : { },
"us-east-1" : { },
"us-east-2" : { },
@ -1723,7 +1877,11 @@
"forecastquery" : {
"endpoints" : {
"ap-northeast-1" : { },
"ap-northeast-2" : { },
"ap-south-1" : { },
"ap-southeast-1" : { },
"ap-southeast-2" : { },
"eu-central-1" : { },
"eu-west-1" : { },
"us-east-1" : { },
"us-east-2" : { },
@ -1780,6 +1938,36 @@
"eu-west-1" : { },
"eu-west-2" : { },
"eu-west-3" : { },
"fips-ca-central-1" : {
"credentialScope" : {
"region" : "ca-central-1"
},
"hostname" : "glacier-fips.ca-central-1.amazonaws.com"
},
"fips-us-east-1" : {
"credentialScope" : {
"region" : "us-east-1"
},
"hostname" : "glacier-fips.us-east-1.amazonaws.com"
},
"fips-us-east-2" : {
"credentialScope" : {
"region" : "us-east-2"
},
"hostname" : "glacier-fips.us-east-2.amazonaws.com"
},
"fips-us-west-1" : {
"credentialScope" : {
"region" : "us-west-1"
},
"hostname" : "glacier-fips.us-west-1.amazonaws.com"
},
"fips-us-west-2" : {
"credentialScope" : {
"region" : "us-west-2"
},
"hostname" : "glacier-fips.us-west-2.amazonaws.com"
},
"me-south-1" : { },
"sa-east-1" : { },
"us-east-1" : { },
@ -2300,7 +2488,9 @@
},
"managedblockchain" : {
"endpoints" : {
"ap-northeast-1" : { },
"ap-southeast-1" : { },
"eu-west-1" : { },
"us-east-1" : { }
}
},
@ -3610,6 +3800,30 @@
"eu-west-1" : { },
"eu-west-2" : { },
"eu-west-3" : { },
"fips-us-east-1" : {
"credentialScope" : {
"region" : "us-east-1"
},
"hostname" : "sms-fips.us-east-1.amazonaws.com"
},
"fips-us-east-2" : {
"credentialScope" : {
"region" : "us-east-2"
},
"hostname" : "sms-fips.us-east-2.amazonaws.com"
},
"fips-us-west-1" : {
"credentialScope" : {
"region" : "us-west-1"
},
"hostname" : "sms-fips.us-west-1.amazonaws.com"
},
"fips-us-west-2" : {
"credentialScope" : {
"region" : "us-west-2"
},
"hostname" : "sms-fips.us-west-2.amazonaws.com"
},
"me-south-1" : { },
"sa-east-1" : { },
"us-east-1" : { },
@ -4442,6 +4656,12 @@
"cn-northwest-1" : { }
}
},
"iotsecuredtunneling" : {
"endpoints" : {
"cn-north-1" : { },
"cn-northwest-1" : { }
}
},
"kinesis" : {
"endpoints" : {
"cn-north-1" : { },
@ -4784,6 +5004,18 @@
},
"athena" : {
"endpoints" : {
"fips-us-gov-east-1" : {
"credentialScope" : {
"region" : "us-gov-east-1"
},
"hostname" : "athena-fips.us-gov-east-1.amazonaws.com"
},
"fips-us-gov-west-1" : {
"credentialScope" : {
"region" : "us-gov-west-1"
},
"hostname" : "athena-fips.us-gov-west-1.amazonaws.com"
},
"us-gov-east-1" : { },
"us-gov-west-1" : { }
}
@ -4930,6 +5162,12 @@
},
"dms" : {
"endpoints" : {
"dms-fips" : {
"credentialScope" : {
"region" : "us-gov-west-1"
},
"hostname" : "dms.us-gov-west-1.amazonaws.com"
},
"us-gov-east-1" : { },
"us-gov-west-1" : { }
}
@ -5036,8 +5274,17 @@
},
"glacier" : {
"endpoints" : {
"us-gov-east-1" : { },
"us-gov-east-1" : {
"credentialScope" : {
"region" : "us-gov-east-1"
},
"hostname" : "glacier.us-gov-east-1.amazonaws.com"
},
"us-gov-west-1" : {
"credentialScope" : {
"region" : "us-gov-west-1"
},
"hostname" : "glacier.us-gov-west-1.amazonaws.com",
"protocols" : [ "http", "https" ]
}
}
@ -5360,6 +5607,18 @@
},
"sms" : {
"endpoints" : {
"fips-us-gov-east-1" : {
"credentialScope" : {
"region" : "us-gov-east-1"
},
"hostname" : "sms-fips.us-gov-east-1.amazonaws.com"
},
"fips-us-gov-west-1" : {
"credentialScope" : {
"region" : "us-gov-west-1"
},
"hostname" : "sms-fips.us-gov-west-1.amazonaws.com"
},
"us-gov-east-1" : { },
"us-gov-west-1" : { }
}
@ -5580,6 +5839,12 @@
},
"dms" : {
"endpoints" : {
"dms-fips" : {
"credentialScope" : {
"region" : "us-iso-east-1"
},
"hostname" : "dms.us-iso-east-1.c2s.ic.gov"
},
"us-iso-east-1" : { }
}
},
@ -5840,6 +6105,12 @@
},
"dms" : {
"endpoints" : {
"dms-fips" : {
"credentialScope" : {
"region" : "us-isob-east-1"
},
"hostname" : "dms.us-isob-east-1.sc2s.sgov.gov"
},
"us-isob-east-1" : { }
}
},

View file

@ -28,7 +28,7 @@
"errors": [
{
"shape": "AddFlowOutputs420Exception",
"documentation": "AWS Elemental MediaConnect can't complete this request because this flow already has the maximum number of allowed outputs (20). For more information, contact AWS Customer Support."
"documentation": "AWS Elemental MediaConnect can't complete this request because this flow already has the maximum number of allowed outputs (50). For more information, contact AWS Customer Support."
},
{
"shape": "BadRequestException",
@ -55,7 +55,49 @@
"documentation": "You have exceeded the service request rate limit for your AWS Elemental MediaConnect account."
}
],
"documentation": "Adds outputs to an existing flow. You can create up to 20 outputs per flow."
"documentation": "Adds outputs to an existing flow. You can create up to 50 outputs per flow."
},
"AddFlowSources": {
"name": "AddFlowSources",
"http": {
"method": "POST",
"requestUri": "/v1/flows/{flowArn}/source",
"responseCode": 201
},
"input": {
"shape": "AddFlowSourcesRequest"
},
"output": {
"shape": "AddFlowSourcesResponse",
"documentation": "AWS Elemental MediaConnect added sources to the flow successfully."
},
"errors": [
{
"shape": "BadRequestException",
"documentation": "The request that you submitted is not valid."
},
{
"shape": "InternalServerErrorException",
"documentation": "AWS Elemental MediaConnect can't fulfill your request because it encountered an unexpected condition."
},
{
"shape": "ForbiddenException",
"documentation": "You don't have the required permissions to perform this operation."
},
{
"shape": "NotFoundException",
"documentation": "AWS Elemental MediaConnect did not find the resource that you specified in the request."
},
{
"shape": "ServiceUnavailableException",
"documentation": "AWS Elemental MediaConnect is currently unavailable. Try again later."
},
{
"shape": "TooManyRequestsException",
"documentation": "You have exceeded the service request rate limit for your AWS Elemental MediaConnect account."
}
],
"documentation": "Adds Sources to flow"
},
"CreateFlow": {
"name": "CreateFlow",
@ -97,7 +139,7 @@
"documentation": "You have exceeded the service request rate limit for your AWS Elemental MediaConnect account."
}
],
"documentation": "Creates a new flow. The request must include one source. The request optionally can include outputs (up to 20) and entitlements (up to 50)."
"documentation": "Creates a new flow. The request must include one source. The request optionally can include outputs (up to 50) and entitlements (up to 50)."
},
"DeleteFlow": {
"name": "DeleteFlow",
@ -369,6 +411,48 @@
],
"documentation": "Removes an output from an existing flow. This request can be made only on an output that does not have an entitlement associated with it. If the output has an entitlement, you must revoke the entitlement instead. When an entitlement is revoked from a flow, the service automatically removes the associated output."
},
"RemoveFlowSource": {
"name": "RemoveFlowSource",
"http": {
"method": "DELETE",
"requestUri": "/v1/flows/{flowArn}/source/{sourceArn}",
"responseCode": 202
},
"input": {
"shape": "RemoveFlowSourceRequest"
},
"output": {
"shape": "RemoveFlowSourceResponse",
"documentation": "source successfully removed from flow configuration."
},
"errors": [
{
"shape": "BadRequestException",
"documentation": "The request that you submitted is not valid."
},
{
"shape": "InternalServerErrorException",
"documentation": "AWS Elemental MediaConnect can't fulfill your request because it encountered an unexpected condition."
},
{
"shape": "ForbiddenException",
"documentation": "You don't have the required permissions to perform this operation."
},
{
"shape": "NotFoundException",
"documentation": "AWS Elemental MediaConnect did not find the resource that you specified in the request."
},
{
"shape": "ServiceUnavailableException",
"documentation": "AWS Elemental MediaConnect is currently unavailable. Try again later."
},
{
"shape": "TooManyRequestsException",
"documentation": "You have exceeded the service request rate limit for your AWS Elemental MediaConnect account."
}
],
"documentation": "Removes a source from an existing flow. This request can be made only if there is more than one source on the flow."
},
"RevokeFlowEntitlement": {
"name": "RevokeFlowEntitlement",
"http": {
@ -547,6 +631,48 @@
],
"documentation": "Deletes specified tags from a resource."
},
"UpdateFlow": {
"name": "UpdateFlow",
"http": {
"method": "PUT",
"requestUri": "/v1/flows/{flowArn}",
"responseCode": 202
},
"input": {
"shape": "UpdateFlowRequest"
},
"output": {
"shape": "UpdateFlowResponse",
"documentation": "AWS Elemental MediaConnect updated the flow successfully."
},
"errors": [
{
"shape": "BadRequestException",
"documentation": "The request that you submitted is not valid."
},
{
"shape": "InternalServerErrorException",
"documentation": "AWS Elemental MediaConnect can't fulfill your request because it encountered an unexpected condition."
},
{
"shape": "ForbiddenException",
"documentation": "You don't have the required permissions to perform this operation."
},
{
"shape": "NotFoundException",
"documentation": "AWS Elemental MediaConnect did not find the resource that you specified in the request."
},
{
"shape": "ServiceUnavailableException",
"documentation": "AWS Elemental MediaConnect is currently unavailable. Try again later."
},
{
"shape": "TooManyRequestsException",
"documentation": "You have exceeded the service request rate limit for your AWS Elemental MediaConnect account."
}
],
"documentation": "Updates flow"
},
"UpdateFlowEntitlement": {
"name": "UpdateFlowEntitlement",
"http": {
@ -729,6 +855,42 @@
}
}
},
"AddFlowSourcesRequest": {
"type": "structure",
"members": {
"FlowArn": {
"shape": "__string",
"location": "uri",
"locationName": "flowArn",
"documentation": "The flow that you want to mutate."
},
"Sources": {
"shape": "__listOfSetSourceRequest",
"locationName": "sources",
"documentation": "A list of sources that you want to add."
}
},
"documentation": "A request to add sources to the flow.",
"required": [
"FlowArn",
"Sources"
]
},
"AddFlowSourcesResponse": {
"type": "structure",
"members": {
"FlowArn": {
"shape": "__string",
"locationName": "flowArn",
"documentation": "The ARN of the flow that these sources were added to."
},
"Sources": {
"shape": "__listOfSource",
"locationName": "sources",
"documentation": "The details of the newly added sources."
}
}
},
"AddOutputRequest": {
"type": "structure",
"members": {
@ -863,11 +1025,18 @@
"Source": {
"shape": "SetSourceRequest",
"locationName": "source"
},
"SourceFailoverConfig": {
"shape": "FailoverConfig",
"locationName": "sourceFailoverConfig"
},
"Sources": {
"shape": "__listOfSetSourceRequest",
"locationName": "sources"
}
},
"documentation": "Creates a new flow. The request must include one source. The request optionally can include outputs (up to 20) and entitlements (up to 50).",
"documentation": "Creates a new flow. The request must include one source. The request optionally can include outputs (up to 50) and entitlements (up to 50).",
"required": [
"Source",
"Name"
]
},
@ -1032,6 +1201,21 @@
"Name"
]
},
"FailoverConfig": {
"type": "structure",
"members": {
"RecoveryWindow": {
"shape": "__integer",
"locationName": "recoveryWindow",
"documentation": "Search window time to look for dash-7 packets"
},
"State": {
"shape": "State",
"locationName": "state"
}
},
"documentation": "The settings for source failover"
},
"Flow": {
"type": "structure",
"members": {
@ -1074,6 +1258,14 @@
"shape": "Source",
"locationName": "source"
},
"SourceFailoverConfig": {
"shape": "FailoverConfig",
"locationName": "sourceFailoverConfig"
},
"Sources": {
"shape": "__listOfSource",
"locationName": "sources"
},
"Status": {
"shape": "Status",
"locationName": "status",
@ -1522,6 +1714,42 @@
}
}
},
"RemoveFlowSourceRequest": {
"type": "structure",
"members": {
"FlowArn": {
"shape": "__string",
"location": "uri",
"locationName": "flowArn",
"documentation": "The flow that you want to remove a source from."
},
"SourceArn": {
"shape": "__string",
"location": "uri",
"locationName": "sourceArn",
"documentation": "The ARN of the source that you want to remove."
}
},
"required": [
"FlowArn",
"SourceArn"
]
},
"RemoveFlowSourceResponse": {
"type": "structure",
"members": {
"FlowArn": {
"shape": "__string",
"locationName": "flowArn",
"documentation": "The ARN of the flow that is associated with the source you removed."
},
"SourceArn": {
"shape": "__string",
"locationName": "sourceArn",
"documentation": "The ARN of the source that was removed."
}
}
},
"ResponseError": {
"type": "structure",
"members": {
@ -1742,6 +1970,13 @@
}
}
},
"State": {
"type": "string",
"enum": [
"ENABLED",
"DISABLED"
]
},
"Status": {
"type": "string",
"enum": [
@ -1938,6 +2173,21 @@
},
"documentation": "Information about the encryption of the flow."
},
"UpdateFailoverConfig": {
"type": "structure",
"members": {
"RecoveryWindow": {
"shape": "__integer",
"locationName": "recoveryWindow",
"documentation": "Recovery window time to look for dash-7 packets"
},
"State": {
"shape": "State",
"locationName": "state"
}
},
"documentation": "The settings for source failover"
},
"UpdateFlowEntitlementRequest": {
"type": "structure",
"members": {
@ -2075,6 +2325,34 @@
}
}
},
"UpdateFlowRequest": {
"type": "structure",
"members": {
"FlowArn": {
"shape": "__string",
"location": "uri",
"locationName": "flowArn",
"documentation": "The flow that you want to update."
},
"SourceFailoverConfig": {
"shape": "UpdateFailoverConfig",
"locationName": "sourceFailoverConfig"
}
},
"documentation": "A request to update flow.",
"required": [
"FlowArn"
]
},
"UpdateFlowResponse": {
"type": "structure",
"members": {
"Flow": {
"shape": "Flow",
"locationName": "flow"
}
}
},
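
Since UpdateFlowRequest carries only the flow ARN and SourceFailoverConfig, enabling failover on an existing flow reduces to a single call. A minimal sketch, assuming a placeholder ARN and a recovery window of 200 (the model does not state the unit):

    import boto3

    mediaconnect = boto3.client('mediaconnect')
    # Turn on source failover; State values come from the State enum (ENABLED | DISABLED).
    response = mediaconnect.update_flow(
        FlowArn='arn:aws:mediaconnect:us-east-1:111122223333:flow:1-xxxx:example-flow',
        SourceFailoverConfig={
            'State': 'ENABLED',
            'RecoveryWindow': 200,   # assumed value
        },
    )
    print(response['Flow']['Status'])
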
"UpdateFlowSourceRequest": {
"type": "structure",
"members": {
@ -2202,6 +2480,24 @@
"shape": "Output"
}
},
"__listOfSetSourceRequest": {
"type": "list",
"member": {
"shape": "SetSourceRequest"
}
},
"__listOfSource": {
"type": "list",
"member": {
"shape": "Source"
}
},
"__listOf__integer": {
"type": "list",
"member": {
"shape": "__integer"
}
},
"__listOf__string": {
"type": "list",
"member": {

View file

@ -1672,6 +1672,131 @@
"USE_CONFIGURED"
]
},
"Av1AdaptiveQuantization": {
"type": "string",
"documentation": "Adaptive quantization. Allows intra-frame quantizers to vary to improve visual quality.",
"enum": [
"OFF",
"LOW",
"MEDIUM",
"HIGH",
"HIGHER",
"MAX"
]
},
"Av1FramerateControl": {
"type": "string",
"documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.",
"enum": [
"INITIALIZE_FROM_SOURCE",
"SPECIFIED"
]
},
"Av1FramerateConversionAlgorithm": {
"type": "string",
"documentation": "When set to INTERPOLATE, produces smoother motion during frame rate conversion.",
"enum": [
"DUPLICATE_DROP",
"INTERPOLATE"
]
},
"Av1QvbrSettings": {
"type": "structure",
"members": {
"QvbrQualityLevel": {
"shape": "__integerMin1Max10",
"locationName": "qvbrQualityLevel",
"documentation": "Required when you use QVBR rate control mode. That is, when you specify qvbrSettings within av1Settings. Specify the general target quality level for this output, from 1 to 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9. Optionally, to specify a value between whole numbers, also provide a value for the setting qvbrQualityLevelFineTune. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33."
},
"QvbrQualityLevelFineTune": {
"shape": "__doubleMin0Max1",
"locationName": "qvbrQualityLevelFineTune",
"documentation": "Optional. Specify a value here to set the QVBR quality to a level that is between whole numbers. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33. MediaConvert rounds your QVBR quality level to the nearest third of a whole number. For example, if you set qvbrQualityLevel to 7 and you set qvbrQualityLevelFineTune to .25, your actual QVBR quality level is 7.33."
}
},
"documentation": "Settings for quality-defined variable bitrate encoding with the AV1 codec. Required when you set Rate control mode to QVBR. Not valid when you set Rate control mode to a value other than QVBR, or when you don't define Rate control mode."
},
"Av1RateControlMode": {
"type": "string",
"documentation": "'With AV1 outputs, for rate control mode, MediaConvert supports only quality-defined variable bitrate (QVBR). You can''t use CBR or VBR.'",
"enum": [
"QVBR"
]
},
"Av1Settings": {
"type": "structure",
"members": {
"AdaptiveQuantization": {
"shape": "Av1AdaptiveQuantization",
"locationName": "adaptiveQuantization",
"documentation": "Adaptive quantization. Allows intra-frame quantizers to vary to improve visual quality."
},
"FramerateControl": {
"shape": "Av1FramerateControl",
"locationName": "framerateControl",
"documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator."
},
"FramerateConversionAlgorithm": {
"shape": "Av1FramerateConversionAlgorithm",
"locationName": "framerateConversionAlgorithm",
"documentation": "When set to INTERPOLATE, produces smoother motion during frame rate conversion."
},
"FramerateDenominator": {
"shape": "__integerMin1Max2147483647",
"locationName": "framerateDenominator",
"documentation": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateDenominator to specify the denominator of this fraction. In this example, use 1001 for the value of FramerateDenominator. When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976."
},
"FramerateNumerator": {
"shape": "__integerMin1Max2147483647",
"locationName": "framerateNumerator",
"documentation": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateNumerator to specify the numerator of this fraction. In this example, use 24000 for the value of FramerateNumerator. When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976."
},
"GopSize": {
"shape": "__doubleMin0",
"locationName": "gopSize",
"documentation": "Specify the GOP length (keyframe interval) in frames. With AV1, MediaConvert doesn't support GOP length in seconds. This value must be greater than zero and preferably equal to 1 + ((numberBFrames + 1) * x), where x is an integer value."
},
"MaxBitrate": {
"shape": "__integerMin1000Max1152000000",
"locationName": "maxBitrate",
"documentation": "Maximum bitrate in bits/second. For example, enter five megabits per second as 5000000. Required when Rate control mode is QVBR."
},
"NumberBFramesBetweenReferenceFrames": {
"shape": "__integerMin7Max15",
"locationName": "numberBFramesBetweenReferenceFrames",
"documentation": "Specify the number of B-frames. With AV1, MediaConvert supports only 7 or 15."
},
"QvbrSettings": {
"shape": "Av1QvbrSettings",
"locationName": "qvbrSettings",
"documentation": "Settings for quality-defined variable bitrate encoding with the AV1 codec. Required when you set Rate control mode to QVBR. Not valid when you set Rate control mode to a value other than QVBR, or when you don't define Rate control mode."
},
"RateControlMode": {
"shape": "Av1RateControlMode",
"locationName": "rateControlMode",
"documentation": "'With AV1 outputs, for rate control mode, MediaConvert supports only quality-defined variable bitrate (QVBR). You can''t use CBR or VBR.'"
},
"Slices": {
"shape": "__integerMin1Max32",
"locationName": "slices",
"documentation": "Specify the number of slices per picture. This value must be 1, 2, 4, 8, 16, or 32. For progressive pictures, this value must be less than or equal to the number of macroblock rows. For interlaced pictures, this value must be less than or equal to half the number of macroblock rows."
},
"SpatialAdaptiveQuantization": {
"shape": "Av1SpatialAdaptiveQuantization",
"locationName": "spatialAdaptiveQuantization",
"documentation": "Adjust quantization within each frame based on spatial variation of content complexity."
}
},
"documentation": "Required when you set Codec, under VideoDescription>CodecSettings to the value AV1."
},
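
To show how the AV1 fields above compose, here is a hedged sketch of the CodecSettings fragment an output could carry; the numeric values are assumptions chosen to satisfy the documented constraints, and the surrounding create_job Settings structure is not shown.

    # Sketch of a VideoCodecSettings block for an AV1 output; values are assumptions.
    codec_settings = {
        'Codec': 'AV1',
        'Av1Settings': {
            'RateControlMode': 'QVBR',                  # only QVBR is supported for AV1
            'MaxBitrate': 5000000,                      # required when rate control is QVBR
            'QvbrSettings': {
                'QvbrQualityLevel': 7,
                'QvbrQualityLevelFineTune': 0.33,       # effective quality level 7.33
            },
            'GopSize': 65.0,                            # 1 + ((15 + 1) * 4), per the GopSize note
            'NumberBFramesBetweenReferenceFrames': 15,  # AV1 supports only 7 or 15
            'AdaptiveQuantization': 'MEDIUM',
            'SpatialAdaptiveQuantization': 'ENABLED',
        },
    }
    # This dict would sit under Settings > OutputGroups > Outputs > VideoDescription
    # > CodecSettings in a MediaConvert create_job request.
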
"Av1SpatialAdaptiveQuantization": {
"type": "string",
"documentation": "Adjust quantization within each frame based on spatial variation of content complexity.",
"enum": [
"DISABLED",
"ENABLED"
]
},
"AvailBlanking": {
"type": "structure",
"members": {
@ -2387,7 +2512,7 @@
"ColorSpaceConversion": {
"shape": "ColorSpaceConversion",
"locationName": "colorSpaceConversion",
"documentation": "Specify the color space you want for this output. The service supports conversion between HDR formats, between SDR formats, and from SDR to HDR. The service doesn't support conversion from HDR to SDR. SDR to HDR conversion doesn't upgrade the dynamic range. The converted video has an HDR format, but visually appears the same as an unconverted output."
"documentation": "Specify the color space you want for this output. The service supports conversion between HDR formats, between SDR formats, from SDR to HDR, and from HDR to SDR. SDR to HDR conversion doesn't upgrade the dynamic range. The converted video has an HDR format, but visually appears the same as an unconverted output. HDR to SDR conversion uses Elemental tone mapping technology to approximate the outcome of manually regrading from HDR to SDR."
},
"Contrast": {
"shape": "__integerMin1Max100",
@ -2433,7 +2558,7 @@
},
"ColorSpaceConversion": {
"type": "string",
"documentation": "Specify the color space you want for this output. The service supports conversion between HDR formats, between SDR formats, and from SDR to HDR. The service doesn't support conversion from HDR to SDR. SDR to HDR conversion doesn't upgrade the dynamic range. The converted video has an HDR format, but visually appears the same as an unconverted output.",
"documentation": "Specify the color space you want for this output. The service supports conversion between HDR formats, between SDR formats, from SDR to HDR, and from HDR to SDR. SDR to HDR conversion doesn't upgrade the dynamic range. The converted video has an HDR format, but visually appears the same as an unconverted output. HDR to SDR conversion uses Elemental tone mapping technology to approximate the outcome of manually regrading from HDR to SDR.",
"enum": [
"NONE",
"FORCE_601",
@ -9094,6 +9219,7 @@
"documentation": "Type of video codec",
"enum": [
"FRAME_CAPTURE",
"AV1",
"H_264",
"H_265",
"MPEG2",
@ -9103,6 +9229,11 @@
"VideoCodecSettings": {
"type": "structure",
"members": {
"Av1Settings": {
"shape": "Av1Settings",
"locationName": "av1Settings",
"documentation": "Required when you set Codec, under VideoDescription>CodecSettings to the value AV1."
},
"Codec": {
"shape": "VideoCodec",
"locationName": "codec",
@ -9134,7 +9265,7 @@
"documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value PRORES."
}
},
"documentation": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * FRAME_CAPTURE, FrameCaptureSettings * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings"
"documentation": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * FRAME_CAPTURE, FrameCaptureSettings * AV1, Av1Settings * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings"
},
"VideoDescription": {
"type": "structure",
@ -9152,7 +9283,7 @@
"CodecSettings": {
"shape": "VideoCodecSettings",
"locationName": "codecSettings",
"documentation": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * FRAME_CAPTURE, FrameCaptureSettings * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings"
"documentation": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * FRAME_CAPTURE, FrameCaptureSettings * AV1, Av1Settings * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings"
},
"ColorMetadata": {
"shape": "ColorMetadata",
@ -9697,6 +9828,11 @@
"min": 64000,
"max": 640000
},
"__integerMin7Max15": {
"type": "integer",
"min": 7,
"max": 15
},
"__integerMin8000Max192000": {
"type": "integer",
"min": 8000,

View file

@ -143,14 +143,14 @@
},
"AvailabilityZone":{
"type":"string",
"documentation":"<p>The Availability Zone.</p>",
"documentation":"<p>The Availability Zone.</p> <p>You must specify <code>AvailabilityZone</code> or <code>AvailabilityZoneId</code>.</p>",
"max":1000,
"min":1,
"pattern":"[a-z\\d-]+"
},
"AvailabilityZoneId":{
"type":"string",
"documentation":"<p>The ID of the Availability Zone.</p>",
"documentation":"<p>The ID of the Availability Zone.</p> <p>You must specify <code>AvailabilityZone</code> or <code>AvailabilityZoneId</code>.</p>",
"max":255,
"min":1,
"pattern":"[a-z]+[0-9]+-az[0-9]+"

View file

@ -816,6 +816,10 @@
"failureReason":{
"shape":"FailureReason",
"documentation":"<p>If the batch inference job failed, the reason for the failure.</p>"
},
"solutionVersionArn":{
"shape":"Arn",
"documentation":"<p>The ARN of the solution version used by the batch inference job.</p>"
}
},
"documentation":"<p>A truncated version of the <a>BatchInferenceJob</a> datatype. The <a>ListBatchInferenceJobs</a> operation returns a list of batch inference job summaries.</p>"
@ -2130,7 +2134,7 @@
"members":{
"type":{
"shape":"HPOObjectiveType",
"documentation":"<p>The data type of the metric.</p>"
"documentation":"<p>The type of the metric. Valid values are <code>Maximize</code> and <code>Minimize</code>.</p>"
},
"metricName":{
"shape":"MetricName",
@ -2871,6 +2875,10 @@
"shape":"TrainingMode",
"documentation":"<p>The scope of training used to create the solution version. The <code>FULL</code> option trains the solution version based on the entirety of the input solution's training data, while the <code>UPDATE</code> option processes only the training data that has changed since the creation of the last solution version. Choose <code>UPDATE</code> when you want to start recommending items added to the dataset without retraining the model.</p> <important> <p>The <code>UPDATE</code> option can only be used after you've created a solution version with the <code>FULL</code> option and the training solution uses the <a>native-recipe-hrnn-coldstart</a>.</p> </important>"
},
"tunedHPOParams":{
"shape":"TunedHPOParams",
"documentation":"<p>If hyperparameter optimization was performed, contains the hyperparameter values of the best performing model.</p>"
},
"status":{
"shape":"Status",
"documentation":"<p>The status of the solution version.</p> <p>A solution version can be in one of the following states:</p> <ul> <li> <p>CREATE PENDING</p> </li> <li> <p>CREATE IN_PROGRESS</p> </li> <li> <p>ACTIVE</p> </li> <li> <p>CREATE FAILED</p> </li> </ul>"
@ -2954,6 +2962,16 @@
"min":1
},
"Tunable":{"type":"boolean"},
"TunedHPOParams":{
"type":"structure",
"members":{
"algorithmHyperParameters":{
"shape":"HyperParameters",
"documentation":"<p>A list of the hyperparameter values of the best performing model.</p>"
}
},
"documentation":"<p>If hyperparameter optimization (HPO) was performed, contains the hyperparameter values of the best performing model.</p>"
},
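
A short sketch of reading the new tunedHPOParams field back after training; the solution version ARN is a placeholder, and the 'solutionVersion' response key comes from the DescribeSolutionVersion response shape rather than this hunk.

    import boto3

    personalize = boto3.client('personalize')
    description = personalize.describe_solution_version(
        solutionVersionArn='arn:aws:personalize:us-east-1:111122223333:solution/example/xxxx'
    )
    version = description['solutionVersion']
    # tunedHPOParams is only present when hyperparameter optimization was performed.
    tuned = version.get('tunedHPOParams', {})
    for name, value in tuned.get('algorithmHyperParameters', {}).items():
        print(name, value)
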
"UpdateCampaignRequest":{
"type":"structure",
"required":["campaignArn"],

View file

@ -7389,7 +7389,7 @@
"documentation":"<p> An optional pagination token provided by a previous <code>DescribeExportTasks</code> request. If you specify this parameter, the response includes only records beyond the marker, up to the value specified by the <code>MaxRecords</code> parameter. </p>"
},
"MaxRecords":{
"shape":"String",
"shape":"MaxRecords",
"documentation":"<p> The maximum number of records to include in the response. If more records exist than the specified value, a pagination token called a marker is included in the response. You can use the marker in a later <code>DescribeExportTasks</code> request to retrieve the remaining results. </p> <p>Default: 100</p> <p>Constraints: Minimum 20, maximum 100.</p>"
}
}
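
With MaxRecords now modeled as the bounded MaxRecords integer instead of a string, the pagination parameter is passed as a number. A sketch, assuming the ExportTasks and ExportTaskIdentifier response fields from the wider RDS model:

    import boto3

    rds = boto3.client('rds')
    # MaxRecords must be between 20 and 100 after this change.
    page = rds.describe_export_tasks(MaxRecords=20)
    for task in page.get('ExportTasks', []):
        print(task.get('ExportTaskIdentifier'), task.get('Status'))
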

View file

@ -62,6 +62,21 @@
"input":{"shape":"DeleteAccessPointPolicyRequest"},
"documentation":"<p>Deletes the access point policy for the specified access point.</p>"
},
"DeleteJobTagging":{
"name":"DeleteJobTagging",
"http":{
"method":"DELETE",
"requestUri":"/v20180820/jobs/{id}/tagging"
},
"input":{"shape":"DeleteJobTaggingRequest"},
"output":{"shape":"DeleteJobTaggingResult"},
"errors":[
{"shape":"InternalServiceException"},
{"shape":"TooManyRequestsException"},
{"shape":"NotFoundException"}
],
"documentation":"<p>Delete the tags on a Amazon S3 batch operations job, if any.</p>"
},
"DeletePublicAccessBlock":{
"name":"DeletePublicAccessBlock",
"http":{
@ -117,6 +132,21 @@
"output":{"shape":"GetAccessPointPolicyStatusResult"},
"documentation":"<p>Indicates whether the specified access point currently has a policy that allows public access. For more information about public access through access points, see <a href=\"https://docs.aws.amazon.com/AmazonS3/latest/dev/access-points.html\">Managing Data Access with Amazon S3 Access Points</a> in the <i>Amazon Simple Storage Service Developer Guide</i>.</p>"
},
"GetJobTagging":{
"name":"GetJobTagging",
"http":{
"method":"GET",
"requestUri":"/v20180820/jobs/{id}/tagging"
},
"input":{"shape":"GetJobTaggingRequest"},
"output":{"shape":"GetJobTaggingResult"},
"errors":[
{"shape":"InternalServiceException"},
{"shape":"TooManyRequestsException"},
{"shape":"NotFoundException"}
],
"documentation":"<p>Retrieve the tags on a Amazon S3 batch operations job.</p>"
},
"GetPublicAccessBlock":{
"name":"GetPublicAccessBlock",
"http":{
@ -168,6 +198,26 @@
},
"documentation":"<p>Associates an access policy with the specified access point. Each access point can have only one policy, so a request made to this API replaces any existing policy associated with the specified access point.</p>"
},
"PutJobTagging":{
"name":"PutJobTagging",
"http":{
"method":"PUT",
"requestUri":"/v20180820/jobs/{id}/tagging"
},
"input":{
"shape":"PutJobTaggingRequest",
"locationName":"PutJobTaggingRequest",
"xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"}
},
"output":{"shape":"PutJobTaggingResult"},
"errors":[
{"shape":"InternalServiceException"},
{"shape":"TooManyRequestsException"},
{"shape":"NotFoundException"},
{"shape":"TooManyTagsException"}
],
"documentation":"<p>Replace the set of tags on a Amazon S3 batch operations job.</p>"
},
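
A hedged sketch of the new job-tagging round trip with boto3; the account ID, job ID, and tag values are placeholders, and the Key/Value tag fields follow the S3Tag shape used by S3TagSet.

    import boto3

    s3control = boto3.client('s3control')
    account_id = '111122223333'     # placeholder account
    job_id = 'example-job-id'       # placeholder Batch Operations job ID

    # Replace the tag set on the job, read it back, then remove it.
    s3control.put_job_tagging(
        AccountId=account_id,
        JobId=job_id,
        Tags=[{'Key': 'department', 'Value': 'media'}],
    )
    tags = s3control.get_job_tagging(AccountId=account_id, JobId=job_id)['Tags']
    print(tags)
    s3control.delete_job_tagging(AccountId=account_id, JobId=job_id)
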
"PutPublicAccessBlock":{
"name":"PutPublicAccessBlock",
"http":{
@ -353,6 +403,10 @@
"RoleArn":{
"shape":"IAMRoleArn",
"documentation":"<p>The Amazon Resource Name (ARN) for the Identity and Access Management (IAM) Role that batch operations will use to execute this job's operation on each object in the manifest.</p>"
},
"Tags":{
"shape":"S3TagSet",
"documentation":"<p>An optional set of tags to associate with the job when it is created.</p>"
}
}
},
@ -408,6 +462,32 @@
}
}
},
"DeleteJobTaggingRequest":{
"type":"structure",
"required":[
"AccountId",
"JobId"
],
"members":{
"AccountId":{
"shape":"AccountId",
"documentation":"<p>The account ID for the Amazon Web Services account associated with the Amazon S3 batch operations job you want to remove tags from.</p>",
"location":"header",
"locationName":"x-amz-account-id"
},
"JobId":{
"shape":"JobId",
"documentation":"<p>The ID for the job whose tags you want to delete.</p>",
"location":"uri",
"locationName":"id"
}
}
},
"DeleteJobTaggingResult":{
"type":"structure",
"members":{
}
},
"DeletePublicAccessBlockRequest":{
"type":"structure",
"required":["AccountId"],
@ -562,6 +642,36 @@
}
}
},
"GetJobTaggingRequest":{
"type":"structure",
"required":[
"AccountId",
"JobId"
],
"members":{
"AccountId":{
"shape":"AccountId",
"documentation":"<p>The account ID for the Amazon Web Services account associated with the Amazon S3 batch operations job you want to retrieve tags for.</p>",
"location":"header",
"locationName":"x-amz-account-id"
},
"JobId":{
"shape":"JobId",
"documentation":"<p>The ID for the job whose tags you want to retrieve.</p>",
"location":"uri",
"locationName":"id"
}
}
},
"GetJobTaggingResult":{
"type":"structure",
"members":{
"Tags":{
"shape":"S3TagSet",
"documentation":"<p>The set of tags associated with the job.</p>"
}
}
},
"GetPublicAccessBlockOutput":{
"type":"structure",
"members":{
@ -1245,6 +1355,37 @@
}
}
},
"PutJobTaggingRequest":{
"type":"structure",
"required":[
"AccountId",
"JobId",
"Tags"
],
"members":{
"AccountId":{
"shape":"AccountId",
"documentation":"<p>The account ID for the Amazon Web Services account associated with the Amazon S3 batch operations job you want to replace tags on.</p>",
"location":"header",
"locationName":"x-amz-account-id"
},
"JobId":{
"shape":"JobId",
"documentation":"<p>The ID for the job whose tags you want to replace.</p>",
"location":"uri",
"locationName":"id"
},
"Tags":{
"shape":"S3TagSet",
"documentation":"<p>The set of tags to associate with the job.</p>"
}
}
},
"PutJobTaggingResult":{
"type":"structure",
"members":{
}
},
"PutPublicAccessBlockRequest":{
"type":"structure",
"required":[
@ -1661,6 +1802,13 @@
"documentation":"<p/>",
"exception":true
},
"TooManyTagsException":{
"type":"structure",
"members":{
"Message":{"shape":"ExceptionMessage"}
},
"exception":true
},
"UpdateJobPriorityRequest":{
"type":"structure",
"required":[

View file

@ -1524,6 +1524,14 @@
"Owner":{
"shape":"AccountId",
"documentation":"<p>The owner of the constraint.</p>"
},
"ProductId":{
"shape":"Id",
"documentation":"<p>The identifier of the product the constraint applies to. Note that a constraint applies to a specific instance of a product within a certain portfolio.</p>"
},
"PortfolioId":{
"shape":"Id",
"documentation":"<p>The identifier of the portfolio the product resides in. The constraint applies only to the instance of the product that lives within this portfolio.</p>"
}
},
"documentation":"<p>Information about a constraint.</p>"

View file

@ -5533,7 +5533,7 @@
},
"NextToken":{
"shape":"NextToken",
"documentation":"<p>The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.</p>"
"documentation":"<p>The token to use when requesting the next set of items.</p>"
}
}
},
@ -11300,8 +11300,8 @@
"box":true
},
"ApproveUntilDate":{
"shape":"PatchStringDate",
"documentation":"<p>The cutoff date for auto approval of released patches. Any patches released on or before this date will be installed automatically</p>",
"shape":"PatchStringDateTime",
"documentation":"<p>Example API</p>",
"box":true
},
"EnableNonSecurity":{
@ -11405,11 +11405,10 @@
},
"documentation":"<p>Information about the approval status of a patch.</p>"
},
"PatchStringDate":{
"PatchStringDateTime":{
"type":"string",
"max":10,
"min":1,
"pattern":"^(\\d{4}-(0[1-9]|1[0-2])-(0[1-9]|[12]\\d|3[01]))$"
"min":1
},
"PatchTitle":{"type":"string"},
"PatchUnreportedNotApplicableCount":{"type":"integer"},
@ -11969,6 +11968,21 @@
"exception":true
},
"ResourceDataSyncCreatedTime":{"type":"timestamp"},
"ResourceDataSyncDestinationDataSharing":{
"type":"structure",
"members":{
"DestinationDataSharingType":{
"shape":"ResourceDataSyncDestinationDataSharingType",
"documentation":"<p>The sharing data type. Only <code>Organization</code> is supported.</p>"
}
},
"documentation":"<p>Synchronize Systems Manager Inventory data from multiple AWS accounts defined in AWS Organizations to a centralized Amazon S3 bucket. Data is synchronized to individual key prefixes in the central bucket. Each key prefix represents a different AWS account ID.</p>"
},
"ResourceDataSyncDestinationDataSharingType":{
"type":"string",
"max":64,
"min":1
},
"ResourceDataSyncIncludeFutureRegions":{"type":"boolean"},
"ResourceDataSyncInvalidConfigurationException":{
"type":"structure",
@ -12103,6 +12117,10 @@
"AWSKMSKeyARN":{
"shape":"ResourceDataSyncAWSKMSKeyARN",
"documentation":"<p>The ARN of an encryption key for a destination in Amazon S3. Must belong to the same Region as the destination Amazon S3 bucket.</p>"
},
"DestinationDataSharing":{
"shape":"ResourceDataSyncDestinationDataSharing",
"documentation":"<p>Enables destination data sharing. By default, this field is <code>null</code>.</p>"
}
},
"documentation":"<p>Information about the target Amazon S3 bucket for the Resource Data Sync.</p>"

View file

@ -484,8 +484,7 @@ def parse_get_bucket_location(parsed, http_response, **kwargs):
# The "parsed" passed in only has the ResponseMetadata
# filled out. This handler will fill in the LocationConstraint
# value.
if 'LocationConstraint' in parsed:
# Response already set - a stub?
if http_response.raw is None:
return
response_body = http_response.content
parser = xml.etree.cElementTree.XMLParser(

View file

@ -54,7 +54,7 @@ copyright = u'2013, Mitch Garnaat'
# The short X.Y version.
version = '1.15.'
# The full version, including alpha/beta/rc tags.
release = '1.15.21'
release = '1.15.26'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.

View file

@ -313,3 +313,21 @@ class TestStubber(unittest.TestCase):
actual_response = self.client.list_objects(**expected_params)
self.assertEqual(desired_response, actual_response)
self.stubber.assert_no_pending_responses()

def test_parse_get_bucket_location(self):
error_code = "NoSuchBucket"
error_message = "The specified bucket does not exist"
self.stubber.add_client_error(
'get_bucket_location', error_code, error_message)
self.stubber.activate()
with self.assertRaises(ClientError):
self.client.get_bucket_location(Bucket='foo')

def test_parse_get_bucket_location_returns_response(self):
# A stubbed GetBucketLocation response must be returned unchanged instead
# of being re-parsed by the parse_get_bucket_location handler.
service_response = {"LocationConstraint": "us-west-2"}
self.stubber.add_response('get_bucket_location', service_response)
self.stubber.activate()
response = self.client.get_bucket_location(Bucket='foo')
self.assertEqual(response, service_response)

View file

@ -921,13 +921,6 @@ class TestHandlers(BaseSessionTest):
self.assertEqual(parsed['CommonPrefixes'][0]['Prefix'],
u'\xe7\xf6s% asd\x08 c')
def test_get_bucket_location_optional(self):
# This handler should no-op if another hook (i.e. stubber) has already
# filled in response
response = {"LocationConstraint": "eu-west-1"}
handlers.parse_get_bucket_location(response, None),
self.assertEqual(response["LocationConstraint"], "eu-west-1")
def test_set_operation_specific_signer_no_auth_type(self):
signing_name = 'myservice'
context = {'auth_type': None}