diff --git a/PKG-INFO b/PKG-INFO index 860194d7..069b1160 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: botocore -Version: 1.10.15 +Version: 1.10.55 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services diff --git a/botocore.egg-info/PKG-INFO b/botocore.egg-info/PKG-INFO index 860194d7..069b1160 100644 --- a/botocore.egg-info/PKG-INFO +++ b/botocore.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: botocore -Version: 1.10.15 +Version: 1.10.55 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services diff --git a/botocore.egg-info/SOURCES.txt b/botocore.egg-info/SOURCES.txt index 65831136..b54d5d6f 100644 --- a/botocore.egg-info/SOURCES.txt +++ b/botocore.egg-info/SOURCES.txt @@ -47,6 +47,7 @@ botocore/data/acm-pca/2017-08-22/service-2.json botocore/data/acm/2015-12-08/examples-1.json botocore/data/acm/2015-12-08/paginators-1.json botocore/data/acm/2015-12-08/service-2.json +botocore/data/acm/2015-12-08/waiters-2.json botocore/data/alexaforbusiness/2017-11-09/paginators-1.json botocore/data/alexaforbusiness/2017-11-09/service-2.json botocore/data/apigateway/2015-07-09/examples-1.json @@ -80,6 +81,8 @@ botocore/data/cloud9/2017-09-23/paginators-1.json botocore/data/cloud9/2017-09-23/service-2.json botocore/data/clouddirectory/2016-05-10/paginators-1.json botocore/data/clouddirectory/2016-05-10/service-2.json +botocore/data/clouddirectory/2017-01-11/paginators-1.json +botocore/data/clouddirectory/2017-01-11/service-2.json botocore/data/cloudformation/2010-05-15/examples-1.json botocore/data/cloudformation/2010-05-15/paginators-1.json botocore/data/cloudformation/2010-05-15/service-2.json @@ -243,6 +246,8 @@ botocore/data/ecs/2014-11-13/waiters-2.json botocore/data/efs/2015-02-01/examples-1.json botocore/data/efs/2015-02-01/paginators-1.json botocore/data/efs/2015-02-01/service-2.json 
+botocore/data/eks/2017-11-01/paginators-1.json +botocore/data/eks/2017-11-01/service-2.json botocore/data/elasticache/2014-09-30/paginators-1.json botocore/data/elasticache/2014-09-30/service-2.json botocore/data/elasticache/2014-09-30/waiters-2.json @@ -310,6 +315,9 @@ botocore/data/iot-jobs-data/2017-09-29/service-2.json botocore/data/iot/2015-05-28/examples-1.json botocore/data/iot/2015-05-28/paginators-1.json botocore/data/iot/2015-05-28/service-2.json +botocore/data/iot1click-devices/2018-05-14/service-2.json +botocore/data/iot1click-projects/2018-05-14/paginators-1.json +botocore/data/iot1click-projects/2018-05-14/service-2.json botocore/data/iotanalytics/2017-11-27/paginators-1.json botocore/data/iotanalytics/2017-11-27/service-2.json botocore/data/kinesis-video-archived-media/2017-09-30/paginators-1.json @@ -346,6 +354,8 @@ botocore/data/machinelearning/2014-12-12/examples-1.json botocore/data/machinelearning/2014-12-12/paginators-1.json botocore/data/machinelearning/2014-12-12/service-2.json botocore/data/machinelearning/2014-12-12/waiters-2.json +botocore/data/macie/2017-12-19/paginators-1.json +botocore/data/macie/2017-12-19/service-2.json botocore/data/marketplace-entitlement/2017-01-11/paginators-1.json botocore/data/marketplace-entitlement/2017-01-11/service-2.json botocore/data/marketplacecommerceanalytics/2015-07-01/examples-1.json @@ -360,6 +370,8 @@ botocore/data/mediastore-data/2017-09-01/paginators-1.json botocore/data/mediastore-data/2017-09-01/service-2.json botocore/data/mediastore/2017-09-01/paginators-1.json botocore/data/mediastore/2017-09-01/service-2.json +botocore/data/mediatailor/2018-04-23/paginators-1.json +botocore/data/mediatailor/2018-04-23/service-2.json botocore/data/meteringmarketplace/2016-01-14/examples-1.json botocore/data/meteringmarketplace/2016-01-14/service-2.json botocore/data/mgh/2017-05-31/paginators-1.json @@ -369,6 +381,9 @@ botocore/data/mobile/2017-07-01/service-2.json botocore/data/mq/2017-11-27/service-2.json 
botocore/data/mturk/2017-01-17/paginators-1.json botocore/data/mturk/2017-01-17/service-2.json +botocore/data/neptune/2014-10-31/paginators-1.json +botocore/data/neptune/2014-10-31/service-2.json +botocore/data/neptune/2014-10-31/waiters-2.json botocore/data/opsworks/2013-02-18/examples-1.json botocore/data/opsworks/2013-02-18/paginators-1.json botocore/data/opsworks/2013-02-18/service-2.json @@ -379,6 +394,8 @@ botocore/data/opsworkscm/2016-11-01/service-2.json botocore/data/opsworkscm/2016-11-01/waiters-2.json botocore/data/organizations/2016-11-28/paginators-1.json botocore/data/organizations/2016-11-28/service-2.json +botocore/data/pi/2018-02-27/paginators-1.json +botocore/data/pi/2018-02-27/service-2.json botocore/data/pinpoint/2016-12-01/examples-1.json botocore/data/pinpoint/2016-12-01/service-2.json botocore/data/polly/2016-06-10/examples-1.json diff --git a/botocore/__init__.py b/botocore/__init__.py index 4f2384ed..0720cb70 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re import logging -__version__ = '1.10.15' +__version__ = '1.10.55' class NullHandler(logging.Handler): @@ -29,7 +29,6 @@ log.addHandler(NullHandler()) _first_cap_regex = re.compile('(.)([A-Z][a-z]+)') -_number_cap_regex = re.compile('([a-z])([0-9]+)') _end_cap_regex = re.compile('([a-z0-9])([A-Z])') # The regex below handles the special case where some acryonym # name is pluralized, e.g GatewayARNs, ListWebACLs, SomeCNAMEs. @@ -52,10 +51,6 @@ _xform_cache = { # services which might have a matching argument or operation. This way a # common mis-translation can be fixed without having to call out each # individual case. 
-_partial_renames = { - 'ipv-6': 'ipv6', - 'ipv_6': 'ipv6', -} ScalarTypes = ('string', 'integer', 'boolean', 'timestamp', 'float', 'double') BOTOCORE_ROOT = os.path.dirname(os.path.abspath(__file__)) @@ -73,8 +68,7 @@ class UNSIGNED(object): UNSIGNED = UNSIGNED() -def xform_name(name, sep='_', _xform_cache=_xform_cache, - partial_renames=_partial_renames): +def xform_name(name, sep='_', _xform_cache=_xform_cache): """Convert camel case to a "pythonic" name. If the name contains the ``sep`` character, then it is @@ -93,12 +87,6 @@ def xform_name(name, sep='_', _xform_cache=_xform_cache, # Replace something like ARNs, ACLs with _arns, _acls. name = name[:-len(matched)] + sep + matched.lower() s1 = _first_cap_regex.sub(r'\1' + sep + r'\2', name) - s2 = _number_cap_regex.sub(r'\1' + sep + r'\2', s1) - transformed = _end_cap_regex.sub(r'\1' + sep + r'\2', s2).lower() - - # Do partial renames - for old, new in partial_renames.items(): - if old in transformed: - transformed = transformed.replace(old, new) + transformed = _end_cap_regex.sub(r'\1' + sep + r'\2', s1).lower() _xform_cache[key] = transformed return _xform_cache[key] diff --git a/botocore/credentials.py b/botocore/credentials.py index 6d2a0d59..d82c04d2 100644 --- a/botocore/credentials.py +++ b/botocore/credentials.py @@ -69,7 +69,8 @@ def create_credential_resolver(session, cache=None): instance_metadata_provider = InstanceMetadataProvider( iam_role_fetcher=InstanceMetadataFetcher( timeout=metadata_timeout, - num_attempts=num_attempts) + num_attempts=num_attempts, + user_agent=session.user_agent()) ) assume_role_provider = AssumeRoleProvider( load_config=lambda: session.full_config, diff --git a/botocore/data/_retry.json b/botocore/data/_retry.json index 3b626dbf..c477c9bd 100644 --- a/botocore/data/_retry.json +++ b/botocore/data/_retry.json @@ -102,6 +102,20 @@ "too_many_requests": {"$ref": "too_many_requests"} } }, + "organizations": { + "__default__": { + "policies": { + "too_many_requests": { + 
"applies_when": { + "response": { + "service_error_code": "TooManyRequestsException", + "http_status_code": 400 + } + } + } + } + } + }, "dynamodb": { "__default__": { "max_attempts": 10, diff --git a/botocore/data/acm-pca/2017-08-22/service-2.json b/botocore/data/acm-pca/2017-08-22/service-2.json index eed7bb1b..f24c7ba6 100644 --- a/botocore/data/acm-pca/2017-08-22/service-2.json +++ b/botocore/data/acm-pca/2017-08-22/service-2.json @@ -26,7 +26,7 @@ {"shape":"InvalidPolicyException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Creates a private subordinate certificate authority (CA). You must specify the CA configuration, the revocation configuration, the CA type, and an optional idempotency token. The CA configuration specifies the name of the algorithm and key size to be used to create the CA private key, the type of signing algorithm that the CA uses to sign, and X.500 subject information. The CRL (certificate revocation list) configuration specifies the CRL expiration period in days (the validity period of the CRL), the Amazon S3 bucket that will contain the CRL, and a CNAME alias for the S3 bucket that is included in certificates issued by the CA. If successful, this function returns the Amazon Resource Name (ARN) of the CA.

", + "documentation":"

Creates a private subordinate certificate authority (CA). You must specify the CA configuration, the revocation configuration, the CA type, and an optional idempotency token. The CA configuration specifies the name of the algorithm and key size to be used to create the CA private key, the type of signing algorithm that the CA uses to sign, and X.500 subject information. The CRL (certificate revocation list) configuration specifies the CRL expiration period in days (the validity period of the CRL), the Amazon S3 bucket that will contain the CRL, and a CNAME alias for the S3 bucket that is included in certificates issued by the CA. If successful, this operation returns the Amazon Resource Name (ARN) of the CA.

", "idempotent":true }, "CreateCertificateAuthorityAuditReport":{ @@ -45,7 +45,7 @@ {"shape":"InvalidArgsException"}, {"shape":"InvalidStateException"} ], - "documentation":"

Creates an audit report that lists every time that the your CA private key is used. The report is saved in the Amazon S3 bucket that you specify on input. The IssueCertificate and RevokeCertificate functions use the private key. You can generate a new report every 30 minutes.

", + "documentation":"

Creates an audit report that lists every time that your CA private key is used. The report is saved in the Amazon S3 bucket that you specify on input. The IssueCertificate and RevokeCertificate operations use the private key. You can generate a new report every 30 minutes.

", "idempotent":true }, "DeleteCertificateAuthority":{ @@ -61,7 +61,7 @@ {"shape":"InvalidArnException"}, {"shape":"InvalidStateException"} ], - "documentation":"

Deletes the private certificate authority (CA) that you created or started to create by calling the CreateCertificateAuthority function. This action requires that you enter an ARN (Amazon Resource Name) for the private CA that you want to delete. You can find the ARN by calling the ListCertificateAuthorities function. You can delete the CA if you are waiting for it to be created (the Status field of the CertificateAuthority is CREATING) or if the CA has been created but you haven't yet imported the signed certificate (the Status is PENDING_CERTIFICATE) into ACM PCA. If you've already imported the certificate, you cannot delete the CA unless it has been disabled for more than 30 days. To disable a CA, call the UpdateCertificateAuthority function and set the CertificateAuthorityStatus argument to DISABLED.

" + "documentation":"

Deletes a private certificate authority (CA). You must provide the ARN (Amazon Resource Name) of the private CA that you want to delete. You can find the ARN by calling the ListCertificateAuthorities operation. Before you can delete a CA, you must disable it. Call the UpdateCertificateAuthority operation and set the CertificateAuthorityStatus parameter to DISABLED.

Additionally, you can delete a CA if you are waiting for it to be created (the Status field of the CertificateAuthority is CREATING). You can also delete it if the CA has been created but you haven't yet imported the signed certificate (the Status is PENDING_CERTIFICATE) into ACM PCA.

If the CA is in one of the aforementioned states and you call DeleteCertificateAuthority, the CA's status changes to DELETED. However, the CA won't be permanently deleted until the restoration period has passed. By default, if you do not set the PermanentDeletionTimeInDays parameter, the CA remains restorable for 30 days. You can set the parameter from 7 to 30 days. The DescribeCertificateAuthority operation returns the time remaining in the restoration window of a Private CA in the DELETED state. To restore an eligible CA, call the RestoreCertificateAuthority operation.

" }, "DescribeCertificateAuthority":{ "name":"DescribeCertificateAuthority", @@ -75,7 +75,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidArnException"} ], - "documentation":"

Lists information about your private certificate authority (CA). You specify the private CA on input by its ARN (Amazon Resource Name). The output contains the status of your CA. This can be any of the following:

" + "documentation":"

Lists information about your private certificate authority (CA). You specify the private CA on input by its ARN (Amazon Resource Name). The output contains the status of your CA. This can be any of the following:

" }, "DescribeCertificateAuthorityAuditReport":{ "name":"DescribeCertificateAuthorityAuditReport", @@ -87,9 +87,10 @@ "output":{"shape":"DescribeCertificateAuthorityAuditReportResponse"}, "errors":[ {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidArnException"}, {"shape":"InvalidArgsException"} ], - "documentation":"

Lists information about a specific audit report created by calling the CreateCertificateAuthorityAuditReport function. Audit information is created every time the certificate authority (CA) private key is used. The private key is used when you call the IssueCertificate function or the RevokeCertificate function.

" + "documentation":"

Lists information about a specific audit report created by calling the CreateCertificateAuthorityAuditReport operation. Audit information is created every time the certificate authority (CA) private key is used. The private key is used when you call the IssueCertificate operation or the RevokeCertificate operation.

" }, "GetCertificate":{ "name":"GetCertificate", @@ -106,7 +107,7 @@ {"shape":"InvalidArnException"}, {"shape":"InvalidStateException"} ], - "documentation":"

Retrieves a certificate from your private CA. The ARN of the certificate is returned when you call the IssueCertificate function. You must specify both the ARN of your private CA and the ARN of the issued certificate when calling the GetCertificate function. You can retrieve the certificate if it is in the ISSUED state. You can call the CreateCertificateAuthorityAuditReport function to create a report that contains information about all of the certificates issued and revoked by your private CA.

" + "documentation":"

Retrieves a certificate from your private CA. The ARN of the certificate is returned when you call the IssueCertificate operation. You must specify both the ARN of your private CA and the ARN of the issued certificate when calling the GetCertificate operation. You can retrieve the certificate if it is in the ISSUED state. You can call the CreateCertificateAuthorityAuditReport operation to create a report that contains information about all of the certificates issued and revoked by your private CA.

" }, "GetCertificateAuthorityCertificate":{ "name":"GetCertificateAuthorityCertificate", @@ -135,9 +136,10 @@ {"shape":"RequestInProgressException"}, {"shape":"RequestFailedException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"InvalidArnException"} + {"shape":"InvalidArnException"}, + {"shape":"InvalidStateException"} ], - "documentation":"

Retrieves the certificate signing request (CSR) for your private certificate authority (CA). The CSR is created when you call the CreateCertificateAuthority function. Take the CSR to your on-premises X.509 infrastructure and sign it by using your root or a subordinate CA. Then import the signed certificate back into ACM PCA by calling the ImportCertificateAuthorityCertificate function. The CSR is returned as a base64 PEM-encoded string.

" + "documentation":"

Retrieves the certificate signing request (CSR) for your private certificate authority (CA). The CSR is created when you call the CreateCertificateAuthority operation. Take the CSR to your on-premises X.509 infrastructure and sign it by using your root or a subordinate CA. Then import the signed certificate back into ACM PCA by calling the ImportCertificateAuthorityCertificate operation. The CSR is returned as a base64 PEM-encoded string.

" }, "ImportCertificateAuthorityCertificate":{ "name":"ImportCertificateAuthorityCertificate", @@ -152,10 +154,11 @@ {"shape":"RequestFailedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidArnException"}, + {"shape":"InvalidStateException"}, {"shape":"MalformedCertificateException"}, {"shape":"CertificateMismatchException"} ], - "documentation":"

Imports your signed private CA certificate into ACM PCA. Before you can call this function, you must create the private certificate authority by calling the CreateCertificateAuthority function. You must then generate a certificate signing request (CSR) by calling the GetCertificateAuthorityCsr function. Take the CSR to your on-premises CA and use the root certificate or a subordinate certificate to sign it. Create a certificate chain and copy the signed certificate and the certificate chain to your working directory.

Your certificate chain must not include the private CA certificate that you are importing.

Your on-premises CA certificate must be the last certificate in your chain. The subordinate certificate, if any, that your root CA signed must be next to last. The subordinate certificate signed by the preceding subordinate CA must come next, and so on until your chain is built.

The chain must be PEM-encoded.

" + "documentation":"

Imports your signed private CA certificate into ACM PCA. Before you can call this operation, you must create the private certificate authority by calling the CreateCertificateAuthority operation. You must then generate a certificate signing request (CSR) by calling the GetCertificateAuthorityCsr operation. Take the CSR to your on-premises CA and use the root certificate or a subordinate certificate to sign it. Create a certificate chain and copy the signed certificate and the certificate chain to your working directory.

Your certificate chain must not include the private CA certificate that you are importing.

Your on-premises CA certificate must be the last certificate in your chain. The subordinate certificate, if any, that your root CA signed must be next to last. The subordinate certificate signed by the preceding subordinate CA must come next, and so on until your chain is built.

The chain must be PEM-encoded.

" }, "IssueCertificate":{ "name":"IssueCertificate", @@ -173,7 +176,7 @@ {"shape":"InvalidArgsException"}, {"shape":"MalformedCSRException"} ], - "documentation":"

Uses your private certificate authority (CA) to issue a client certificate. This function returns the Amazon Resource Name (ARN) of the certificate. You can retrieve the certificate by calling the GetCertificate function and specifying the ARN.

You cannot use the ACM ListCertificateAuthorities function to retrieve the ARNs of the certificates that you issue by using ACM PCA.

", + "documentation":"

Uses your private certificate authority (CA) to issue a client certificate. This operation returns the Amazon Resource Name (ARN) of the certificate. You can retrieve the certificate by calling the GetCertificate operation and specifying the ARN.

You cannot use the ACM ListCertificateAuthorities operation to retrieve the ARNs of the certificates that you issue by using ACM PCA.

", "idempotent":true }, "ListCertificateAuthorities":{ @@ -187,7 +190,7 @@ "errors":[ {"shape":"InvalidNextTokenException"} ], - "documentation":"

Lists the private certificate authorities that you created by using the CreateCertificateAuthority function.

" + "documentation":"

Lists the private certificate authorities that you created by using the CreateCertificateAuthority operation.

" }, "ListTags":{ "name":"ListTags", @@ -201,7 +204,21 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidArnException"} ], - "documentation":"

Lists the tags, if any, that are associated with your private CA. Tags are labels that you can use to identify and organize your CAs. Each tag consists of a key and an optional value. Call the TagCertificateAuthority function to add one or more tags to your CA. Call the UntagCertificateAuthority function to remove tags.

" + "documentation":"

Lists the tags, if any, that are associated with your private CA. Tags are labels that you can use to identify and organize your CAs. Each tag consists of a key and an optional value. Call the TagCertificateAuthority operation to add one or more tags to your CA. Call the UntagCertificateAuthority operation to remove tags.

" + }, + "RestoreCertificateAuthority":{ + "name":"RestoreCertificateAuthority", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreCertificateAuthorityRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidStateException"}, + {"shape":"InvalidArnException"} + ], + "documentation":"

Restores a certificate authority (CA) that is in the DELETED state. You can restore a CA during the period that you defined in the PermanentDeletionTimeInDays parameter of the DeleteCertificateAuthority operation. Currently, you can specify 7 to 30 days. If you did not specify a PermanentDeletionTimeInDays value, by default you can restore the CA at any time in a 30 day period. You can check the time remaining in the restoration period of a private CA in the DELETED state by calling the DescribeCertificateAuthority or ListCertificateAuthorities operations. The status of a restored CA is set to its pre-deletion status when the RestoreCertificateAuthority operation returns. To change its status to ACTIVE, call the UpdateCertificateAuthority operation. If the private CA was in the PENDING_CERTIFICATE state at deletion, you must use the ImportCertificateAuthorityCertificate operation to import a certificate authority into the private CA before it can be activated. You cannot restore a CA after the restoration period has ended.

" }, "RevokeCertificate":{ "name":"RevokeCertificate", @@ -219,7 +236,7 @@ {"shape":"RequestInProgressException"}, {"shape":"RequestFailedException"} ], - "documentation":"

Revokes a certificate that you issued by calling the IssueCertificate function. If you enable a certificate revocation list (CRL) when you create or update your private CA, information about the revoked certificates will be included in the CRL. ACM PCA writes the CRL to an S3 bucket that you specify. For more information about revocation, see the CrlConfiguration structure. ACM PCA also writes revocation information to the audit report. For more information, see CreateCertificateAuthorityAuditReport.

" + "documentation":"

Revokes a certificate that you issued by calling the IssueCertificate operation. If you enable a certificate revocation list (CRL) when you create or update your private CA, information about the revoked certificates will be included in the CRL. ACM PCA writes the CRL to an S3 bucket that you specify. For more information about revocation, see the CrlConfiguration structure. ACM PCA also writes revocation information to the audit report. For more information, see CreateCertificateAuthorityAuditReport.

" }, "TagCertificateAuthority":{ "name":"TagCertificateAuthority", @@ -231,10 +248,11 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidArnException"}, + {"shape":"InvalidStateException"}, {"shape":"InvalidTagException"}, {"shape":"TooManyTagsException"} ], - "documentation":"

Adds one or more tags to your private CA. Tags are labels that you can use to identify and organize your AWS resources. Each tag consists of a key and an optional value. You specify the private CA on input by its Amazon Resource Name (ARN). You specify the tag by using a key-value pair. You can apply a tag to just one private CA if you want to identify a specific characteristic of that CA, or you can apply the same tag to multiple private CAs if you want to filter for a common relationship among those CAs. To remove one or more tags, use the UntagCertificateAuthority function. Call the ListTags function to see what tags are associated with your CA.

" + "documentation":"

Adds one or more tags to your private CA. Tags are labels that you can use to identify and organize your AWS resources. Each tag consists of a key and an optional value. You specify the private CA on input by its Amazon Resource Name (ARN). You specify the tag by using a key-value pair. You can apply a tag to just one private CA if you want to identify a specific characteristic of that CA, or you can apply the same tag to multiple private CAs if you want to filter for a common relationship among those CAs. To remove one or more tags, use the UntagCertificateAuthority operation. Call the ListTags operation to see what tags are associated with your CA.

" }, "UntagCertificateAuthority":{ "name":"UntagCertificateAuthority", @@ -246,9 +264,10 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidArnException"}, + {"shape":"InvalidStateException"}, {"shape":"InvalidTagException"} ], - "documentation":"

Remove one or more tags from your private CA. A tag consists of a key-value pair. If you do not specify the value portion of the tag when calling this function, the tag will be removed regardless of value. If you specify a value, the tag is removed only if it is associated with the specified value. To add tags to a private CA, use the TagCertificateAuthority. Call the ListTags function to see what tags are associated with your CA.

" + "documentation":"

Remove one or more tags from your private CA. A tag consists of a key-value pair. If you do not specify the value portion of the tag when calling this operation, the tag will be removed regardless of value. If you specify a value, the tag is removed only if it is associated with the specified value. To add tags to a private CA, use the TagCertificateAuthority. Call the ListTags operation to see what tags are associated with your CA.

" }, "UpdateCertificateAuthority":{ "name":"UpdateCertificateAuthority", @@ -265,7 +284,7 @@ {"shape":"InvalidStateException"}, {"shape":"InvalidPolicyException"} ], - "documentation":"

Updates the status or configuration of a private certificate authority (CA). Your private CA must be in the ACTIVE or DISABLED state before you can update it. You can disable a private CA that is in the ACTIVE state or make a CA that is in the DISABLED state active again.

" + "documentation":"

Updates the status or configuration of a private certificate authority (CA). Your private CA must be in the ACTIVE or DISABLED state before you can update it. You can disable a private CA that is in the ACTIVE state or make a CA that is in the DISABLED state active again.

" } }, "shapes":{ @@ -274,7 +293,7 @@ "members":{ "Country":{ "shape":"CountryCodeString", - "documentation":"

Two digit code that specifies the country in which the certificate subject located.

" + "documentation":"

Two-digit code that specifies the country in which the certificate subject is located.

" }, "Organization":{ "shape":"String64", @@ -306,11 +325,11 @@ }, "Title":{ "shape":"String64", - "documentation":"

A title such as Mr. or Ms. which is pre-pended to the name to refer formally to the certificate subject.

" + "documentation":"

A title such as Mr. or Ms., which is pre-pended to the name to refer formally to the certificate subject.

" }, "Surname":{ "shape":"String40", - "documentation":"

Family name. In the US and the UK for example, the surname of an individual is ordered last. In Asian cultures the surname is typically ordered first.

" + "documentation":"

Family name. In the US and the UK, for example, the surname of an individual is ordered last. In Asian cultures the surname is typically ordered first.

" }, "GivenName":{ "shape":"String16", @@ -329,7 +348,7 @@ "documentation":"

Typically a qualifier appended to the name of an individual. Examples include Jr. for junior, Sr. for senior, and III for third.

" } }, - "documentation":"

Contains information about the certificate subject. The certificate can be one issued by your private certificate authority (CA) or it can be your private CA certificate. The Subject field in the certificate identifies the entity that owns or controls the public key in the certificate. The entity can be a user, computer, device, or service. The Subject must contain an X.500 distinguished name (DN). A DN is a sequence of relative distinguished names (RDNs). The RDNs are separated by commas in the certificate. The DN must be unique for each for each entity, but your private CA can issue more than one certificate with the same DN to the same entity.

" + "documentation":"

Contains information about the certificate subject. The certificate can be one issued by your private certificate authority (CA) or it can be your private CA certificate. The Subject field in the certificate identifies the entity that owns or controls the public key in the certificate. The entity can be a user, computer, device, or service. The Subject must contain an X.500 distinguished name (DN). A DN is a sequence of relative distinguished names (RDNs). The RDNs are separated by commas in the certificate. The DN must be unique for each entity, but your private CA can issue more than one certificate with the same DN to the same entity.

" }, "Arn":{ "type":"string", @@ -409,9 +428,13 @@ "RevocationConfiguration":{ "shape":"RevocationConfiguration", "documentation":"

Information about the certificate revocation list (CRL) created and maintained by your private CA.

" + }, + "RestorableUntil":{ + "shape":"TStamp", + "documentation":"

The period during which a deleted CA can be restored. For more information, see the PermanentDeletionTimeInDays parameter of the DeleteCertificateAuthorityRequest operation.

" } }, - "documentation":"

Contains information about your private certificate authority (CA). Your private CA can issue and revoke X.509 digital certificates. Digital certificates verify that the entity named in the certificate Subject field owns or controls the public key contained in the Subject Public Key Info field. Call the CreateCertificateAuthority function to create your private CA. You must then call the GetCertificateAuthorityCertificate function to retrieve a private CA certificate signing request (CSR). Take the CSR to your on-premises CA and sign it with the root CA certificate or a subordinate certificate. Call the ImportCertificateAuthorityCertificate function to import the signed certificate into AWS Certificate Manager (ACM).

" + "documentation":"

Contains information about your private certificate authority (CA). Your private CA can issue and revoke X.509 digital certificates. Digital certificates verify that the entity named in the certificate Subject field owns or controls the public key contained in the Subject Public Key Info field. Call the CreateCertificateAuthority operation to create your private CA. You must then call the GetCertificateAuthorityCertificate operation to retrieve a private CA certificate signing request (CSR). Take the CSR to your on-premises CA and sign it with the root CA certificate or a subordinate certificate. Call the ImportCertificateAuthorityCertificate operation to import the signed certificate into AWS Certificate Manager (ACM).

" }, "CertificateAuthorityConfiguration":{ "type":"structure", @@ -434,7 +457,7 @@ "documentation":"

Structure that contains X.500 distinguished name information for your private CA.

" } }, - "documentation":"

Contains configuration information for your private certificate authority (CA). This includes information about the class of public key algorithm and the key pair that your private CA creates when it issues a certificate, the signature algorithm it uses used when issuing certificates, and its X.500 distinguished name. You must specify this information when you call the CreateCertificateAuthority function.

" + "documentation":"

Contains configuration information for your private certificate authority (CA). This includes information about the class of public key algorithm and the key pair that your private CA creates when it issues a certificate, the signature algorithm it uses when issuing certificates, and its X.500 distinguished name. You must specify this information when you call the CreateCertificateAuthority operation.

" }, "CertificateAuthorityStatus":{ "type":"string", @@ -442,6 +465,7 @@ "CREATING", "PENDING_CERTIFICATE", "ACTIVE", + "DELETED", "DISABLED", "EXPIRED", "FAILED" @@ -493,7 +517,7 @@ "members":{ "CertificateAuthorityArn":{ "shape":"Arn", - "documentation":"

Amazon Resource Name (ARN) of the CA to be audited. This is of the form:

arn:aws:acm:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 .

" + "documentation":"

Amazon Resource Name (ARN) of the CA to be audited. This is of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 .

" }, "S3BucketName":{ "shape":"String", @@ -539,7 +563,7 @@ }, "IdempotencyToken":{ "shape":"IdempotencyToken", - "documentation":"

Alphanumeric string that can be used to distinguish between calls to CreateCertificateAuthority. Idempotency tokens time out after five minutes. Therefore, if you call CreateCertificateAuthority multiple times with the same idempotency token within a five minute period, ACM PCA recognizes that you are requesting only one certificate and will issue only one. If you change the idempotency token for each call, however, ACM PCA recognizes that you are requesting multiple certificates.

" + "documentation":"

Alphanumeric string that can be used to distinguish between calls to CreateCertificateAuthority. Idempotency tokens time out after five minutes. Therefore, if you call CreateCertificateAuthority multiple times with the same idempotency token within a five minute period, ACM PCA recognizes that you are requesting only one certificate. As a result, ACM PCA issues only one. If you change the idempotency token for each call, however, ACM PCA recognizes that you are requesting multiple certificates.

" } } }, @@ -548,7 +572,7 @@ "members":{ "CertificateAuthorityArn":{ "shape":"Arn", - "documentation":"

If successful, the Amazon Resource Name (ARN) of the certificate authority (CA). This is of the form:

arn:aws:acm:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 .

" + "documentation":"

If successful, the Amazon Resource Name (ARN) of the certificate authority (CA). This is of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 .

" } } }, @@ -558,7 +582,7 @@ "members":{ "Enabled":{ "shape":"Boolean", - "documentation":"

Boolean value that specifies whether certificate revocation lists (CRLs) are enabled. You can use this value to enable certificate revocation for a new CA when you call the CreateCertificateAuthority function or for an existing CA when you call the UpdateCertificateAuthority function.

", + "documentation":"

Boolean value that specifies whether certificate revocation lists (CRLs) are enabled. You can use this value to enable certificate revocation for a new CA when you call the CreateCertificateAuthority operation or for an existing CA when you call the UpdateCertificateAuthority operation.

", "box":true }, "ExpirationInDays":{ @@ -572,7 +596,7 @@ }, "S3BucketName":{ "shape":"String3To255", - "documentation":"

Name of the S3 bucket that contains the CRL. If you do not provide a value for the CustomCname argument, the name of your S3 bucket is placed into the CRL Distribution Points extension of the issued certificate. You can change the name of your bucket by calling the UpdateCertificateAuthority function. You must specify a bucket policy that allows ACM PCA to write the CRL to your bucket.

" + "documentation":"

Name of the S3 bucket that contains the CRL. If you do not provide a value for the CustomCname argument, the name of your S3 bucket is placed into the CRL Distribution Points extension of the issued certificate. You can change the name of your bucket by calling the UpdateCertificateAuthority operation. You must specify a bucket policy that allows ACM PCA to write the CRL to your bucket.

" } }, "documentation":"

Contains configuration information for a certificate revocation list (CRL). Your private certificate authority (CA) creates base CRLs. Delta CRLs are not supported. You can enable CRLs for your new or an existing private CA by setting the Enabled parameter to true. Your private CA writes CRLs to an S3 bucket that you specify in the S3BucketName parameter. You can hide the name of your bucket by specifying a value for the CustomCname parameter. Your private CA copies the CNAME or the S3 bucket name to the CRL Distribution Points extension of each certificate it issues. Your S3 bucket policy must give write permission to ACM PCA.

Your private CA uses the value in the ExpirationInDays parameter to calculate the nextUpdate field in the CRL. The CRL is refreshed at 1/2 the age of next update or when a certificate is revoked. When a certificate is revoked, it is recorded in the next CRL that is generated and in the next audit report. Only time valid certificates are listed in the CRL. Expired certificates are not included.

CRLs contain the following fields:

Certificate revocation lists created by ACM PCA are DER-encoded. You can use the following OpenSSL command to list a CRL.

openssl crl -inform DER -text -in crl_path -noout

" @@ -589,7 +613,11 @@ "members":{ "CertificateAuthorityArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must be of the form:

arn:aws:acm:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 .

" + "documentation":"

The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must have the following form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 .

" + }, + "PermanentDeletionTimeInDays":{ + "shape":"PermanentDeletionTimeInDays", + "documentation":"

The number of days to make a CA restorable after it has been deleted. This can be anywhere from 7 to 30 days, with 30 being the default.

" } } }, @@ -602,11 +630,11 @@ "members":{ "CertificateAuthorityArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the private CA. This must be of the form:

arn:aws:acm:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 .

" + "documentation":"

The Amazon Resource Name (ARN) of the private CA. This must be of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 .

" }, "AuditReportId":{ "shape":"AuditReportId", - "documentation":"

The report ID returned by calling the CreateCertificateAuthorityAuditReport function.

" + "documentation":"

The report ID returned by calling the CreateCertificateAuthorityAuditReport operation.

" } } }, @@ -637,7 +665,7 @@ "members":{ "CertificateAuthorityArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must be of the form:

arn:aws:acm:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 .

" + "documentation":"

The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must be of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 .

" } } }, @@ -670,7 +698,7 @@ "members":{ "CertificateAuthorityArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of your private CA. This is of the form:

arn:aws:acm:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 .

" + "documentation":"

The Amazon Resource Name (ARN) of your private CA. This is of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 .

" } } }, @@ -693,7 +721,7 @@ "members":{ "CertificateAuthorityArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority function. This must be of the form:

arn:aws:acm:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

" + "documentation":"

The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority operation. This must be of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

" } } }, @@ -715,11 +743,11 @@ "members":{ "CertificateAuthorityArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must be of the form:

arn:aws:acm:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 .

" + "documentation":"

The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must be of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 .

" }, "CertificateArn":{ "shape":"Arn", - "documentation":"

The ARN of the issued certificate. The ARN contains the certificate serial number and must be in the following form:

arn:aws:acm:region:account:certificate-authority/12345678-1234-1234-1234-123456789012/certificate/286535153982981100925020015808220737245

" + "documentation":"

The ARN of the issued certificate. The ARN contains the certificate serial number and must be in the following form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012/certificate/286535153982981100925020015808220737245

" } } }, @@ -752,7 +780,7 @@ "members":{ "CertificateAuthorityArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must be of the form:

arn:aws:acm:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

" + "documentation":"

The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must be of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

" }, "Certificate":{ "shape":"CertificateBodyBlob", @@ -828,7 +856,7 @@ "members":{ "CertificateAuthorityArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must be of the form:

arn:aws:acm:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

" + "documentation":"

The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must be of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

" }, "Csr":{ "shape":"CsrBlob", @@ -844,7 +872,7 @@ }, "IdempotencyToken":{ "shape":"IdempotencyToken", - "documentation":"

Custom string that can be used to distinguish between calls to the IssueCertificate function. Idempotency tokens time out after one hour. Therefore, if you call IssueCertificate multiple times with the same idempotency token within 5 minutes, ACM PCA recognizes that you are requesting only one certificate and will issue only one. If you change the idempotency token for each call, PCA recognizes that you are requesting multiple certificates.

" + "documentation":"

Custom string that can be used to distinguish between calls to the IssueCertificate operation. Idempotency tokens time out after one hour. Therefore, if you call IssueCertificate multiple times with the same idempotency token within one hour, ACM PCA recognizes that you are requesting only one certificate and will issue only one. If you change the idempotency token for each call, PCA recognizes that you are requesting multiple certificates.

" } } }, @@ -853,7 +881,7 @@ "members":{ "CertificateArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the issued certificate and the certificate serial number. This is of the form:

arn:aws:acm:region:account:certificate-authority/12345678-1234-1234-1234-123456789012/certificate/286535153982981100925020015808220737245

" + "documentation":"

The Amazon Resource Name (ARN) of the issued certificate and the certificate serial number. This is of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012/certificate/286535153982981100925020015808220737245

" } } }, @@ -906,7 +934,7 @@ "members":{ "CertificateAuthorityArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority function. This must be of the form:

arn:aws:acm:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

" + "documentation":"

The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority operation. This must be of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

" }, "NextToken":{ "shape":"NextToken", @@ -957,6 +985,11 @@ "max":500, "min":1 }, + "PermanentDeletionTimeInDays":{ + "type":"integer", + "max":30, + "min":7 + }, "PositiveLong":{ "type":"long", "min":1 @@ -993,6 +1026,16 @@ "documentation":"

A resource such as a private CA, S3 bucket, certificate, or audit report cannot be found.

", "exception":true }, + "RestoreCertificateAuthorityRequest":{ + "type":"structure", + "required":["CertificateAuthorityArn"], + "members":{ + "CertificateAuthorityArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority operation. This must be of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

" + } + } + }, "RevocationConfiguration":{ "type":"structure", "members":{ @@ -1001,7 +1044,7 @@ "documentation":"

Configuration of the certificate revocation list (CRL), if any, maintained by your private CA.

" } }, - "documentation":"

Certificate revocation information used by the CreateCertificateAuthority and UpdateCertificateAuthority functions. Your private certificate authority (CA) can create and maintain a certificate revocation list (CRL). A CRL contains information about certificates revoked by your CA. For more information, see RevokeCertificate.

" + "documentation":"

Certificate revocation information used by the CreateCertificateAuthority and UpdateCertificateAuthority operations. Your private certificate authority (CA) can create and maintain a certificate revocation list (CRL). A CRL contains information about certificates revoked by your CA. For more information, see RevokeCertificate.

" }, "RevocationReason":{ "type":"string", @@ -1026,11 +1069,11 @@ "members":{ "CertificateAuthorityArn":{ "shape":"Arn", - "documentation":"

Amazon Resource Name (ARN) of the private CA that issued the certificate to be revoked. This must be of the form:

arn:aws:acm:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

" + "documentation":"

Amazon Resource Name (ARN) of the private CA that issued the certificate to be revoked. This must be of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

" }, "CertificateSerial":{ "shape":"String128", - "documentation":"

Serial number of the certificate to be revoked. This must be in hexadecimal format. You can retrieve the serial number by calling GetCertificate with the Amazon Resource Name (ARN) of the certificate you want and the ARN of your private CA. The GetCertificate function retrieves the certificate in the PEM format. You can use the following OpenSSL command to list the certificate in text format and copy the hexadecimal serial number.

openssl x509 -in file_path -text -noout

You can also copy the serial number from the console or use the DescribeCertificate function in the AWS Certificate Manager API Reference.

" + "documentation":"

Serial number of the certificate to be revoked. This must be in hexadecimal format. You can retrieve the serial number by calling GetCertificate with the Amazon Resource Name (ARN) of the certificate you want and the ARN of your private CA. The GetCertificate operation retrieves the certificate in the PEM format. You can use the following OpenSSL command to list the certificate in text format and copy the hexadecimal serial number.

openssl x509 -in file_path -text -noout

You can also copy the serial number from the console or use the DescribeCertificate operation in the AWS Certificate Manager API Reference.

" }, "RevocationReason":{ "shape":"RevocationReason", @@ -1104,7 +1147,7 @@ "documentation":"

Value of the tag.

" } }, - "documentation":"

Tags are labels that you can use to identify and organize your private CAs. Each tag consists of a key and an optional value. You can associate up to 50 tags with a private CA. To add one or more tags to a private CA, call the TagCertificateAuthority function. To remove a tag, call the UntagCertificateAuthority function.

" + "documentation":"

Tags are labels that you can use to identify and organize your private CAs. Each tag consists of a key and an optional value. You can associate up to 50 tags with a private CA. To add one or more tags to a private CA, call the TagCertificateAuthority operation. To remove a tag, call the UntagCertificateAuthority operation.

" }, "TagCertificateAuthorityRequest":{ "type":"structure", @@ -1115,7 +1158,7 @@ "members":{ "CertificateAuthorityArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must be of the form:

arn:aws:acm:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

" + "documentation":"

The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must be of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

" }, "Tags":{ "shape":"TagList", @@ -1158,7 +1201,7 @@ "members":{ "CertificateAuthorityArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must be of the form:

arn:aws:acm:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

" + "documentation":"

The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. This must be of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

" }, "Tags":{ "shape":"TagList", @@ -1172,7 +1215,7 @@ "members":{ "CertificateAuthorityArn":{ "shape":"Arn", - "documentation":"

Amazon Resource Name (ARN) of the private CA that issued the certificate to be revoked. This must be of the form:

arn:aws:acm:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

" + "documentation":"

Amazon Resource Name (ARN) of the private CA that issued the certificate to be revoked. This must be of the form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

" }, "RevocationConfiguration":{ "shape":"RevocationConfiguration", @@ -1201,7 +1244,7 @@ "documentation":"

Specifies whether the Value parameter represents days, months, or years.

" } }, - "documentation":"

Length of time for which the certificate issued by your private certificate authority (CA), or by the private CA itself, is valid in days, months, or years. You can issue a certificate by calling the IssueCertificate function.

" + "documentation":"

Length of time for which the certificate issued by your private certificate authority (CA), or by the private CA itself, is valid in days, months, or years. You can issue a certificate by calling the IssueCertificate operation.

" }, "ValidityPeriodType":{ "type":"string", @@ -1214,5 +1257,5 @@ ] } }, - "documentation":"

You can use the ACM PCA API to create a private certificate authority (CA). You must first call the CreateCertificateAuthority function. If successful, the function returns an Amazon Resource Name (ARN) for your private CA. Use this ARN as input to the GetCertificateAuthorityCsr function to retrieve the certificate signing request (CSR) for your private CA certificate. Sign the CSR using the root or an intermediate CA in your on-premises PKI hierarchy, and call the ImportCertificateAuthorityCertificate to import your signed private CA certificate into ACM PCA.

Use your private CA to issue and revoke certificates. These are private certificates that identify and secure client computers, servers, applications, services, devices, and users over SSLS/TLS connections within your organization. Call the IssueCertificate function to issue a certificate. Call the RevokeCertificate function to revoke a certificate.

Certificates issued by your private CA can be trusted only within your organization, not publicly.

Your private CA can optionally create a certificate revocation list (CRL) to track the certificates you revoke. To create a CRL, you must specify a RevocationConfiguration object when you call the CreateCertificateAuthority function. ACM PCA writes the CRL to an S3 bucket that you specify. You must specify a bucket policy that grants ACM PCA write permission.

You can also call the CreateCertificateAuthorityAuditReport to create an optional audit report that lists every time the CA private key is used. The private key is used for signing when the IssueCertificate or RevokeCertificate function is called.

" + "documentation":"

You can use the ACM PCA API to create a private certificate authority (CA). You must first call the CreateCertificateAuthority operation. If successful, the operation returns an Amazon Resource Name (ARN) for your private CA. Use this ARN as input to the GetCertificateAuthorityCsr operation to retrieve the certificate signing request (CSR) for your private CA certificate. Sign the CSR using the root or an intermediate CA in your on-premises PKI hierarchy, and call the ImportCertificateAuthorityCertificate to import your signed private CA certificate into ACM PCA.

Use your private CA to issue and revoke certificates. These are private certificates that identify and secure client computers, servers, applications, services, devices, and users over SSL/TLS connections within your organization. Call the IssueCertificate operation to issue a certificate. Call the RevokeCertificate operation to revoke a certificate.

Certificates issued by your private CA can be trusted only within your organization, not publicly.

Your private CA can optionally create a certificate revocation list (CRL) to track the certificates you revoke. To create a CRL, you must specify a RevocationConfiguration object when you call the CreateCertificateAuthority operation. ACM PCA writes the CRL to an S3 bucket that you specify. You must specify a bucket policy that grants ACM PCA write permission.

You can also call the CreateCertificateAuthorityAuditReport to create an optional audit report that lists every time the CA private key is used. The private key is used for signing when the IssueCertificate or RevokeCertificate operation is called.

" } diff --git a/botocore/data/acm/2015-12-08/waiters-2.json b/botocore/data/acm/2015-12-08/waiters-2.json new file mode 100644 index 00000000..1fba453d --- /dev/null +++ b/botocore/data/acm/2015-12-08/waiters-2.json @@ -0,0 +1,35 @@ +{ + "version": 2, + "waiters": { + "CertificateValidated": { + "delay": 60, + "maxAttempts": 40, + "operation": "DescribeCertificate", + "acceptors": [ + { + "matcher": "pathAll", + "expected": "SUCCESS", + "argument": "Certificate.DomainValidationOptions[].ValidationStatus", + "state": "success" + }, + { + "matcher": "pathAny", + "expected": "PENDING_VALIDATION", + "argument": "Certificate.DomainValidationOptions[].ValidationStatus", + "state": "retry" + }, + { + "matcher": "path", + "expected": "FAILED", + "argument": "Certificate.Status", + "state": "failure" + }, + { + "matcher": "error", + "expected": "ResourceNotFoundException", + "state": "failure" + } + ] + } + } +} diff --git a/botocore/data/alexaforbusiness/2017-11-09/service-2.json b/botocore/data/alexaforbusiness/2017-11-09/service-2.json index 9f426532..7fca8bae 100644 --- a/botocore/data/alexaforbusiness/2017-11-09/service-2.json +++ b/botocore/data/alexaforbusiness/2017-11-09/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceFullName":"Alexa For Business", + "serviceId":"Alexa For Business", "signatureVersion":"v4", "targetPrefix":"AlexaForBusiness", "uid":"alexaforbusiness-2017-11-09" @@ -33,7 +34,8 @@ "input":{"shape":"AssociateDeviceWithRoomRequest"}, "output":{"shape":"AssociateDeviceWithRoomResponse"}, "errors":[ - {"shape":"LimitExceededException"} + {"shape":"LimitExceededException"}, + {"shape":"DeviceNotRegisteredException"} ], "documentation":"

Associates a device with a given room. This applies all the settings from the room profile to the device, and all the skills in any skill groups added to that room. This operation requires the device to be online, or else a manual sync is required.

" }, @@ -237,6 +239,9 @@ }, "input":{"shape":"DisassociateDeviceFromRoomRequest"}, "output":{"shape":"DisassociateDeviceFromRoomResponse"}, + "errors":[ + {"shape":"DeviceNotRegisteredException"} + ], "documentation":"

Disassociates a device from its current room. The device continues to be connected to the Wi-Fi network and is still registered to the account. The device settings and skills are removed from the room.

" }, "DisassociateSkillGroupFromRoom":{ @@ -504,6 +509,9 @@ }, "input":{"shape":"StartDeviceSyncRequest"}, "output":{"shape":"StartDeviceSyncResponse"}, + "errors":[ + {"shape":"DeviceNotRegisteredException"} + ], "documentation":"

Resets a device and its account to the known default settings, by clearing all information and settings set by previous users.

" }, "TagResource":{ @@ -568,7 +576,8 @@ "input":{"shape":"UpdateDeviceRequest"}, "output":{"shape":"UpdateDeviceResponse"}, "errors":[ - {"shape":"NotFoundException"} + {"shape":"NotFoundException"}, + {"shape":"DeviceNotRegisteredException"} ], "documentation":"

Updates the device name by device ARN.

" }, @@ -1303,6 +1312,13 @@ "min":2, "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*" }, + "DeviceNotRegisteredException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "exception":true + }, "DeviceSerialNumber":{ "type":"string", "pattern":"[a-zA-Z0-9]{1,200}" diff --git a/botocore/data/apigateway/2015-07-09/service-2.json b/botocore/data/apigateway/2015-07-09/service-2.json index d3b848b0..d5659a14 100644 --- a/botocore/data/apigateway/2015-07-09/service-2.json +++ b/botocore/data/apigateway/2015-07-09/service-2.json @@ -3533,17 +3533,18 @@ "members":{ "types":{ "shape":"ListOfEndpointType", - "documentation":"

A list of endpoint types of an API (RestApi) or its custom domain name (DomainName). For an edge-optimized API and its custom domain name, the endpoint type is \"EDGE\". For a regional API and its custom domain name, the endpoint type is REGIONAL.

" + "documentation":"

A list of endpoint types of an API (RestApi) or its custom domain name (DomainName). For an edge-optimized API and its custom domain name, the endpoint type is \"EDGE\". For a regional API and its custom domain name, the endpoint type is REGIONAL. For a private API, the endpoint type is PRIVATE.

" } }, "documentation":"

The endpoint configuration to indicate the types of endpoints an API (RestApi) or its custom domain name (DomainName) has.

" }, "EndpointType":{ "type":"string", - "documentation":"

The endpoint type. The valid value is EDGE for edge-optimized API setup, most suitable for mobile applications, REGIONAL for regional API endpoint setup, most suitable for calling from AWS Region

", + "documentation":"

The endpoint type. The valid values are EDGE for edge-optimized API setup, most suitable for mobile applications; REGIONAL for regional API endpoint setup, most suitable for calling from AWS Region; and PRIVATE for private APIs.

", "enum":[ "REGIONAL", - "EDGE" + "EDGE", + "PRIVATE" ] }, "ExportResponse":{ @@ -4102,7 +4103,7 @@ }, "parameters":{ "shape":"MapOfStringToString", - "documentation":"

A key-value map of query string parameters that specify properties of the export, depending on the requested exportType. For exportType swagger, any combination of the following parameters are supported: integrations will export the API with x-amazon-apigateway-integration extensions. authorizers will export the API with x-amazon-apigateway-authorizer extensions. postman will export the API with Postman extensions, allowing for import to the Postman tool

", + "documentation":"

A key-value map of query string parameters that specify properties of the export, depending on the requested exportType. For exportType swagger, any combination of the following parameters are supported: extensions='integrations' or extensions='apigateway' will export the API with x-amazon-apigateway-integration extensions. extensions='authorizers' will export the API with x-amazon-apigateway-authorizer extensions. postman will export the API with Postman extensions, allowing for import to the Postman tool

", "location":"querystring" }, "accepts":{ @@ -4874,7 +4875,7 @@ }, "parameters":{ "shape":"MapOfStringToString", - "documentation":"

A key-value map of context-specific query string parameters specifying the behavior of different API importing operations. The following shows operation-specific parameters and their supported values.

To exclude DocumentationParts from the import, set parameters as ignore=documentation.

To configure the endpoint type, set parameters as endpointConfigurationTypes=EDGE orendpointConfigurationTypes=REGIONAL. The default endpoint type is EDGE.

To handle imported basePath, set parameters as basePath=ignore, basePath=prepend or basePath=split.

For example, the AWS CLI command to exclude documentation from the imported API is:

aws apigateway import-rest-api --parameters ignore=documentation --body 'file:///path/to/imported-api-body.json

The AWS CLI command to set the regional endpoint on the imported API is:

aws apigateway import-rest-api --parameters endpointConfigurationTypes=REGIONAL --body 'file:///path/to/imported-api-body.json
", + "documentation":"

A key-value map of context-specific query string parameters specifying the behavior of different API importing operations. The following shows operation-specific parameters and their supported values.

To exclude DocumentationParts from the import, set parameters as ignore=documentation.

To configure the endpoint type, set parameters as endpointConfigurationTypes=EDGE, endpointConfigurationTypes=REGIONAL, or endpointConfigurationTypes=PRIVATE. The default endpoint type is EDGE.

To handle imported basePath, set parameters as basePath=ignore, basePath=prepend or basePath=split.

For example, the AWS CLI command to exclude documentation from the imported API is:

aws apigateway import-rest-api --parameters ignore=documentation --body 'file:///path/to/imported-api-body.json'

The AWS CLI command to set the regional endpoint on the imported API is:

aws apigateway import-rest-api --parameters endpointConfigurationTypes=REGIONAL --body 'file:///path/to/imported-api-body.json'
", "location":"querystring" }, "body":{ @@ -5691,7 +5692,7 @@ }, "parameters":{ "shape":"MapOfStringToString", - "documentation":"

Custom header parameters as part of the request. For example, to exclude DocumentationParts from an imported API, set ignore=documentation as a parameters value, as in the AWS CLI command of aws apigateway import-rest-api --parameters ignore=documentation --body 'file:///path/to/imported-api-body.json.

", + "documentation":"

Custom header parameters as part of the request. For example, to exclude DocumentationParts from an imported API, set ignore=documentation as a parameters value, as in the AWS CLI command of aws apigateway import-rest-api --parameters ignore=documentation --body 'file:///path/to/imported-api-body.json'.

", "location":"querystring" }, "body":{ diff --git a/botocore/data/application-autoscaling/2016-02-06/service-2.json b/botocore/data/application-autoscaling/2016-02-06/service-2.json index a426e949..6ea4e64a 100644 --- a/botocore/data/application-autoscaling/2016-02-06/service-2.json +++ b/botocore/data/application-autoscaling/2016-02-06/service-2.json @@ -264,15 +264,15 @@ }, "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

" + "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" } } }, @@ -291,7 +291,7 @@ "members":{ "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" }, "ScheduledActionName":{ "shape":"ResourceIdMaxLen1600", @@ -299,11 +299,11 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

" + "documentation":"

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" } } }, @@ -322,15 +322,15 @@ "members":{ "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

" + "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

" + "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

" } } }, @@ -345,15 +345,15 @@ "members":{ "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" }, "ResourceIds":{ "shape":"ResourceIdsMaxLen1600", - "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

" + "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

" + "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

" }, "MaxResults":{ "shape":"MaxResults", @@ -384,15 +384,15 @@ "members":{ "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

" + "documentation":"

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

" }, "MaxResults":{ "shape":"MaxResults", @@ -427,15 +427,15 @@ }, "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

" + "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

" }, "MaxResults":{ "shape":"MaxResults", @@ -470,15 +470,15 @@ }, "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

" + "documentation":"

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

" }, "MaxResults":{ "shape":"MaxResults", @@ -652,15 +652,15 @@ }, "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

" + "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" }, "PolicyType":{ "shape":"PolicyType", @@ -700,11 +700,11 @@ "members":{ "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" }, "Schedule":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The schedule for this action. The following formats are supported:

At expressions are useful for one-time schedules. Specify the time, in UTC.

For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days.

For more information about cron expressions, see Cron.

" + "documentation":"

The schedule for this action. The following formats are supported:

At expressions are useful for one-time schedules. Specify the time, in UTC.

For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days.

For more information about cron expressions, see Cron Expressions in the Amazon CloudWatch Events User Guide.

" }, "ScheduledActionName":{ "shape":"ScheduledActionName", @@ -712,11 +712,11 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

" + "documentation":"

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This parameter is required if you are creating a scheduled action. This string consists of the service namespace, resource type, and scaling property.

" + "documentation":"

The scalable dimension. This parameter is required if you are creating a scheduled action. This string consists of the service namespace, resource type, and scaling property.

" }, "StartTime":{ "shape":"TimestampType", @@ -747,15 +747,15 @@ "members":{ "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

" + "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

" + "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

" }, "MinCapacity":{ "shape":"ResourceCapacity", @@ -767,7 +767,7 @@ }, "RoleARN":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

Application Auto Scaling creates a service-linked role that grants it permissions to modify the scalable target on your behalf. For more information, see Service-Linked Roles for Application Auto Scaling.

For resources that are not supported using a service-linked role, this parameter is required and must specify the ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf.

" + "documentation":"

Application Auto Scaling creates a service-linked role that grants it permissions to modify the scalable target on your behalf. For more information, see Service-Linked Roles for Application Auto Scaling.

For resources that are not supported using a service-linked role, this parameter is required and must specify the ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf.

" } } }, @@ -808,7 +808,8 @@ "dynamodb:index:ReadCapacityUnits", "dynamodb:index:WriteCapacityUnits", "rds:cluster:ReadReplicaCount", - "sagemaker:variant:DesiredInstanceCount" + "sagemaker:variant:DesiredInstanceCount", + "custom-resource:ResourceType:Property" ] }, "ScalableTarget":{ @@ -825,15 +826,15 @@ "members":{ "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

" + "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

" + "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

" }, "MinCapacity":{ "shape":"ResourceCapacity", @@ -895,15 +896,15 @@ }, "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier.

" + "documentation":"

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" }, "Description":{ "shape":"XmlString", @@ -974,15 +975,15 @@ }, "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

" + "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" }, "PolicyType":{ "shape":"PolicyType", @@ -1028,19 +1029,19 @@ }, "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" }, "Schedule":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The schedule for this action. The following formats are supported:

At expressions are useful for one-time schedules. Specify the time, in UTC.

For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days.

For more information about cron expressions, see Cron.

" + "documentation":"

The schedule for this action. The following formats are supported:

At expressions are useful for one-time schedules. Specify the time, in UTC.

For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days.

For more information about cron expressions, see Cron Expressions in the Amazon CloudWatch Events User Guide.

" }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

" + "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" }, "StartTime":{ "shape":"TimestampType", @@ -1080,7 +1081,8 @@ "appstream", "dynamodb", "rds", - "sagemaker" + "sagemaker", + "custom-resource" ] }, "StepAdjustment":{ @@ -1177,5 +1179,5 @@ "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" } }, - "documentation":"

With Application Auto Scaling, you can configure automatic scaling for your scalable AWS resources. You can use Application Auto Scaling to accomplish the following tasks:

Application Auto Scaling can scale the following AWS resources:

To configure automatic scaling for multiple resources across multiple services, use AWS Auto Scaling to create a scaling plan for your application. For more information, see AWS Auto Scaling.

For a list of supported regions, see AWS Regions and Endpoints: Application Auto Scaling in the AWS General Reference.

" + "documentation":"

With Application Auto Scaling, you can configure automatic scaling for your scalable resources. You can use Application Auto Scaling to accomplish the following tasks:

Application Auto Scaling can scale the following resources:

To learn more about Application Auto Scaling, see the Application Auto Scaling User Guide.

To configure automatic scaling for multiple resources across multiple services, use AWS Auto Scaling to create a scaling plan for your application. For more information, see the AWS Auto Scaling User Guide.

" } diff --git a/botocore/data/appstream/2016-12-01/service-2.json b/botocore/data/appstream/2016-12-01/service-2.json index dbef8ef9..a729e04d 100644 --- a/botocore/data/appstream/2016-12-01/service-2.json +++ b/botocore/data/appstream/2016-12-01/service-2.json @@ -62,7 +62,7 @@ {"shape":"LimitExceededException"}, {"shape":"InvalidAccountStatusException"} ], - "documentation":"

Creates a directory configuration.

" + "documentation":"

Creates a Directory Config object in AppStream 2.0. This object includes the information required to join streaming instances to an Active Directory domain.

" }, "CreateFleet":{ "name":"CreateFleet", @@ -83,7 +83,7 @@ {"shape":"InvalidParameterCombinationException"}, {"shape":"IncompatibleImageException"} ], - "documentation":"

Creates a fleet.

" + "documentation":"

Creates a fleet. A fleet consists of streaming instances that run a specified image.

" }, "CreateImageBuilder":{ "name":"CreateImageBuilder", @@ -104,7 +104,7 @@ {"shape":"InvalidParameterCombinationException"}, {"shape":"IncompatibleImageException"} ], - "documentation":"

Creates an image builder.

The initial state of the builder is PENDING. When it is ready, the state is RUNNING.

" + "documentation":"

Creates an image builder. An image builder is a virtual machine that is used to create an image.

The initial state of the builder is PENDING. When it is ready, the state is RUNNING.

" }, "CreateImageBuilderStreamingURL":{ "name":"CreateImageBuilderStreamingURL", @@ -137,7 +137,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Creates a stack.

" + "documentation":"

Creates a stack to start streaming applications to users. A stack consists of an associated fleet, user access policies, and storage configurations.

" }, "CreateStreamingURL":{ "name":"CreateStreamingURL", @@ -153,7 +153,7 @@ {"shape":"OperationNotPermittedException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Creates a URL to start a streaming session for the specified user.

" + "documentation":"

Creates a temporary URL to start an AppStream 2.0 streaming session for the specified user. A streaming URL enables application streaming to be tested without user setup.

" }, "DeleteDirectoryConfig":{ "name":"DeleteDirectoryConfig", @@ -167,7 +167,7 @@ {"shape":"ResourceInUseException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Deletes the specified directory configuration.

" + "documentation":"

Deletes the specified Directory Config object from AppStream 2.0. This object includes the information required to join streaming instances to an Active Directory domain.

" }, "DeleteFleet":{ "name":"DeleteFleet", @@ -198,7 +198,7 @@ {"shape":"OperationNotPermittedException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

Deletes the specified image. You cannot delete an image that is currently in use. After you delete an image, you cannot provision new capacity using the image.

" + "documentation":"

Deletes the specified image. You cannot delete an image when it is in use. After you delete an image, you cannot provision new capacity using the image.

" }, "DeleteImageBuilder":{ "name":"DeleteImageBuilder", @@ -228,7 +228,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

Deletes the specified stack. After this operation completes, the environment can no longer be activated and any reservations made for the stack are released.

" + "documentation":"

Deletes the specified stack. After the stack is deleted, the application streaming environment provided by the stack is no longer available to users. Also, any reservations made for application streaming sessions for the stack are released.

" }, "DescribeDirectoryConfigs":{ "name":"DescribeDirectoryConfigs", @@ -241,7 +241,7 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"

Describes the specified directory configurations. Note that although the response syntax in this topic includes the account password, this password is not returned in the actual response.

" + "documentation":"

Retrieves a list that describes one or more specified Directory Config objects for AppStream 2.0, if the names for these objects are provided. Otherwise, all Directory Config objects in the account are described. These objects include the information required to join streaming instances to an Active Directory domain.

Although the response syntax in this topic includes the account password, this password is not returned in the actual response.

" }, "DescribeFleets":{ "name":"DescribeFleets", @@ -254,7 +254,7 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"

Describes the specified fleets or all fleets in the account.

" + "documentation":"

Retrieves a list that describes one or more specified fleets, if the fleet names are provided. Otherwise, all fleets in the account are described.

" }, "DescribeImageBuilders":{ "name":"DescribeImageBuilders", @@ -267,7 +267,7 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"

Describes the specified image builders or all image builders in the account.

" + "documentation":"

Retrieves a list that describes one or more specified image builders, if the image builder names are provided. Otherwise, all image builders in the account are described.

" }, "DescribeImages":{ "name":"DescribeImages", @@ -280,7 +280,7 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"

Describes the specified images or all images in the account.

" + "documentation":"

Retrieves a list that describes one or more specified images, if the image names are provided. Otherwise, all images in the account are described.

" }, "DescribeSessions":{ "name":"DescribeSessions", @@ -293,7 +293,7 @@ "errors":[ {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Describes the streaming sessions for the specified stack and fleet. If a user ID is provided, only the streaming sessions for only that user are returned. If an authentication type is not provided, the default is to authenticate users using a streaming URL.

" + "documentation":"

Retrieves a list that describes the streaming sessions for a specified stack and fleet. If a user ID is provided for the stack and fleet, only streaming sessions for that user are described. If an authentication type is not provided, the default is to authenticate users using a streaming URL.

" }, "DescribeStacks":{ "name":"DescribeStacks", @@ -306,7 +306,7 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"

Describes the specified stacks or all stacks in the account.

" + "documentation":"

Retrieves a list that describes one or more specified stacks, if the stack names are provided. Otherwise, all stacks in the account are described.

" }, "DisassociateFleet":{ "name":"DisassociateFleet", @@ -331,7 +331,7 @@ }, "input":{"shape":"ExpireSessionRequest"}, "output":{"shape":"ExpireSessionResult"}, - "documentation":"

Stops the specified streaming session.

" + "documentation":"

Immediately stops the specified streaming session.

" }, "ListAssociatedFleets":{ "name":"ListAssociatedFleets", @@ -341,7 +341,7 @@ }, "input":{"shape":"ListAssociatedFleetsRequest"}, "output":{"shape":"ListAssociatedFleetsResult"}, - "documentation":"

Lists the fleets associated with the specified stack.

" + "documentation":"

Retrieves the name of the fleet that is associated with the specified stack.

" }, "ListAssociatedStacks":{ "name":"ListAssociatedStacks", @@ -351,7 +351,7 @@ }, "input":{"shape":"ListAssociatedStacksRequest"}, "output":{"shape":"ListAssociatedStacksResult"}, - "documentation":"

Lists the stacks associated with the specified fleet.

" + "documentation":"

Retrieves the name of the stack with which the specified fleet is associated.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -364,7 +364,7 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"

Lists the tags for the specified AppStream 2.0 resource. You can tag AppStream 2.0 image builders, images, fleets, and stacks.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

" + "documentation":"

Retrieves a list of all tags for the specified AppStream 2.0 resource. You can tag AppStream 2.0 image builders, images, fleets, and stacks.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

" }, "StartFleet":{ "name":"StartFleet", @@ -455,7 +455,7 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"

Disassociates the specified tags from the specified AppStream 2.0 resource.

To list the current tags for your resources, use ListTagsForResource.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

" + "documentation":"

Disassociates one or more specified tags from the specified AppStream 2.0 resource.

To list the current tags for your resources, use ListTagsForResource.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Developer Guide.

" }, "UpdateDirectoryConfig":{ "name":"UpdateDirectoryConfig", @@ -470,7 +470,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

Updates the specified directory configuration.

" + "documentation":"

Updates the specified Directory Config object in AppStream 2.0. This object includes the information required to join streaming instances to an Active Directory domain.

" }, "UpdateFleet":{ "name":"UpdateFleet", @@ -509,9 +509,10 @@ {"shape":"InvalidParameterCombinationException"}, {"shape":"LimitExceededException"}, {"shape":"InvalidAccountStatusException"}, - {"shape":"IncompatibleImageException"} + {"shape":"IncompatibleImageException"}, + {"shape":"OperationNotPermittedException"} ], - "documentation":"

Updates the specified stack.

" + "documentation":"

Updates the specified fields for the specified stack.

" } }, "shapes":{ @@ -526,6 +527,16 @@ "min":1, "sensitive":true }, + "Action":{ + "type":"string", + "enum":[ + "CLIPBOARD_COPY_FROM_LOCAL_DEVICE", + "CLIPBOARD_COPY_TO_LOCAL_DEVICE", + "FILE_UPLOAD", + "FILE_DOWNLOAD", + "PRINTING_TO_LOCAL_DEVICE" + ] + }, "Application":{ "type":"structure", "members":{ @@ -868,7 +879,7 @@ "required":["Name"], "members":{ "Name":{ - "shape":"String", + "shape":"Name", "documentation":"

The name of the stack.

" }, "Description":{ @@ -890,6 +901,10 @@ "FeedbackURL":{ "shape":"FeedbackURL", "documentation":"

The URL that users are redirected to after they click the Send Feedback link. If no URL is specified, no Send Feedback link is displayed.

" + }, + "UserSettings":{ + "shape":"UserSettingList", + "documentation":"

The actions that are enabled or disabled for users during their streaming sessions. By default, these actions are enabled.

" } } }, @@ -1118,12 +1133,26 @@ } } }, + "DescribeImagesMaxResults":{ + "type":"integer", + "box":true, + "max":25, + "min":0 + }, "DescribeImagesRequest":{ "type":"structure", "members":{ "Names":{ "shape":"StringList", "documentation":"

The names of the images to describe.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The pagination token to use to retrieve the next page of results. If this value is empty, only the first page is retrieved.

" + }, + "MaxResults":{ + "shape":"DescribeImagesMaxResults", + "documentation":"

The maximum size of each results page.

" } } }, @@ -1133,6 +1162,10 @@ "Images":{ "shape":"ImageList", "documentation":"

Information about the images.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The pagination token used to retrieve the next page of results. If this value is empty, only the first page is retrieved.

" } } }, @@ -1270,6 +1303,11 @@ "type":"string", "max":100 }, + "Domain":{ + "type":"string", + "documentation":"GSuite domain for GDrive integration.", + "max":64 + }, "DomainJoinInfo":{ "type":"structure", "members":{ @@ -1284,6 +1322,11 @@ }, "documentation":"

Contains the information needed to join a Microsoft Active Directory domain.

" }, + "DomainList":{ + "type":"list", + "member":{"shape":"Domain"}, + "max":10 + }, "ErrorMessage":{ "type":"string", "documentation":"

The error message in the exception.

" @@ -1734,7 +1777,7 @@ "members":{ "Names":{ "shape":"StringList", - "documentation":"

The names of the fleets.

" + "documentation":"

The name of the fleet.

" }, "NextToken":{ "shape":"String", @@ -1761,7 +1804,7 @@ "members":{ "Names":{ "shape":"StringList", - "documentation":"

The names of the stacks.

" + "documentation":"

The name of the stack.

" }, "NextToken":{ "shape":"String", @@ -1798,6 +1841,20 @@ "type":"string", "pattern":"^[a-zA-Z0-9][a-zA-Z0-9_.-]{0,100}$" }, + "NetworkAccessConfiguration":{ + "type":"structure", + "members":{ + "EniPrivateIpAddress":{ + "shape":"String", + "documentation":"

The private IP address of the elastic network interface that is attached to instances in your VPC.

" + }, + "EniId":{ + "shape":"String", + "documentation":"

The resource identifier of the elastic network interface that is attached to instances in your VPC. All network interfaces have the eni-xxxxxxxx resource identifier.

" + } + }, + "documentation":"

The network details of the fleet instance for the streaming session.

" + }, "OperationNotPermittedException":{ "type":"structure", "members":{ @@ -1814,6 +1871,13 @@ "type":"list", "member":{"shape":"OrganizationalUnitDistinguishedName"} }, + "Permission":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "PlatformType":{ "type":"string", "enum":["WINDOWS"] @@ -1943,6 +2007,10 @@ "AuthenticationType":{ "shape":"AuthenticationType", "documentation":"

The authentication method. The user is authenticated using a streaming URL (API) or SAML federation (SAML).

" + }, + "NetworkAccessConfiguration":{ + "shape":"NetworkAccessConfiguration", + "documentation":"

The network details for the streaming session.

" } }, "documentation":"

Describes a streaming session.

" @@ -2000,6 +2068,10 @@ "StackErrors":{ "shape":"StackErrors", "documentation":"

The errors for the stack.

" + }, + "UserSettings":{ + "shape":"UserSettingList", + "documentation":"

The actions that are enabled or disabled for users during their streaming sessions. By default these actions are enabled.

" } }, "documentation":"

Describes a stack.

" @@ -2008,9 +2080,13 @@ "type":"string", "enum":[ "STORAGE_CONNECTORS", + "STORAGE_CONNECTOR_HOMEFOLDERS", + "STORAGE_CONNECTOR_GOOGLE_DRIVE", + "STORAGE_CONNECTOR_ONE_DRIVE", "REDIRECT_URL", "FEEDBACK_URL", - "THEME_NAME" + "THEME_NAME", + "USER_SETTINGS" ] }, "StackAttributes":{ @@ -2131,9 +2207,13 @@ "ResourceIdentifier":{ "shape":"ResourceIdentifier", "documentation":"

The ARN of the storage connector.

" + }, + "Domains":{ + "shape":"DomainList", + "documentation":"

The names of the domains for the G Suite account.

" } }, - "documentation":"

Describes a storage connector.

" + "documentation":"

Describes a connector to enable persistent storage for users.

" }, "StorageConnectorList":{ "type":"list", @@ -2143,7 +2223,11 @@ "StorageConnectorType":{ "type":"string", "documentation":"

The type of storage connector.

", - "enum":["HOMEFOLDERS"] + "enum":[ + "HOMEFOLDERS", + "GOOGLE_DRIVE", + "ONE_DRIVE" + ] }, "StreamingUrlUserId":{ "type":"string", @@ -2240,7 +2324,7 @@ "members":{ "DirectoryName":{ "shape":"DirectoryName", - "documentation":"

The name of the directory configuration.

" + "documentation":"

The name of the Directory Config object.

" }, "OrganizationalUnitDistinguishedNames":{ "shape":"OrganizationalUnitDistinguishedNamesList", @@ -2257,7 +2341,7 @@ "members":{ "DirectoryConfig":{ "shape":"DirectoryConfig", - "documentation":"

Information about the directory configuration.

" + "documentation":"

Information about the Directory Config object.

" } } }, @@ -2365,6 +2449,10 @@ "AttributesToDelete":{ "shape":"StackAttributes", "documentation":"

The stack attributes to delete.

" + }, + "UserSettings":{ + "shape":"UserSettingList", + "documentation":"

The actions that are enabled or disabled for users during their streaming sessions. By default, these actions are enabled.

" } } }, @@ -2382,6 +2470,29 @@ "max":32, "min":2 }, + "UserSetting":{ + "type":"structure", + "required":[ + "Action", + "Permission" + ], + "members":{ + "Action":{ + "shape":"Action", + "documentation":"

The action that is enabled or disabled.

" + }, + "Permission":{ + "shape":"Permission", + "documentation":"

Indicates whether the action is enabled or disabled.

" + } + }, + "documentation":"

Describes an action and whether the action is enabled or disabled for users during their streaming sessions.

" + }, + "UserSettingList":{ + "type":"list", + "member":{"shape":"UserSetting"}, + "min":1 + }, "VisibilityType":{ "type":"string", "enum":[ diff --git a/botocore/data/appsync/2017-07-25/service-2.json b/botocore/data/appsync/2017-07-25/service-2.json index 992cbb3b..db339a28 100644 --- a/botocore/data/appsync/2017-07-25/service-2.json +++ b/botocore/data/appsync/2017-07-25/service-2.json @@ -7,6 +7,7 @@ "protocol":"rest-json", "serviceAbbreviation":"AWSAppSync", "serviceFullName":"AWS AppSync", + "serviceId":"AppSync", "signatureVersion":"v4", "signingName":"appsync", "uid":"appsync-2017-07-25" diff --git a/botocore/data/athena/2017-05-18/service-2.json b/botocore/data/athena/2017-05-18/service-2.json index c2d47896..c2c5e79e 100644 --- a/botocore/data/athena/2017-05-18/service-2.json +++ b/botocore/data/athena/2017-05-18/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceFullName":"Amazon Athena", + "serviceId":"Athena", "signatureVersion":"v4", "targetPrefix":"AmazonAthena", "uid":"athena-2017-05-18" diff --git a/botocore/data/autoscaling/2011-01-01/service-2.json b/botocore/data/autoscaling/2011-01-01/service-2.json index 74ee2691..936ab811 100644 --- a/botocore/data/autoscaling/2011-01-01/service-2.json +++ b/botocore/data/autoscaling/2011-01-01/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"autoscaling", "protocol":"query", "serviceFullName":"Auto Scaling", + "serviceId":"Auto Scaling", "signatureVersion":"v4", "uid":"autoscaling-2011-01-01", "xmlNamespace":"http://autoscaling.amazonaws.com/doc/2011-01-01/" diff --git a/botocore/data/budgets/2016-10-20/service-2.json b/botocore/data/budgets/2016-10-20/service-2.json index e84726d9..67723dc8 100644 --- a/botocore/data/budgets/2016-10-20/service-2.json +++ b/botocore/data/budgets/2016-10-20/service-2.json @@ -7,6 +7,7 @@ "protocol":"json", "serviceAbbreviation":"AWSBudgets", "serviceFullName":"AWS Budgets", + "serviceId":"Budgets", "signatureVersion":"v4", 
"targetPrefix":"AWSBudgetServiceGateway", "uid":"budgets-2016-10-20" @@ -75,7 +76,7 @@ {"shape":"InvalidParameterException"}, {"shape":"NotFoundException"} ], - "documentation":"

Deletes a budget. You can delete your budget at any time.

Deleting a budget also deletes the notifications and subscribers that are associated with that budget.

" + "documentation":"

Deletes a budget. You can delete your budget at any time.

Deleting a budget also deletes the notifications and subscribers associated with that budget.

" }, "DeleteNotification":{ "name":"DeleteNotification", @@ -90,7 +91,7 @@ {"shape":"InternalErrorException"}, {"shape":"NotFoundException"} ], - "documentation":"

Deletes a notification.

Deleting a notification also deletes the subscribers that are associated with the notification.

" + "documentation":"

Deletes a notification.

Deleting a notification also deletes the subscribers associated with the notification.

" }, "DeleteSubscriber":{ "name":"DeleteSubscriber", @@ -105,7 +106,7 @@ {"shape":"InvalidParameterException"}, {"shape":"NotFoundException"} ], - "documentation":"

Deletes a subscriber.

Deleting the last subscriber to a notification also deletes the notification.

" + "documentation":"

Deletes a subscriber.

Deleting the last subscriber to a notification also deletes the notification.

" }, "DescribeBudget":{ "name":"DescribeBudget", @@ -137,7 +138,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"ExpiredNextTokenException"} ], - "documentation":"

Lists the budgets that are associated with an account.

" + "documentation":"

Lists the budgets associated with an account.

" }, "DescribeNotificationsForBudget":{ "name":"DescribeNotificationsForBudget", @@ -154,7 +155,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"ExpiredNextTokenException"} ], - "documentation":"

Lists the notifications that are associated with a budget.

" + "documentation":"

Lists the notifications associated with a budget.

" }, "DescribeSubscribersForNotification":{ "name":"DescribeSubscribersForNotification", @@ -171,7 +172,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"ExpiredNextTokenException"} ], - "documentation":"

Lists the subscribers that are associated with a notification.

" + "documentation":"

Lists the subscribers associated with a notification.

" }, "UpdateBudget":{ "name":"UpdateBudget", @@ -186,7 +187,7 @@ {"shape":"InvalidParameterException"}, {"shape":"NotFoundException"} ], - "documentation":"

Updates a budget. You can change every part of a budget except for the budgetName and the calculatedSpend. When you modify a budget, the calculatedSpend drops to zero until AWS has new usage data to use for forecasting.

" + "documentation":"

Updates a budget. You can change every part of a budget except for the budgetName and the calculatedSpend. When a budget is modified, the calculatedSpend drops to zero until AWS has new usage data to use for forecasting.

" }, "UpdateNotification":{ "name":"UpdateNotification", @@ -224,7 +225,7 @@ "shapes":{ "AccountId":{ "type":"string", - "documentation":"

The account ID of the user. It should be a 12-digit number.

", + "documentation":"

The account ID of the customer. It should be a 12 digit number.

", "max":12, "min":12 }, @@ -238,48 +239,48 @@ "members":{ "BudgetName":{ "shape":"BudgetName", - "documentation":"

The name of a budget. The name must be unique within accounts. The : and \\ characters are not allowed in BudgetName.

" + "documentation":"

The name of a budget. Unique within accounts. : and \\ characters are not allowed in the BudgetName.

" }, "BudgetLimit":{ "shape":"Spend", - "documentation":"

The total amount of cost, usage, RI utilization, or RI coverage that you want to track with your budget.

BudgetLimit is required for cost or usage budgets, but optional for RI utilization or coverage budgets. RI utilization or coverage budgets default to 100, which is the only valid value for RI utilization or coverage budgets.

" + "documentation":"

The total amount of cost, usage, or RI utilization that you want to track with your budget.

BudgetLimit is required for cost or usage budgets, but optional for RI utilization budgets. RI utilization budgets default to the only valid value for RI utilization budgets, which is 100.

" }, "CostFilters":{ "shape":"CostFilters", - "documentation":"

The cost filters, such as service or region, that are applied to a budget.

" + "documentation":"

The cost filters applied to a budget, such as service or region.

" }, "CostTypes":{ "shape":"CostTypes", - "documentation":"

The types of costs that are included in this COST budget.

USAGE, RI_UTILIZATION, and RI_COVERAGE budgets do not have CostTypes.

" + "documentation":"

The types of costs included in this budget.

" }, "TimeUnit":{ "shape":"TimeUnit", - "documentation":"

The length of time until a budget resets the actual and forecasted spend. DAILY is available only for RI_UTILIZATION and RI_COVERAGE budgets.

" + "documentation":"

The length of time until a budget resets the actual and forecasted spend.

" }, "TimePeriod":{ "shape":"TimePeriod", - "documentation":"

The period of time that is covered by a budget. The period has a start date and an end date. The start date must come before the end date. The end date must come before 06/15/87 00:00 UTC.

If you create your budget and don't specify a start date, AWS defaults to the start of your chosen time period (DAILY, MONTHLY, QUARTERLY, or ANNUALLY). For example, if you created your budget on January 24, 2018, chose DAILY, and didn't set a start date, AWS set your start date to 01/24/18 00:00 UTC. If you chose MONTHLY, AWS set your start date to 01/01/18 00:00 UTC. If you didn't specify an end date, AWS set your end date to 06/15/87 00:00 UTC. The defaults are the same for the AWS Billing and Cost Management console and the API.

You can change either date with the UpdateBudget operation.

After the end date, AWS deletes the budget and all associated notifications and subscribers.

" + "documentation":"

The period of time covered by a budget. Has a start date and an end date. The start date must come before the end date. There are no restrictions on the end date.

If you created your budget and didn't specify a start date, AWS defaults to the start of your chosen time period (i.e. DAILY, MONTHLY, QUARTERLY, ANNUALLY). For example, if you created your budget on January 24th 2018, chose DAILY, and didn't set a start date, AWS set your start date to 01/24/18 00:00 UTC. If you chose MONTHLY, AWS set your start date to 01/01/18 00:00 UTC. If you didn't specify an end date, AWS set your end date to 06/15/87 00:00 UTC. The defaults are the same for the AWS Billing and Cost Management console and the API.

You can change either date with the UpdateBudget operation.

After the end date, AWS deletes the budget and all associated notifications and subscribers.

" }, "CalculatedSpend":{ "shape":"CalculatedSpend", - "documentation":"

The actual and forecasted cost or usage that the budget tracks.

" + "documentation":"

The actual and forecasted cost or usage being tracked by a budget.

" }, "BudgetType":{ "shape":"BudgetType", - "documentation":"

Whether this budget tracks monetary costs, usage, RI utilization, or RI coverage.

" + "documentation":"

Whether this budget tracks monetary costs, usage, or RI utilization.

" } }, - "documentation":"

Represents the output of the CreateBudget operation. The content consists of the detailed metadata and data file information, and the current status of the budget object.

This is the ARN pattern for a budget:

arn:aws:budgetservice::AccountId:budget/budgetName

" + "documentation":"

Represents the output of the CreateBudget operation. The content consists of the detailed metadata and data file information, and the current status of the budget.

The ARN pattern for a budget is: arn:aws:budgetservice::AccountId:budget/budgetName

" }, "BudgetName":{ "type":"string", - "documentation":"

A string that represents the budget name. The \":\" and \"\\\" characters are not allowed.

", + "documentation":"

A string represents the budget name. No \":\" and \"\\\" character is allowed.

", "max":100, "pattern":"[^:\\\\]+" }, "BudgetType":{ "type":"string", - "documentation":"

The type of a budget. It must be one of the following types:

COST, USAGE, RI_UTILIZATION, or RI_COVERAGE.

", + "documentation":"

The type of a budget. It should be COST, USAGE, or RI_UTILIZATION.

", "enum":[ "USAGE", "COST", @@ -290,7 +291,7 @@ "Budgets":{ "type":"list", "member":{"shape":"Budget"}, - "documentation":"

A list of budgets.

" + "documentation":"

A list of budgets

" }, "CalculatedSpend":{ "type":"structure", @@ -305,11 +306,11 @@ "documentation":"

The amount of cost, usage, or RI units that you are forecasted to use.

" } }, - "documentation":"

The spend objects that are associated with this budget. The actualSpend tracks how much you've used, cost, usage, or RI units, and the forecastedSpend tracks how much you are predicted to spend if your current usage remains steady.

For example, if it is the 20th of the month and you have spent 50 dollars on Amazon EC2, your actualSpend is 50 USD, and your forecastedSpend is 75 USD.

" + "documentation":"

The spend objects associated with this budget. The actualSpend tracks how much you've used, cost, usage, or RI units, and the forecastedSpend tracks how much you are predicted to spend if your current usage remains steady.

For example, if it is the 20th of the month and you have spent 50 dollars on Amazon EC2, your actualSpend is 50 USD, and your forecastedSpend is 75 USD.

" }, "ComparisonOperator":{ "type":"string", - "documentation":"

The comparison operator of a notification. Currently the service supports the following operators:

GREATER_THAN, LESS_THAN, EQUAL_TO

", + "documentation":"

The comparison operator of a notification. Currently we support less than, equal to and greater than.

", "enum":[ "GREATER_THAN", "LESS_THAN", @@ -320,7 +321,7 @@ "type":"map", "key":{"shape":"GenericString"}, "value":{"shape":"DimensionValues"}, - "documentation":"

A map that represents the cost filters that are applied to the budget.

" + "documentation":"

A map that represents the cost filters applied to the budget.

" }, "CostTypes":{ "type":"structure", @@ -335,7 +336,7 @@ }, "UseBlended":{ "shape":"NullableBoolean", - "documentation":"

Specifies whether a budget uses a blended rate.

The default value is false.

" + "documentation":"

Specifies whether a budget uses blended rate.

The default value is false.

" }, "IncludeRefund":{ "shape":"NullableBoolean", @@ -370,7 +371,7 @@ "documentation":"

Specifies whether a budget uses the amortized rate.

The default value is false.

" } }, - "documentation":"

The types of cost that are included in a COST budget, such as tax and subscriptions.

USAGE, RI_UTILIZATION, and RI_COVERAGE budgets do not have CostTypes.

" + "documentation":"

The types of cost included in a budget, such as tax and subscriptions.

" }, "CreateBudgetRequest":{ "type":"structure", @@ -389,7 +390,7 @@ }, "NotificationsWithSubscribers":{ "shape":"NotificationWithSubscribersList", - "documentation":"

A notification that you want to associate with a budget. A budget can have up to five notifications, and each notification can have one SNS subscriber and up to 10 email subscribers. If you include notifications and subscribers in your CreateBudget call, AWS creates the notifications and subscribers for you.

" + "documentation":"

A notification that you want to associate with a budget. A budget can have up to five notifications, and each notification can have one SNS subscriber and up to ten email subscribers. If you include notifications and subscribers in your CreateBudget call, AWS creates the notifications and subscribers for you.

" } }, "documentation":"

Request of CreateBudget

" @@ -415,7 +416,7 @@ }, "BudgetName":{ "shape":"BudgetName", - "documentation":"

The name of the budget that you want AWS to notify you about. Budget names must be unique within an account.

" + "documentation":"

The name of the budget that you want AWS to notified you about. Budget names must be unique within an account.

" }, "Notification":{ "shape":"Notification", @@ -423,7 +424,7 @@ }, "Subscribers":{ "shape":"Subscribers", - "documentation":"

A list of subscribers that you want to associate with the notification. Each notification can have one SNS subscriber and up to 10 email subscribers.

" + "documentation":"

A list of subscribers that you want to associate with the notification. Each notification can have one SNS subscriber and up to ten email subscribers.

" } }, "documentation":"

Request of CreateNotification

" @@ -445,7 +446,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The accountId that is associated with the budget that you want to create a subscriber for.

" + "documentation":"

The accountId associated with the budget that you want to create a subscriber for.

" }, "BudgetName":{ "shape":"BudgetName", @@ -601,11 +602,11 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

Optional integer. Specifies the maximum number of results to return in a response.

" + "documentation":"

Optional integer. Specifies the maximum number of results to return in response.

" }, "NextToken":{ "shape":"GenericString", - "documentation":"

The pagination token that you include in your request to indicate the next set of results that you want to retrieve.

" + "documentation":"

The pagination token that indicates the next set of results to retrieve.

" } }, "documentation":"

Request of DescribeBudgets

" @@ -619,7 +620,7 @@ }, "NextToken":{ "shape":"GenericString", - "documentation":"

The pagination token in the service response that indicates the next set of results that you can retrieve.

" + "documentation":"

The pagination token that indicates the next set of results that you can retrieve.

" } }, "documentation":"

Response of DescribeBudgets

" @@ -641,11 +642,11 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

Optional integer. Specifies the maximum number of results to return in a response.

" + "documentation":"

Optional integer. Specifies the maximum number of results to return in response.

" }, "NextToken":{ "shape":"GenericString", - "documentation":"

The pagination token that you include in your request to indicate the next set of results that you want to retrieve.

" + "documentation":"

The pagination token that indicates the next set of results to retrieve.

" } }, "documentation":"

Request of DescribeNotificationsForBudget

" @@ -655,11 +656,11 @@ "members":{ "Notifications":{ "shape":"Notifications", - "documentation":"

A list of notifications that are associated with a budget.

" + "documentation":"

A list of notifications associated with a budget.

" }, "NextToken":{ "shape":"GenericString", - "documentation":"

The pagination token in the service response that indicates the next set of results that you can retrieve.

" + "documentation":"

The pagination token that indicates the next set of results that you can retrieve.

" } }, "documentation":"

Response of GetNotificationsForBudget

" @@ -686,11 +687,11 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

Optional integer. Specifies the maximum number of results to return in a response.

" + "documentation":"

Optional integer. Specifies the maximum number of results to return in response.

" }, "NextToken":{ "shape":"GenericString", - "documentation":"

The pagination token that you include in your request to indicate the next set of results that you want to retrieve.

" + "documentation":"

The pagination token that indicates the next set of results to retrieve.

" } }, "documentation":"

Request of DescribeSubscribersForNotification

" @@ -700,11 +701,11 @@ "members":{ "Subscribers":{ "shape":"Subscribers", - "documentation":"

A list of subscribers that are associated with a notification.

" + "documentation":"

A list of subscribers associated with a notification.

" }, "NextToken":{ "shape":"GenericString", - "documentation":"

The pagination token in the service response that indicates the next set of results that you can retrieve.

" + "documentation":"

The pagination token that indicates the next set of results that you can retrieve.

" } }, "documentation":"

Response of DescribeSubscribersForNotification

" @@ -731,11 +732,11 @@ }, "GenericString":{ "type":"string", - "documentation":"

A generic string.

" + "documentation":"

A generic String.

" }, "GenericTimestamp":{ "type":"timestamp", - "documentation":"

A generic time stamp. In Java, it is transformed to a Date object.

" + "documentation":"

A generic timestamp. In Java it is transformed to a Date object.

" }, "InternalErrorException":{ "type":"structure", @@ -763,7 +764,7 @@ }, "MaxResults":{ "type":"integer", - "documentation":"

An integer that represents how many entries a paginated response contains. The maximum is 100.

", + "documentation":"

An integer to represent how many entries a paginated response contains. Maximum is set to 100.

", "box":true, "max":100, "min":1 @@ -790,28 +791,28 @@ }, "ComparisonOperator":{ "shape":"ComparisonOperator", - "documentation":"

The comparison that is used for this notification.

" + "documentation":"

The comparison used for this notification.

" }, "Threshold":{ "shape":"NotificationThreshold", - "documentation":"

The threshold that is associated with a notification. Thresholds are always a percentage.

" + "documentation":"

The threshold associated with a notification. Thresholds are always a percentage.

" }, "ThresholdType":{ "shape":"ThresholdType", - "documentation":"

The type of threshold for a notification. For ACTUAL thresholds, AWS notifies you when you go over the threshold. For FORECASTED thresholds, AWS notifies you when you are forecasted to go over the threshold.

" + "documentation":"

The type of threshold for a notification. For ACTUAL thresholds, AWS notifies you when you go over the threshold, and for FORECASTED thresholds AWS notifies you when you are forecasted to go over the threshold.

" } }, - "documentation":"

A notification that is associated with a budget. A budget can have up to five notifications.

Each notification must have at least one subscriber. A notification can have one SNS subscriber and up to 10 email subscribers, for a total of 11 subscribers.

For example, if you have a budget for 200 dollars and you want to be notified when you go over 160 dollars, create a notification with the following parameters:

" + "documentation":"

A notification associated with a budget. A budget can have up to five notifications.

Each notification must have at least one subscriber. A notification can have one SNS subscriber and up to ten email subscribers, for a total of 11 subscribers.

For example, if you have a budget for 200 dollars and you want to be notified when you go over 160 dollars, create a notification with the following parameters:

" }, "NotificationThreshold":{ "type":"double", - "documentation":"

The threshold of a notification. It must be a number between 0 and 1,000,000,000.

", + "documentation":"

The threshold of a notification. It should be a number between 0 and 1,000,000,000.

", "max":1000000000, "min":0.1 }, "NotificationType":{ "type":"string", - "documentation":"

The type of a notification. It must be ACTUAL or FORECASTED.

", + "documentation":"

The type of a notification. It should be ACTUAL or FORECASTED.

", "enum":[ "ACTUAL", "FORECASTED" @@ -826,19 +827,19 @@ "members":{ "Notification":{ "shape":"Notification", - "documentation":"

The notification that is associated with a budget.

" + "documentation":"

The notification associated with a budget.

" }, "Subscribers":{ "shape":"Subscribers", "documentation":"

A list of subscribers who are subscribed to this notification.

" } }, - "documentation":"

A notification with subscribers. A notification can have one SNS subscriber and up to 10 email subscribers, for a total of 11 subscribers.

" + "documentation":"

A notification with subscribers. A notification can have one SNS subscriber and up to ten email subscribers, for a total of 11 subscribers.

" }, "NotificationWithSubscribersList":{ "type":"list", "member":{"shape":"NotificationWithSubscribers"}, - "documentation":"

A list of notifications, each with a list of subscribers.

", + "documentation":"

A list of Notifications, each with a list of subscribers.

", "max":5 }, "Notifications":{ @@ -852,8 +853,8 @@ }, "NumericValue":{ "type":"string", - "documentation":"

A string that represents a numeric value.

", - "pattern":"[0-9]*(\\.)?[0-9]+" + "documentation":"

A string to represent NumericValue.

", + "pattern":"([0-9]*\\.)?[0-9]+" }, "Spend":{ "type":"structure", @@ -864,14 +865,14 @@ "members":{ "Amount":{ "shape":"NumericValue", - "documentation":"

The cost or usage amount that is associated with a budget forecast, actual spend, or budget threshold.

" + "documentation":"

The cost or usage amount associated with a budget forecast, actual spend, or budget threshold.

" }, "Unit":{ "shape":"UnitValue", - "documentation":"

The unit of measurement that is used for the budget forecast, actual spend, or budget threshold, such as dollars or GB.

" + "documentation":"

The unit of measurement used for the budget forecast, actual spend, or budget threshold, such as dollars or GB.

" } }, - "documentation":"

The amount of cost or usage that is measured for a budget.

For example, a Spend for 3 GB of S3 usage would have the following parameters:

" + "documentation":"

The amount of cost or usage being measured for a budget.

For example, a Spend for 3 GB of S3 usage would have the following parameters:

" }, "Subscriber":{ "type":"structure", @@ -889,11 +890,11 @@ "documentation":"

The address that AWS sends budget notifications to, either an SNS topic or an email.

" } }, - "documentation":"

The subscriber to a budget notification. The subscriber consists of a subscription type and either an Amazon SNS topic or an email address.

For example, an email subscriber would have the following parameters:

" + "documentation":"

The subscriber to a budget notification. The subscriber consists of a subscription type and either an Amazon Simple Notification Service topic or an email address.

For example, an email subscriber would have the following parameters:

" }, "SubscriberAddress":{ "type":"string", - "documentation":"

A string that contains an email address or SNS topic for the subscriber's address.

", + "documentation":"

String containing email or sns topic for the subscriber address.

", "min":1 }, "Subscribers":{ @@ -924,18 +925,18 @@ "members":{ "Start":{ "shape":"GenericTimestamp", - "documentation":"

The start date for a budget. If you created your budget and didn't specify a start date, AWS defaults to the start of your chosen time period (DAILY, MONTHLY, QUARTERLY, or ANNUALLY). For example, if you created your budget on January 24, 2018, chose DAILY, and didn't set a start date, AWS set your start date to 01/24/18 00:00 UTC. If you chose MONTHLY, AWS set your start date to 01/01/18 00:00 UTC. The defaults are the same for the AWS Billing and Cost Management console and the API.

You can change your start date with the UpdateBudget operation.

" + "documentation":"

The start date for a budget. If you created your budget and didn't specify a start date, AWS defaults to the start of your chosen time period (i.e. DAILY, MONTHLY, QUARTERLY, ANNUALLY). For example, if you created your budget on January 24th 2018, chose DAILY, and didn't set a start date, AWS set your start date to 01/24/18 00:00 UTC. If you chose MONTHLY, AWS set your start date to 01/01/18 00:00 UTC. The defaults are the same for the AWS Billing and Cost Management console and the API.

You can change your start date with the UpdateBudget operation.

" }, "End":{ "shape":"GenericTimestamp", "documentation":"

The end date for a budget. If you didn't specify an end date, AWS set your end date to 06/15/87 00:00 UTC. The defaults are the same for the AWS Billing and Cost Management console and the API.

After the end date, AWS deletes the budget and all associated notifications and subscribers. You can change your end date with the UpdateBudget operation.

" } }, - "documentation":"

The period of time that is covered by a budget. The period has a start date and an end date. The start date must come before the end date. There are no restrictions on the end date.

" + "documentation":"

The period of time covered by a budget. Has a start date and an end date. The start date must come before the end date. There are no restrictions on the end date.

" }, "TimeUnit":{ "type":"string", - "documentation":"

The time unit of the budget, such as MONTHLY or QUARTERLY.

", + "documentation":"

The time unit of the budget. e.g. MONTHLY, QUARTERLY, etc.

", "enum":[ "DAILY", "MONTHLY", @@ -945,7 +946,7 @@ }, "UnitValue":{ "type":"string", - "documentation":"

A string that represents the spend unit of a budget. It can't be null or empty.

", + "documentation":"

A string to represent budget spend unit. It should be not null and not empty.

", "min":1 }, "UpdateBudgetRequest":{ @@ -991,7 +992,7 @@ }, "OldNotification":{ "shape":"Notification", - "documentation":"

The previous notification that is associated with a budget.

" + "documentation":"

The previous notification associated with a budget.

" }, "NewNotification":{ "shape":"Notification", @@ -1030,11 +1031,11 @@ }, "OldSubscriber":{ "shape":"Subscriber", - "documentation":"

The previous subscriber that is associated with a budget notification.

" + "documentation":"

The previous subscriber associated with a budget notification.

" }, "NewSubscriber":{ "shape":"Subscriber", - "documentation":"

The updated subscriber that is associated with a budget notification.

" + "documentation":"

The updated subscriber associated with a budget notification.

" } }, "documentation":"

Request of UpdateSubscriber

" @@ -1050,5 +1051,5 @@ "documentation":"

The error message the exception carries.

" } }, - "documentation":"

The AWS Budgets API enables you to use AWS Budgets to plan your service usage, service costs, and instance reservations. The API reference provides descriptions, syntax, and usage examples for each of the actions and data types for AWS Budgets.

Budgets provide you with a way to see the following information:

AWS updates your budget status several times a day. Budgets track your unblended costs, subscriptions, refunds, and RIs. You can create the following types of budgets:

Service Endpoint

The AWS Budgets API provides the following endpoint:

For information about costs that are associated with the AWS Budgets API, see AWS Cost Management Pricing.

" + "documentation":"

Budgets enable you to plan your service usage, service costs, and your RI utilization. You can also track how close your plan is to your budgeted amount or to the free tier limits. Budgets provide you with a quick way to see your usage-to-date and current estimated charges from AWS and to see how much your predicted usage accrues in charges by the end of the month. Budgets also compare current estimates and charges to the amount that you indicated you want to use or spend and lets you see how much of your budget has been used. AWS updates your budget status several times a day. Budgets track your unblended costs, subscriptions, and refunds. You can create the following types of budgets:

You can create up to 20,000 budgets per AWS master account. Your first two budgets are free of charge. Each additional budget costs $0.02 per day. You can set up optional notifications that warn you if you exceed, or are forecasted to exceed, your budgeted amount. You can have notifications sent to an Amazon SNS topic, to an email address, or to both. For more information, see Creating an Amazon SNS Topic for Budget Notifications. AWS Free Tier usage alerts via AWS Budgets are provided for you, and do not count toward your budget limits.

Service Endpoint

The AWS Budgets API provides the following endpoint:

For information about costs associated with the AWS Budgets API, see AWS Cost Management Pricing.

" } diff --git a/botocore/data/ce/2017-10-25/service-2.json b/botocore/data/ce/2017-10-25/service-2.json index 8a5bafe0..9c88df55 100644 --- a/botocore/data/ce/2017-10-25/service-2.json +++ b/botocore/data/ce/2017-10-25/service-2.json @@ -114,8 +114,13 @@ "shapes":{ "AccountScope":{ "type":"string", - "enum":["PAYER"] + "enum":[ + "PAYER", + "LINKED" + ] }, + "AmortizedRecurringFee":{"type":"string"}, + "AmortizedUpfrontFee":{"type":"string"}, "AttributeType":{"type":"string"}, "AttributeValue":{"type":"string"}, "Attributes":{ @@ -351,7 +356,7 @@ "documentation":"

The specific Tag to use for Expression.

" } }, - "documentation":"

Use Expression to filter by cost or by usage. There are two patterns:

" + "documentation":"

Use Expression to filter by cost or by usage. There are two patterns:

" }, "Expressions":{ "type":"list", @@ -376,7 +381,7 @@ }, "Metrics":{ "shape":"MetricNames", - "documentation":"

Which metrics are returned in the query. For more information about blended and unblended rates, see Why does the \"blended\" annotation appear on some line items in my bill?.

Valid values are BlendedCost, UnblendedCost, and UsageQuantity.

If you return the UsageQuantity metric, the service aggregates all usage numbers without taking into account the units. For example, if you aggregate usageQuantity across all of EC2, the results aren't meaningful because EC2 compute hours and data transfer are measured in different units (for example, hours vs. GB). To get more meaningful UsageQuantity metrics, filter by UsageType or UsageTypeGroups.

Metrics is required for GetCostAndUsage requests.

" + "documentation":"

Which metrics are returned in the query. For more information about blended and unblended rates, see Why does the \"blended\" annotation appear on some line items in my bill?.

Valid values are AmortizedCost, BlendedCost, UnblendedCost, and UsageQuantity.

If you return the UsageQuantity metric, the service aggregates all usage numbers without taking into account the units. For example, if you aggregate usageQuantity across all of EC2, the results aren't meaningful because EC2 compute hours and data transfer are measured in different units (for example, hours vs. GB). To get more meaningful UsageQuantity metrics, filter by UsageType or UsageTypeGroups.

Metrics is required for GetCostAndUsage requests.

" }, "GroupBy":{ "shape":"GroupDefinitions", @@ -478,7 +483,7 @@ }, "Filter":{ "shape":"Expression", - "documentation":"

Filters utilization data by dimensions. You can filter by the following dimensions:

GetReservationCoverage uses the same Expression object as the other operations, but only AND is supported among each dimension. You can nest only one level deep. If there are multiple values for a dimension, they are OR'd together.

" + "documentation":"

Filters utilization data by dimensions. You can filter by the following dimensions:

GetReservationCoverage uses the same Expression object as the other operations, but only AND is supported among each dimension. You can nest only one level deep. If there are multiple values for a dimension, they are OR'd together.

If you don't provide a SERVICE filter, Cost Explorer defaults to EC2.

" }, "NextPageToken":{ "shape":"NextPageToken", @@ -519,7 +524,7 @@ }, "AccountScope":{ "shape":"AccountScope", - "documentation":"

The account scope that you want recommendations for. The only valid value is Payer. This means that AWS includes the master account and any member accounts when it calculates its recommendations.

" + "documentation":"

The account scope that you want recommendations for. PAYER means that AWS includes the master account and any member accounts when it calculates its recommendations. LINKED means that AWS includes only member accounts when it calculates its recommendations.

Valid values are PAYER and LINKED.

" }, "LookbackPeriodInDays":{ "shape":"LookbackPeriodInDays", @@ -776,6 +781,7 @@ "key":{"shape":"MetricName"}, "value":{"shape":"MetricValue"} }, + "NetRISavings":{"type":"string"}, "NextPageToken":{"type":"string"}, "NonNegativeInteger":{ "type":"integer", @@ -788,6 +794,7 @@ "CONVERTIBLE" ] }, + "OnDemandCostOfRIHoursUsed":{"type":"string"}, "OnDemandHours":{"type":"string"}, "PageSize":{"type":"integer"}, "PaymentOption":{ @@ -818,9 +825,13 @@ "shape":"GenericString", "documentation":"

The database engine that the recommended reservation supports.

" }, + "DatabaseEdition":{ + "shape":"GenericString", + "documentation":"

The database edition that the recommended reservation supports.

" + }, "DeploymentOption":{ "shape":"GenericString", - "documentation":"

Whether the recommendation is for a reservation in a single availability zone or a reservation with a backup in a second availability zone.

" + "documentation":"

Whether the recommendation is for a reservation in a single Availability Zone or a reservation with a backup in a second Availability Zone.

" }, "LicenseModel":{ "shape":"GenericString", @@ -863,9 +874,33 @@ "UnusedHours":{ "shape":"UnusedHours", "documentation":"

The number of RI hours that you didn't use.

" + }, + "OnDemandCostOfRIHoursUsed":{ + "shape":"OnDemandCostOfRIHoursUsed", + "documentation":"

How much your RIs would cost if charged On-Demand rates.

" + }, + "NetRISavings":{ + "shape":"NetRISavings", + "documentation":"

How much you saved due to purchasing and utilizing RIs. AWS calculates this by subtracting TotalAmortizedFee from OnDemandCostOfRIHoursUsed.

" + }, + "TotalPotentialRISavings":{ + "shape":"TotalPotentialRISavings", + "documentation":"

How much you could save if you use your entire reservation.

" + }, + "AmortizedUpfrontFee":{ + "shape":"AmortizedUpfrontFee", + "documentation":"

The upfront cost of your RI, amortized over the RI period.

" + }, + "AmortizedRecurringFee":{ + "shape":"AmortizedRecurringFee", + "documentation":"

The monthly cost of your RI, amortized over the RI period.

" + }, + "TotalAmortizedFee":{ + "shape":"TotalAmortizedFee", + "documentation":"

The total cost of your RI, amortized over the RI period.

" } }, - "documentation":"

The aggregated numbers for your RI usage.

" + "documentation":"

The aggregated numbers for your Reserved Instance (RI) usage.

" }, "ReservationCoverageGroup":{ "type":"structure", @@ -896,7 +931,7 @@ }, "LookbackPeriodInDays":{ "shape":"LookbackPeriodInDays", - "documentation":"

How many days of previous usage that AWS takes into consideration when making this recommendation.

" + "documentation":"

How many days of previous usage that AWS considers when making this recommendation.

" }, "TermInYears":{ "shape":"TermInYears", @@ -1033,7 +1068,7 @@ "documentation":"

The currency code used for this recommendation.

" } }, - "documentation":"

A summary about this recommendation, such as the currency code, the amount that AWS estimates you could save, and the total amount of reservation to purchase.

" + "documentation":"

A summary about this recommendation, such as the currency code, the amount that AWS estimates that you could save, and the total amount of reservation to purchase.

" }, "ReservationPurchaseRecommendations":{ "type":"list", @@ -1059,7 +1094,7 @@ "documentation":"

How much you used this group of RIs.

" } }, - "documentation":"

A group of RIs that share a set of attributes.

" + "documentation":"

A group of Reserved Instances (RIs) that share a set of attributes.

" }, "ReservationUtilizationGroups":{ "type":"list", @@ -1130,6 +1165,8 @@ ] }, "TotalActualHours":{"type":"string"}, + "TotalAmortizedFee":{"type":"string"}, + "TotalPotentialRISavings":{"type":"string"}, "TotalRunningHours":{"type":"string"}, "UnusedHours":{"type":"string"}, "UtilizationByTime":{ diff --git a/botocore/data/cloud9/2017-09-23/service-2.json b/botocore/data/cloud9/2017-09-23/service-2.json index 8b3deb56..46ef6d69 100644 --- a/botocore/data/cloud9/2017-09-23/service-2.json +++ b/botocore/data/cloud9/2017-09-23/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceFullName":"AWS Cloud9", + "serviceId":"Cloud9", "signatureVersion":"v4", "targetPrefix":"AWSCloud9WorkspaceManagementService", "uid":"cloud9-2017-09-23" diff --git a/botocore/data/clouddirectory/2016-05-10/service-2.json b/botocore/data/clouddirectory/2016-05-10/service-2.json index 05de5774..ca413a04 100644 --- a/botocore/data/clouddirectory/2016-05-10/service-2.json +++ b/botocore/data/clouddirectory/2016-05-10/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"clouddirectory", "protocol":"rest-json", "serviceFullName":"Amazon CloudDirectory", + "serviceId":"CloudDirectory", "signatureVersion":"v4", "signingName":"clouddirectory", "uid":"clouddirectory-2016-05-10" @@ -624,6 +625,28 @@ ], "documentation":"

Gets details of the Facet, such as facet name, attributes, Rules, or ObjectType. You can call this on all kinds of schema facets -- published, development, or applied.

" }, + "GetLinkAttributes":{ + "name":"GetLinkAttributes", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/typedlink/attributes/get", + "responseCode":200 + }, + "input":{"shape":"GetLinkAttributesRequest"}, + "output":{"shape":"GetLinkAttributesResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"FacetValidationException"} + ], + "documentation":"

Retrieves attributes that are associated with a typed link.

" + }, "GetObjectAttributes":{ "name":"GetObjectAttributes", "http":{ @@ -1278,6 +1301,28 @@ ], "documentation":"

Does the following:

  1. Adds new Attributes, Rules, or ObjectTypes.

  2. Updates existing Attributes, Rules, or ObjectTypes.

  3. Deletes existing Attributes, Rules, or ObjectTypes.

" }, + "UpdateLinkAttributes":{ + "name":"UpdateLinkAttributes", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/typedlink/attributes/update", + "responseCode":200 + }, + "input":{"shape":"UpdateLinkAttributesRequest"}, + "output":{"shape":"UpdateLinkAttributesResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"FacetValidationException"} + ], + "documentation":"

Updates a given typed link’s attributes. Attributes to be updated must not contribute to the typed link’s identity, as defined by its IdentityAttributeOrder.

" + }, "UpdateObjectAttributes":{ "name":"UpdateObjectAttributes", "http":{ @@ -1296,6 +1341,7 @@ {"shape":"AccessDeniedException"}, {"shape":"DirectoryNotEnabledException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"LinkNameAlreadyInUseException"}, {"shape":"FacetValidationException"} ], "documentation":"

Updates a given object's attributes.

" @@ -2042,6 +2088,34 @@ }, "documentation":"

Represents the output of a DetachTypedLink response operation.

" }, + "BatchGetLinkAttributes":{ + "type":"structure", + "required":[ + "TypedLinkSpecifier", + "AttributeNames" + ], + "members":{ + "TypedLinkSpecifier":{ + "shape":"TypedLinkSpecifier", + "documentation":"

Allows a typed link specifier to be accepted as input.

" + }, + "AttributeNames":{ + "shape":"AttributeNameList", + "documentation":"

A list of attribute names whose values will be retrieved.

" + } + }, + "documentation":"

Retrieves attributes that are associated with a typed link inside a BatchRead operation. For more information, see GetLinkAttributes and BatchReadRequest$Operations.

" + }, + "BatchGetLinkAttributesResponse":{ + "type":"structure", + "members":{ + "Attributes":{ + "shape":"AttributeKeyAndValueList", + "documentation":"

The attributes that are associated with the typed link.

" + } + }, + "documentation":"

Represents the output of a GetLinkAttributes response operation.

" + }, "BatchGetObjectAttributes":{ "type":"structure", "required":[ @@ -2537,6 +2611,10 @@ "ListIncomingTypedLinks":{ "shape":"BatchListIncomingTypedLinks", "documentation":"

Returns a paginated list of all the incoming TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed link.

" + }, + "GetLinkAttributes":{ + "shape":"BatchGetLinkAttributes", + "documentation":"

Retrieves attributes that are associated with a typed link.

" } }, "documentation":"

Represents the output of a BatchRead operation.

" @@ -2647,6 +2725,10 @@ "ListIncomingTypedLinks":{ "shape":"BatchListIncomingTypedLinksResponse", "documentation":"

Returns a paginated list of all the incoming TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed link.

" + }, + "GetLinkAttributes":{ + "shape":"BatchGetLinkAttributesResponse", + "documentation":"

The list of attributes to retrieve from the typed link.

" } }, "documentation":"

Represents the output of a BatchRead success response operation.

" @@ -2676,6 +2758,30 @@ }, "documentation":"

An empty result that represents success.

" }, + "BatchUpdateLinkAttributes":{ + "type":"structure", + "required":[ + "TypedLinkSpecifier", + "AttributeUpdates" + ], + "members":{ + "TypedLinkSpecifier":{ + "shape":"TypedLinkSpecifier", + "documentation":"

Allows a typed link specifier to be accepted as input.

" + }, + "AttributeUpdates":{ + "shape":"LinkAttributeUpdateList", + "documentation":"

The attributes update structure.

" + } + }, + "documentation":"

Updates a given typed link’s attributes inside a BatchRead operation. Attributes to be updated must not contribute to the typed link’s identity, as defined by its IdentityAttributeOrder. For more information, see UpdateLinkAttributes and BatchReadRequest$Operations.

" + }, + "BatchUpdateLinkAttributesResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

Represents the output of a UpdateLinkAttributes response operation.

" + }, "BatchUpdateObjectAttributes":{ "type":"structure", "required":[ @@ -2795,6 +2901,10 @@ "DetachTypedLink":{ "shape":"BatchDetachTypedLink", "documentation":"

Detaches a typed link from a specified source and target object. For more information, see Typed link.

" + }, + "UpdateLinkAttributes":{ + "shape":"BatchUpdateLinkAttributes", + "documentation":"

Updates a given object's attributes.

" } }, "documentation":"

Represents the output of a BatchWrite operation.

" @@ -2861,6 +2971,10 @@ "DetachTypedLink":{ "shape":"BatchDetachTypedLinkResponse", "documentation":"

Detaches a typed link from a specified source and target object. For more information, see Typed link.

" + }, + "UpdateLinkAttributes":{ + "shape":"BatchUpdateLinkAttributesResponse", + "documentation":"

Represents the output of a BatchWrite response operation.

" } }, "documentation":"

Represents the output of a BatchWrite response operation.

" @@ -3697,6 +3811,43 @@ } } }, + "GetLinkAttributesRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "TypedLinkSpecifier", + "AttributeNames" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the Directory where the typed link resides. For more information, see arns or Typed link.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "TypedLinkSpecifier":{ + "shape":"TypedLinkSpecifier", + "documentation":"

Allows a typed link specifier to be accepted as input.

" + }, + "AttributeNames":{ + "shape":"AttributeNameList", + "documentation":"

A list of attribute names whose values will be retrieved.

" + }, + "ConsistencyLevel":{ + "shape":"ConsistencyLevel", + "documentation":"

The consistency level at which to retrieve the attributes on a typed link.

" + } + } + }, + "GetLinkAttributesResponse":{ + "type":"structure", + "members":{ + "Attributes":{ + "shape":"AttributeKeyAndValueList", + "documentation":"

The attributes that are associated with the typed link.

" + } + } + }, "GetObjectAttributesRequest":{ "type":"structure", "required":[ @@ -3949,6 +4100,38 @@ "error":{"httpStatusCode":400}, "exception":true }, + "LinkAttributeAction":{ + "type":"structure", + "members":{ + "AttributeActionType":{ + "shape":"UpdateActionType", + "documentation":"

A type that can be either UPDATE_OR_CREATE or DELETE.

" + }, + "AttributeUpdateValue":{ + "shape":"TypedAttributeValue", + "documentation":"

The value that you want to update to.

" + } + }, + "documentation":"

The action to take on a typed link attribute value. Updates are only supported for attributes which don’t contribute to link identity.

" + }, + "LinkAttributeUpdate":{ + "type":"structure", + "members":{ + "AttributeKey":{ + "shape":"AttributeKey", + "documentation":"

The key of the attribute being updated.

" + }, + "AttributeAction":{ + "shape":"LinkAttributeAction", + "documentation":"

The action to perform as part of the attribute update.

" + } + }, + "documentation":"

Structure that contains attribute update information.

" + }, + "LinkAttributeUpdateList":{ + "type":"list", + "member":{"shape":"LinkAttributeUpdate"} + }, "LinkName":{ "type":"string", "max":64, @@ -5528,6 +5711,35 @@ "members":{ } }, + "UpdateLinkAttributesRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "TypedLinkSpecifier", + "AttributeUpdates" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the Directory where the updated typed link resides. For more information, see arns or Typed link.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "TypedLinkSpecifier":{ + "shape":"TypedLinkSpecifier", + "documentation":"

Allows a typed link specifier to be accepted as input.

" + }, + "AttributeUpdates":{ + "shape":"LinkAttributeUpdateList", + "documentation":"

The attributes update structure.

" + } + } + }, + "UpdateLinkAttributesResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateObjectAttributesRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/clouddirectory/2017-01-11/paginators-1.json b/botocore/data/clouddirectory/2017-01-11/paginators-1.json new file mode 100644 index 00000000..22cc439e --- /dev/null +++ b/botocore/data/clouddirectory/2017-01-11/paginators-1.json @@ -0,0 +1,100 @@ +{ + "pagination": { + "ListObjectParentPaths": { + "result_key": "PathToObjectIdentifiersList", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListFacetNames": { + "result_key": "FacetNames", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListPublishedSchemaArns": { + "result_key": "SchemaArns", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListDirectories": { + "result_key": "Directories", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListDevelopmentSchemaArns": { + "result_key": "SchemaArns", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListTypedLinkFacetNames": { + "result_key": "FacetNames", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListIndex": { + "result_key": "IndexAttachments", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListFacetAttributes": { + "result_key": "Attributes", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListObjectPolicies": { + "result_key": "AttachedPolicyIds", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListTagsForResource": { + "result_key": "Tags", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + 
"ListAttachedIndices": { + "result_key": "IndexAttachments", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "LookupPolicy": { + "result_key": "PolicyToPathList", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListPolicyAttachments": { + "result_key": "ObjectIdentifiers", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListObjectAttributes": { + "result_key": "Attributes", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListAppliedSchemaArns": { + "result_key": "SchemaArns", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListTypedLinkFacetAttributes": { + "result_key": "Attributes", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + } + } +} diff --git a/botocore/data/clouddirectory/2017-01-11/service-2.json b/botocore/data/clouddirectory/2017-01-11/service-2.json new file mode 100644 index 00000000..e9d5ee69 --- /dev/null +++ b/botocore/data/clouddirectory/2017-01-11/service-2.json @@ -0,0 +1,5991 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2017-01-11", + "endpointPrefix":"clouddirectory", + "protocol":"rest-json", + "serviceFullName":"Amazon CloudDirectory", + "serviceId":"CloudDirectory", + "signatureVersion":"v4", + "signingName":"clouddirectory", + "uid":"clouddirectory-2017-01-11" + }, + "operations":{ + "AddFacetToObject":{ + "name":"AddFacetToObject", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/object/facets", + "responseCode":200 + }, + "input":{"shape":"AddFacetToObjectRequest"}, + "output":{"shape":"AddFacetToObjectResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + 
{"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"FacetValidationException"} + ], + "documentation":"

Adds a new Facet to an object. An object can have more than one facet applied on it.

" + }, + "ApplySchema":{ + "name":"ApplySchema", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/schema/apply", + "responseCode":200 + }, + "input":{"shape":"ApplySchemaRequest"}, + "output":{"shape":"ApplySchemaResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"SchemaAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidAttachmentException"} + ], + "documentation":"

Copies the input published schema, at the specified version, into the Directory with the same name and version as that of the published schema.

" + }, + "AttachObject":{ + "name":"AttachObject", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/object/attach", + "responseCode":200 + }, + "input":{"shape":"AttachObjectRequest"}, + "output":{"shape":"AttachObjectResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LinkNameAlreadyInUseException"}, + {"shape":"InvalidAttachmentException"}, + {"shape":"ValidationException"}, + {"shape":"FacetValidationException"} + ], + "documentation":"

Attaches an existing object to another object. An object can be accessed in two ways:

  1. Using the path

  2. Using ObjectIdentifier

" + }, + "AttachPolicy":{ + "name":"AttachPolicy", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/policy/attach", + "responseCode":200 + }, + "input":{"shape":"AttachPolicyRequest"}, + "output":{"shape":"AttachPolicyResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotPolicyException"} + ], + "documentation":"

Attaches a policy object to a regular object. An object can have a limited number of attached policies.

" + }, + "AttachToIndex":{ + "name":"AttachToIndex", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/index/attach", + "responseCode":200 + }, + "input":{"shape":"AttachToIndexRequest"}, + "output":{"shape":"AttachToIndexResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"InvalidAttachmentException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LinkNameAlreadyInUseException"}, + {"shape":"IndexedAttributeMissingException"}, + {"shape":"NotIndexException"} + ], + "documentation":"

Attaches the specified object to the specified index.

" + }, + "AttachTypedLink":{ + "name":"AttachTypedLink", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/typedlink/attach", + "responseCode":200 + }, + "input":{"shape":"AttachTypedLinkRequest"}, + "output":{"shape":"AttachTypedLinkResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidAttachmentException"}, + {"shape":"FacetValidationException"} + ], + "documentation":"

Attaches a typed link to a specified source and target object. For more information, see Typed link.

" + }, + "BatchRead":{ + "name":"BatchRead", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/batchread", + "responseCode":200 + }, + "input":{"shape":"BatchReadRequest"}, + "output":{"shape":"BatchReadResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"} + ], + "documentation":"

Performs all the read operations in a batch.

" + }, + "BatchWrite":{ + "name":"BatchWrite", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/batchwrite", + "responseCode":200 + }, + "input":{"shape":"BatchWriteRequest"}, + "output":{"shape":"BatchWriteResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"BatchWriteException"} + ], + "documentation":"

Performs all the write operations in a batch. Either all the operations succeed or none.

" + }, + "CreateDirectory":{ + "name":"CreateDirectory", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/directory/create", + "responseCode":200 + }, + "input":{"shape":"CreateDirectoryRequest"}, + "output":{"shape":"CreateDirectoryResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Creates a Directory by copying the published schema into the directory. A directory cannot be created without a schema.

" + }, + "CreateFacet":{ + "name":"CreateFacet", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/facet/create", + "responseCode":200 + }, + "input":{"shape":"CreateFacetRequest"}, + "output":{"shape":"CreateFacetResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"FacetAlreadyExistsException"}, + {"shape":"InvalidRuleException"}, + {"shape":"FacetValidationException"} + ], + "documentation":"

Creates a new Facet in a schema. Facet creation is allowed only in development or applied schemas.

" + }, + "CreateIndex":{ + "name":"CreateIndex", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/index", + "responseCode":200 + }, + "input":{"shape":"CreateIndexRequest"}, + "output":{"shape":"CreateIndexResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"FacetValidationException"}, + {"shape":"LinkNameAlreadyInUseException"}, + {"shape":"UnsupportedIndexTypeException"} + ], + "documentation":"

Creates an index object. See Indexing for more information.

" + }, + "CreateObject":{ + "name":"CreateObject", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/object", + "responseCode":200 + }, + "input":{"shape":"CreateObjectRequest"}, + "output":{"shape":"CreateObjectResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"FacetValidationException"}, + {"shape":"LinkNameAlreadyInUseException"}, + {"shape":"UnsupportedIndexTypeException"} + ], + "documentation":"

Creates an object in a Directory. Additionally attaches the object to a parent, if a parent reference and LinkName is specified. An object is simply a collection of Facet attributes. You can also use this API call to create a policy object, if the facet from which you create the object is a policy facet.

" + }, + "CreateSchema":{ + "name":"CreateSchema", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/schema/create", + "responseCode":200 + }, + "input":{"shape":"CreateSchemaRequest"}, + "output":{"shape":"CreateSchemaResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"SchemaAlreadyExistsException"} + ], + "documentation":"

Creates a new schema in a development state. A schema can exist in three phases:

" + }, + "CreateTypedLinkFacet":{ + "name":"CreateTypedLinkFacet", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/typedlink/facet/create", + "responseCode":200 + }, + "input":{"shape":"CreateTypedLinkFacetRequest"}, + "output":{"shape":"CreateTypedLinkFacetResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"FacetAlreadyExistsException"}, + {"shape":"InvalidRuleException"}, + {"shape":"FacetValidationException"} + ], + "documentation":"

Creates a TypedLinkFacet. For more information, see Typed link.

" + }, + "DeleteDirectory":{ + "name":"DeleteDirectory", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/directory", + "responseCode":200 + }, + "input":{"shape":"DeleteDirectoryRequest"}, + "output":{"shape":"DeleteDirectoryResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"DirectoryNotDisabledException"}, + {"shape":"InternalServiceException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryDeletedException"}, + {"shape":"RetryableConflictException"}, + {"shape":"InvalidArnException"} + ], + "documentation":"

Deletes a directory. Only disabled directories can be deleted. Deleting a directory cannot be undone. Exercise extreme caution when deleting directories.

" + }, + "DeleteFacet":{ + "name":"DeleteFacet", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/facet/delete", + "responseCode":200 + }, + "input":{"shape":"DeleteFacetRequest"}, + "output":{"shape":"DeleteFacetResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"FacetNotFoundException"}, + {"shape":"FacetInUseException"} + ], + "documentation":"

Deletes a given Facet. All attributes and Rules that are associated with the facet will be deleted. Only development schema facets can be deleted.

" + }, + "DeleteObject":{ + "name":"DeleteObject", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/object/delete", + "responseCode":200 + }, + "input":{"shape":"DeleteObjectRequest"}, + "output":{"shape":"DeleteObjectResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ObjectNotDetachedException"} + ], + "documentation":"

Deletes an object and its associated attributes. Only objects with no children and no parents can be deleted.

" + }, + "DeleteSchema":{ + "name":"DeleteSchema", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/schema", + "responseCode":200 + }, + "input":{"shape":"DeleteSchemaRequest"}, + "output":{"shape":"DeleteSchemaResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"StillContainsLinksException"} + ], + "documentation":"

Deletes a given schema. Only schemas in the development or published state can be deleted.

" + }, + "DeleteTypedLinkFacet":{ + "name":"DeleteTypedLinkFacet", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/typedlink/facet/delete", + "responseCode":200 + }, + "input":{"shape":"DeleteTypedLinkFacetRequest"}, + "output":{"shape":"DeleteTypedLinkFacetResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"FacetNotFoundException"} + ], + "documentation":"

Deletes a TypedLinkFacet. For more information, see Typed link.

" + }, + "DetachFromIndex":{ + "name":"DetachFromIndex", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/index/detach", + "responseCode":200 + }, + "input":{"shape":"DetachFromIndexRequest"}, + "output":{"shape":"DetachFromIndexResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ObjectAlreadyDetachedException"}, + {"shape":"NotIndexException"} + ], + "documentation":"

Detaches the specified object from the specified index.

" + }, + "DetachObject":{ + "name":"DetachObject", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/object/detach", + "responseCode":200 + }, + "input":{"shape":"DetachObjectRequest"}, + "output":{"shape":"DetachObjectResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotNodeException"} + ], + "documentation":"

Detaches a given object from the parent object. The object that is to be detached from the parent is specified by the link name.

" + }, + "DetachPolicy":{ + "name":"DetachPolicy", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/policy/detach", + "responseCode":200 + }, + "input":{"shape":"DetachPolicyRequest"}, + "output":{"shape":"DetachPolicyResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotPolicyException"} + ], + "documentation":"

Detaches a policy from an object.

" + }, + "DetachTypedLink":{ + "name":"DetachTypedLink", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/typedlink/detach", + "responseCode":200 + }, + "input":{"shape":"DetachTypedLinkRequest"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"FacetValidationException"} + ], + "documentation":"

Detaches a typed link from a specified source and target object. For more information, see Typed link.

" + }, + "DisableDirectory":{ + "name":"DisableDirectory", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/directory/disable", + "responseCode":200 + }, + "input":{"shape":"DisableDirectoryRequest"}, + "output":{"shape":"DisableDirectoryResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"DirectoryDeletedException"}, + {"shape":"InternalServiceException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RetryableConflictException"}, + {"shape":"InvalidArnException"} + ], + "documentation":"

Disables the specified directory. Disabled directories cannot be read or written to. Only enabled directories can be disabled. Disabled directories may be reenabled.

" + }, + "EnableDirectory":{ + "name":"EnableDirectory", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/directory/enable", + "responseCode":200 + }, + "input":{"shape":"EnableDirectoryRequest"}, + "output":{"shape":"EnableDirectoryResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"DirectoryDeletedException"}, + {"shape":"InternalServiceException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RetryableConflictException"}, + {"shape":"InvalidArnException"} + ], + "documentation":"

Enables the specified directory. Only disabled directories can be enabled. Once enabled, the directory can then be read and written to.

" + }, + "GetAppliedSchemaVersion":{ + "name":"GetAppliedSchemaVersion", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/schema/getappliedschema", + "responseCode":200 + }, + "input":{"shape":"GetAppliedSchemaVersionRequest"}, + "output":{"shape":"GetAppliedSchemaVersionResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns current applied schema version ARN, including the minor version in use.

" + }, + "GetDirectory":{ + "name":"GetDirectory", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/directory/get", + "responseCode":200 + }, + "input":{"shape":"GetDirectoryRequest"}, + "output":{"shape":"GetDirectoryResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Retrieves metadata about a directory.

" + }, + "GetFacet":{ + "name":"GetFacet", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/facet", + "responseCode":200 + }, + "input":{"shape":"GetFacetRequest"}, + "output":{"shape":"GetFacetResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"FacetNotFoundException"} + ], + "documentation":"

Gets details of the Facet, such as facet name, attributes, Rules, or ObjectType. You can call this on all kinds of schema facets -- published, development, or applied.

" + }, + "GetLinkAttributes":{ + "name":"GetLinkAttributes", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/typedlink/attributes/get", + "responseCode":200 + }, + "input":{"shape":"GetLinkAttributesRequest"}, + "output":{"shape":"GetLinkAttributesResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"FacetValidationException"} + ], + "documentation":"

Retrieves attributes that are associated with a typed link.

" + }, + "GetObjectAttributes":{ + "name":"GetObjectAttributes", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/object/attributes/get", + "responseCode":200 + }, + "input":{"shape":"GetObjectAttributesRequest"}, + "output":{"shape":"GetObjectAttributesResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"FacetValidationException"} + ], + "documentation":"

Retrieves attributes within a facet that are associated with an object.

" + }, + "GetObjectInformation":{ + "name":"GetObjectInformation", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/object/information", + "responseCode":200 + }, + "input":{"shape":"GetObjectInformationRequest"}, + "output":{"shape":"GetObjectInformationResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves metadata about an object.

" + }, + "GetSchemaAsJson":{ + "name":"GetSchemaAsJson", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/schema/json", + "responseCode":200 + }, + "input":{"shape":"GetSchemaAsJsonRequest"}, + "output":{"shape":"GetSchemaAsJsonResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves a JSON representation of the schema. See JSON Schema Format for more information.

" + }, + "GetTypedLinkFacetInformation":{ + "name":"GetTypedLinkFacetInformation", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/typedlink/facet/get", + "responseCode":200 + }, + "input":{"shape":"GetTypedLinkFacetInformationRequest"}, + "output":{"shape":"GetTypedLinkFacetInformationResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"FacetNotFoundException"} + ], + "documentation":"

Returns the identity attribute order for a specific TypedLinkFacet. For more information, see Typed link.

" + }, + "ListAppliedSchemaArns":{ + "name":"ListAppliedSchemaArns", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/schema/applied", + "responseCode":200 + }, + "input":{"shape":"ListAppliedSchemaArnsRequest"}, + "output":{"shape":"ListAppliedSchemaArnsResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

Lists schema major versions applied to a directory. If SchemaArn is provided, lists the minor version.

" + }, + "ListAttachedIndices":{ + "name":"ListAttachedIndices", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/object/indices", + "responseCode":200 + }, + "input":{"shape":"ListAttachedIndicesRequest"}, + "output":{"shape":"ListAttachedIndicesResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Lists indices attached to the specified object.

" + }, + "ListDevelopmentSchemaArns":{ + "name":"ListDevelopmentSchemaArns", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/schema/development", + "responseCode":200 + }, + "input":{"shape":"ListDevelopmentSchemaArnsRequest"}, + "output":{"shape":"ListDevelopmentSchemaArnsResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

Retrieves each Amazon Resource Name (ARN) of schemas in the development state.

" + }, + "ListDirectories":{ + "name":"ListDirectories", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/directory/list", + "responseCode":200 + }, + "input":{"shape":"ListDirectoriesRequest"}, + "output":{"shape":"ListDirectoriesResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

Lists directories created within an account.

" + }, + "ListFacetAttributes":{ + "name":"ListFacetAttributes", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/facet/attributes", + "responseCode":200 + }, + "input":{"shape":"ListFacetAttributesRequest"}, + "output":{"shape":"ListFacetAttributesResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"FacetNotFoundException"}, + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

Retrieves attributes attached to the facet.

" + }, + "ListFacetNames":{ + "name":"ListFacetNames", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/facet/list", + "responseCode":200 + }, + "input":{"shape":"ListFacetNamesRequest"}, + "output":{"shape":"ListFacetNamesResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

Retrieves the names of facets that exist in a schema.

" + }, + "ListIncomingTypedLinks":{ + "name":"ListIncomingTypedLinks", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/typedlink/incoming", + "responseCode":200 + }, + "input":{"shape":"ListIncomingTypedLinksRequest"}, + "output":{"shape":"ListIncomingTypedLinksResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"FacetValidationException"} + ], + "documentation":"

Returns a paginated list of all the incoming TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed link.

" + }, + "ListIndex":{ + "name":"ListIndex", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/index/targets", + "responseCode":200 + }, + "input":{"shape":"ListIndexRequest"}, + "output":{"shape":"ListIndexResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"FacetValidationException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotIndexException"} + ], + "documentation":"

Lists objects attached to the specified index.

" + }, + "ListManagedSchemaArns":{ + "name":"ListManagedSchemaArns", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/schema/managed", + "responseCode":200 + }, + "input":{"shape":"ListManagedSchemaArnsRequest"}, + "output":{"shape":"ListManagedSchemaArnsResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

Lists the major version families of each managed schema. If a major version ARN is provided as SchemaArn, the minor version revisions in that family are listed instead.

" + }, + "ListObjectAttributes":{ + "name":"ListObjectAttributes", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/object/attributes", + "responseCode":200 + }, + "input":{"shape":"ListObjectAttributesRequest"}, + "output":{"shape":"ListObjectAttributesResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"FacetValidationException"} + ], + "documentation":"

Lists all attributes that are associated with an object.

" + }, + "ListObjectChildren":{ + "name":"ListObjectChildren", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/object/children", + "responseCode":200 + }, + "input":{"shape":"ListObjectChildrenRequest"}, + "output":{"shape":"ListObjectChildrenResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"NotNodeException"} + ], + "documentation":"

Returns a paginated list of child objects that are associated with a given object.

" + }, + "ListObjectParentPaths":{ + "name":"ListObjectParentPaths", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/object/parentpaths", + "responseCode":200 + }, + "input":{"shape":"ListObjectParentPathsRequest"}, + "output":{"shape":"ListObjectParentPathsResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves all available parent paths for any object type such as node, leaf node, policy node, and index node objects. For more information about objects, see Directory Structure.

Use this API to evaluate all parents for an object. The call returns all objects from the root of the directory up to the requested object. The API returns the number of paths based on user-defined MaxResults, in case there are multiple paths to the parent. The order of the paths and nodes returned is consistent among multiple API calls unless the objects are deleted or moved. Paths not leading to the directory root are ignored from the target object.

" + }, + "ListObjectParents":{ + "name":"ListObjectParents", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/object/parent", + "responseCode":200 + }, + "input":{"shape":"ListObjectParentsRequest"}, + "output":{"shape":"ListObjectParentsResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"CannotListParentOfRootException"} + ], + "documentation":"

Lists parent objects that are associated with a given object in pagination fashion.

" + }, + "ListObjectPolicies":{ + "name":"ListObjectPolicies", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/object/policy", + "responseCode":200 + }, + "input":{"shape":"ListObjectPoliciesRequest"}, + "output":{"shape":"ListObjectPoliciesResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

Returns policies attached to an object in pagination fashion.

" + }, + "ListOutgoingTypedLinks":{ + "name":"ListOutgoingTypedLinks", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/typedlink/outgoing", + "responseCode":200 + }, + "input":{"shape":"ListOutgoingTypedLinksRequest"}, + "output":{"shape":"ListOutgoingTypedLinksResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"FacetValidationException"} + ], + "documentation":"

Returns a paginated list of all the outgoing TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed link.

" + }, + "ListPolicyAttachments":{ + "name":"ListPolicyAttachments", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/policy/attachment", + "responseCode":200 + }, + "input":{"shape":"ListPolicyAttachmentsRequest"}, + "output":{"shape":"ListPolicyAttachmentsResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotPolicyException"} + ], + "documentation":"

Returns all of the ObjectIdentifiers to which a given policy is attached.

" + }, + "ListPublishedSchemaArns":{ + "name":"ListPublishedSchemaArns", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/schema/published", + "responseCode":200 + }, + "input":{"shape":"ListPublishedSchemaArnsRequest"}, + "output":{"shape":"ListPublishedSchemaArnsResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

Lists the major version families of each published schema. If a major version ARN is provided as SchemaArn, the minor version revisions in that family are listed instead.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/tags", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidTaggingRequestException"} + ], + "documentation":"

Returns tags for a resource. Tagging is currently supported only for directories with a limit of 50 tags per directory. All 50 tags are returned for a given directory with this API call.

" + }, + "ListTypedLinkFacetAttributes":{ + "name":"ListTypedLinkFacetAttributes", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/typedlink/facet/attributes", + "responseCode":200 + }, + "input":{"shape":"ListTypedLinkFacetAttributesRequest"}, + "output":{"shape":"ListTypedLinkFacetAttributesResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"FacetNotFoundException"}, + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

Returns a paginated list of all attribute definitions for a particular TypedLinkFacet. For more information, see Typed link.

" + }, + "ListTypedLinkFacetNames":{ + "name":"ListTypedLinkFacetNames", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/typedlink/facet/list", + "responseCode":200 + }, + "input":{"shape":"ListTypedLinkFacetNamesRequest"}, + "output":{"shape":"ListTypedLinkFacetNamesResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

Returns a paginated list of TypedLink facet names for a particular schema. For more information, see Typed link.

" + }, + "LookupPolicy":{ + "name":"LookupPolicy", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/policy/lookup", + "responseCode":200 + }, + "input":{"shape":"LookupPolicyRequest"}, + "output":{"shape":"LookupPolicyResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Lists all policies from the root of the Directory to the object specified. If there are no policies present, an empty list is returned. If policies are present, and if some objects don't have the policies attached, it returns the ObjectIdentifier for such objects. If policies are present, it returns ObjectIdentifier, policyId, and policyType. Paths that don't lead to the root from the target object are ignored. For more information, see Policies.

" + }, + "PublishSchema":{ + "name":"PublishSchema", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/schema/publish", + "responseCode":200 + }, + "input":{"shape":"PublishSchemaRequest"}, + "output":{"shape":"PublishSchemaResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"SchemaAlreadyPublishedException"} + ], + "documentation":"

Publishes a development schema with a major version and a recommended minor version.

" + }, + "PutSchemaFromJson":{ + "name":"PutSchemaFromJson", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/schema/json", + "responseCode":200 + }, + "input":{"shape":"PutSchemaFromJsonRequest"}, + "output":{"shape":"PutSchemaFromJsonResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidSchemaDocException"}, + {"shape":"InvalidRuleException"} + ], + "documentation":"

Allows a schema to be updated using JSON upload. Only available for development schemas. See JSON Schema Format for more information.

" + }, + "RemoveFacetFromObject":{ + "name":"RemoveFacetFromObject", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/object/facets/delete", + "responseCode":200 + }, + "input":{"shape":"RemoveFacetFromObjectRequest"}, + "output":{"shape":"RemoveFacetFromObjectResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"FacetValidationException"} + ], + "documentation":"

Removes the specified facet from the specified object.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/tags/add", + "responseCode":200 + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidTaggingRequestException"} + ], + "documentation":"

An API operation for adding tags to a resource.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/tags/remove", + "responseCode":200 + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidTaggingRequestException"} + ], + "documentation":"

An API operation for removing tags from a resource.

" + }, + "UpdateFacet":{ + "name":"UpdateFacet", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/facet", + "responseCode":200 + }, + "input":{"shape":"UpdateFacetRequest"}, + "output":{"shape":"UpdateFacetResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidFacetUpdateException"}, + {"shape":"FacetValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"FacetNotFoundException"}, + {"shape":"InvalidRuleException"} + ], + "documentation":"

Does the following:

  1. Adds new Attributes, Rules, or ObjectTypes.

  2. Updates existing Attributes, Rules, or ObjectTypes.

  3. Deletes existing Attributes, Rules, or ObjectTypes.

" + }, + "UpdateLinkAttributes":{ + "name":"UpdateLinkAttributes", + "http":{ + "method":"POST", + "requestUri":"/amazonclouddirectory/2017-01-11/typedlink/attributes/update", + "responseCode":200 + }, + "input":{"shape":"UpdateLinkAttributesRequest"}, + "output":{"shape":"UpdateLinkAttributesResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"FacetValidationException"} + ], + "documentation":"

Updates a given typed link’s attributes. Attributes to be updated must not contribute to the typed link’s identity, as defined by its IdentityAttributeOrder.

" + }, + "UpdateObjectAttributes":{ + "name":"UpdateObjectAttributes", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/object/update", + "responseCode":200 + }, + "input":{"shape":"UpdateObjectAttributesRequest"}, + "output":{"shape":"UpdateObjectAttributesResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LinkNameAlreadyInUseException"}, + {"shape":"FacetValidationException"} + ], + "documentation":"

Updates a given object's attributes.

" + }, + "UpdateSchema":{ + "name":"UpdateSchema", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/schema/update", + "responseCode":200 + }, + "input":{"shape":"UpdateSchemaRequest"}, + "output":{"shape":"UpdateSchemaResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Updates the schema name with a new name. Only development schema names can be updated.

" + }, + "UpdateTypedLinkFacet":{ + "name":"UpdateTypedLinkFacet", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/typedlink/facet", + "responseCode":200 + }, + "input":{"shape":"UpdateTypedLinkFacetRequest"}, + "output":{"shape":"UpdateTypedLinkFacetResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"FacetValidationException"}, + {"shape":"InvalidFacetUpdateException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"FacetNotFoundException"}, + {"shape":"InvalidRuleException"} + ], + "documentation":"

Updates a TypedLinkFacet. For more information, see Typed link.

" + }, + "UpgradeAppliedSchema":{ + "name":"UpgradeAppliedSchema", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/schema/upgradeapplied", + "responseCode":200 + }, + "input":{"shape":"UpgradeAppliedSchemaRequest"}, + "output":{"shape":"UpgradeAppliedSchemaResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"IncompatibleSchemaException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidAttachmentException"}, + {"shape":"SchemaAlreadyExistsException"} + ], + "documentation":"

Upgrades a single directory in-place using the PublishedSchemaArn with schema updates found in MinorVersion. Backwards-compatible minor version upgrades are instantaneously available for readers on all objects in the directory. Note: This is a synchronous API call and upgrades only one schema on a given directory per call. To upgrade multiple directories from one schema, you would need to call this API on each directory.

" + }, + "UpgradePublishedSchema":{ + "name":"UpgradePublishedSchema", + "http":{ + "method":"PUT", + "requestUri":"/amazonclouddirectory/2017-01-11/schema/upgradepublished", + "responseCode":200 + }, + "input":{"shape":"UpgradePublishedSchemaRequest"}, + "output":{"shape":"UpgradePublishedSchemaResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidArnException"}, + {"shape":"RetryableConflictException"}, + {"shape":"ValidationException"}, + {"shape":"IncompatibleSchemaException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidAttachmentException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Upgrades a published schema under a new minor version revision using the current contents of DevelopmentSchemaArn.

" + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Access denied. Check your permissions.

", + "error":{"httpStatusCode":403}, + "exception":true + }, + "AddFacetToObjectRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "SchemaFacet", + "ObjectReference" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the Directory where the object resides. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "SchemaFacet":{ + "shape":"SchemaFacet", + "documentation":"

Identifiers for the facet that you are adding to the object. See SchemaFacet for details.

" + }, + "ObjectAttributeList":{ + "shape":"AttributeKeyAndValueList", + "documentation":"

Attributes on the facet that you are adding to the object.

" + }, + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

A reference to the object you are adding the specified facet to.

" + } + } + }, + "AddFacetToObjectResponse":{ + "type":"structure", + "members":{ + } + }, + "ApplySchemaRequest":{ + "type":"structure", + "required":[ + "PublishedSchemaArn", + "DirectoryArn" + ], + "members":{ + "PublishedSchemaArn":{ + "shape":"Arn", + "documentation":"

Published schema Amazon Resource Name (ARN) that needs to be copied. For more information, see arns.

" + }, + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the Directory into which the schema is copied. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + } + } + }, + "ApplySchemaResponse":{ + "type":"structure", + "members":{ + "AppliedSchemaArn":{ + "shape":"Arn", + "documentation":"

The applied schema ARN that is associated with the copied schema in the Directory. You can use this ARN to describe the schema information applied on this directory. For more information, see arns.

" + }, + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The ARN that is associated with the Directory. For more information, see arns.

" + } + } + }, + "Arn":{"type":"string"}, + "Arns":{ + "type":"list", + "member":{"shape":"Arn"} + }, + "AttachObjectRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "ParentReference", + "ChildReference", + "LinkName" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) that is associated with the Directory where both objects reside. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "ParentReference":{ + "shape":"ObjectReference", + "documentation":"

The parent object reference.

" + }, + "ChildReference":{ + "shape":"ObjectReference", + "documentation":"

The child object reference to be attached to the object.

" + }, + "LinkName":{ + "shape":"LinkName", + "documentation":"

The link name with which the child object is attached to the parent.

" + } + } + }, + "AttachObjectResponse":{ + "type":"structure", + "members":{ + "AttachedObjectIdentifier":{ + "shape":"ObjectIdentifier", + "documentation":"

The attached ObjectIdentifier, which is the child ObjectIdentifier.

" + } + } + }, + "AttachPolicyRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "PolicyReference", + "ObjectReference" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the Directory where both objects reside. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "PolicyReference":{ + "shape":"ObjectReference", + "documentation":"

The reference that is associated with the policy object.

" + }, + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

The reference that identifies the object to which the policy will be attached.

" + } + } + }, + "AttachPolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "AttachToIndexRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "IndexReference", + "TargetReference" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the directory where the object and index exist.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "IndexReference":{ + "shape":"ObjectReference", + "documentation":"

A reference to the index that you are attaching the object to.

" + }, + "TargetReference":{ + "shape":"ObjectReference", + "documentation":"

A reference to the object that you are attaching to the index.

" + } + } + }, + "AttachToIndexResponse":{ + "type":"structure", + "members":{ + "AttachedObjectIdentifier":{ + "shape":"ObjectIdentifier", + "documentation":"

The ObjectIdentifier of the object that was attached to the index.

" + } + } + }, + "AttachTypedLinkRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "SourceObjectReference", + "TargetObjectReference", + "TypedLinkFacet", + "Attributes" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the directory where you want to attach the typed link.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "SourceObjectReference":{ + "shape":"ObjectReference", + "documentation":"

Identifies the source object that the typed link will attach to.

" + }, + "TargetObjectReference":{ + "shape":"ObjectReference", + "documentation":"

Identifies the target object that the typed link will attach to.

" + }, + "TypedLinkFacet":{ + "shape":"TypedLinkSchemaAndFacetName", + "documentation":"

Identifies the typed link facet that is associated with the typed link.

" + }, + "Attributes":{ + "shape":"AttributeNameAndValueList", + "documentation":"

A set of attributes that are associated with the typed link.

" + } + } + }, + "AttachTypedLinkResponse":{ + "type":"structure", + "members":{ + "TypedLinkSpecifier":{ + "shape":"TypedLinkSpecifier", + "documentation":"

Returns a typed link specifier as output.

" + } + } + }, + "AttributeKey":{ + "type":"structure", + "required":[ + "SchemaArn", + "FacetName", + "Name" + ], + "members":{ + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the schema that contains the facet and attribute.

" + }, + "FacetName":{ + "shape":"FacetName", + "documentation":"

The name of the facet that the attribute exists within.

" + }, + "Name":{ + "shape":"AttributeName", + "documentation":"

The name of the attribute.

" + } + }, + "documentation":"

A unique identifier for an attribute.

" + }, + "AttributeKeyAndValue":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"AttributeKey", + "documentation":"

The key of the attribute.

" + }, + "Value":{ + "shape":"TypedAttributeValue", + "documentation":"

The value of the attribute.

" + } + }, + "documentation":"

The combination of an attribute key and an attribute value.

" + }, + "AttributeKeyAndValueList":{ + "type":"list", + "member":{"shape":"AttributeKeyAndValue"} + }, + "AttributeKeyList":{ + "type":"list", + "member":{"shape":"AttributeKey"} + }, + "AttributeName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9._-]*$" + }, + "AttributeNameAndValue":{ + "type":"structure", + "required":[ + "AttributeName", + "Value" + ], + "members":{ + "AttributeName":{ + "shape":"AttributeName", + "documentation":"

The attribute name of the typed link.

" + }, + "Value":{ + "shape":"TypedAttributeValue", + "documentation":"

The value for the typed link.

" + } + }, + "documentation":"

Identifies the attribute name and value for a typed link.

" + }, + "AttributeNameAndValueList":{ + "type":"list", + "member":{"shape":"AttributeNameAndValue"} + }, + "AttributeNameList":{ + "type":"list", + "member":{"shape":"AttributeName"} + }, + "BatchAddFacetToObject":{ + "type":"structure", + "required":[ + "SchemaFacet", + "ObjectAttributeList", + "ObjectReference" + ], + "members":{ + "SchemaFacet":{ + "shape":"SchemaFacet", + "documentation":"

Represents the facet being added to the object.

" + }, + "ObjectAttributeList":{ + "shape":"AttributeKeyAndValueList", + "documentation":"

The attributes to set on the object.

" + }, + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

A reference to the object being mutated.

" + } + }, + "documentation":"

Represents the output of a batch add facet to object operation.

" + }, + "BatchAddFacetToObjectResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

The result of a batch add facet to object operation.

" + }, + "BatchAttachObject":{ + "type":"structure", + "required":[ + "ParentReference", + "ChildReference", + "LinkName" + ], + "members":{ + "ParentReference":{ + "shape":"ObjectReference", + "documentation":"

The parent object reference.

" + }, + "ChildReference":{ + "shape":"ObjectReference", + "documentation":"

The child object reference that is to be attached to the object.

" + }, + "LinkName":{ + "shape":"LinkName", + "documentation":"

The name of the link.

" + } + }, + "documentation":"

Represents the output of an AttachObject operation.

" + }, + "BatchAttachObjectResponse":{ + "type":"structure", + "members":{ + "attachedObjectIdentifier":{ + "shape":"ObjectIdentifier", + "documentation":"

The ObjectIdentifier of the object that has been attached.

" + } + }, + "documentation":"

 Represents the output of a batch AttachObject response operation.

" + }, + "BatchAttachPolicy":{ + "type":"structure", + "required":[ + "PolicyReference", + "ObjectReference" + ], + "members":{ + "PolicyReference":{ + "shape":"ObjectReference", + "documentation":"

The reference that is associated with the policy object.

" + }, + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

The reference that identifies the object to which the policy will be attached.

" + } + }, + "documentation":"

Attaches a policy object to a regular object inside a BatchRead operation. For more information, see AttachPolicy and BatchReadRequest$Operations.

" + }, + "BatchAttachPolicyResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

Represents the output of an AttachPolicy response operation.

" + }, + "BatchAttachToIndex":{ + "type":"structure", + "required":[ + "IndexReference", + "TargetReference" + ], + "members":{ + "IndexReference":{ + "shape":"ObjectReference", + "documentation":"

A reference to the index that you are attaching the object to.

" + }, + "TargetReference":{ + "shape":"ObjectReference", + "documentation":"

A reference to the object that you are attaching to the index.

" + } + }, + "documentation":"

Attaches the specified object to the specified index inside a BatchRead operation. For more information, see AttachToIndex and BatchReadRequest$Operations.

" + }, + "BatchAttachToIndexResponse":{ + "type":"structure", + "members":{ + "AttachedObjectIdentifier":{ + "shape":"ObjectIdentifier", + "documentation":"

The ObjectIdentifier of the object that was attached to the index.

" + } + }, + "documentation":"

 Represents the output of an AttachToIndex response operation.

" + }, + "BatchAttachTypedLink":{ + "type":"structure", + "required":[ + "SourceObjectReference", + "TargetObjectReference", + "TypedLinkFacet", + "Attributes" + ], + "members":{ + "SourceObjectReference":{ + "shape":"ObjectReference", + "documentation":"

Identifies the source object that the typed link will attach to.

" + }, + "TargetObjectReference":{ + "shape":"ObjectReference", + "documentation":"

Identifies the target object that the typed link will attach to.

" + }, + "TypedLinkFacet":{ + "shape":"TypedLinkSchemaAndFacetName", + "documentation":"

Identifies the typed link facet that is associated with the typed link.

" + }, + "Attributes":{ + "shape":"AttributeNameAndValueList", + "documentation":"

A set of attributes that are associated with the typed link.

" + } + }, + "documentation":"

Attaches a typed link to a specified source and target object inside a BatchRead operation. For more information, see AttachTypedLink and BatchReadRequest$Operations.

" + }, + "BatchAttachTypedLinkResponse":{ + "type":"structure", + "members":{ + "TypedLinkSpecifier":{ + "shape":"TypedLinkSpecifier", + "documentation":"

Returns a typed link specifier as output.

" + } + }, + "documentation":"

 Represents the output of an AttachTypedLink response operation.

" + }, + "BatchCreateIndex":{ + "type":"structure", + "required":[ + "OrderedIndexedAttributeList", + "IsUnique" + ], + "members":{ + "OrderedIndexedAttributeList":{ + "shape":"AttributeKeyList", + "documentation":"

Specifies the attributes that should be indexed on. Currently only a single attribute is supported.

" + }, + "IsUnique":{ + "shape":"Bool", + "documentation":"

Indicates whether the attribute that is being indexed has unique values or not.

" + }, + "ParentReference":{ + "shape":"ObjectReference", + "documentation":"

A reference to the parent object that contains the index object.

" + }, + "LinkName":{ + "shape":"LinkName", + "documentation":"

The name of the link between the parent object and the index object.

" + }, + "BatchReferenceName":{ + "shape":"BatchReferenceName", + "documentation":"

The batch reference name. See Batches for more information.

" + } + }, + "documentation":"

Creates an index object inside of a BatchRead operation. For more information, see CreateIndex and BatchReadRequest$Operations.

" + }, + "BatchCreateIndexResponse":{ + "type":"structure", + "members":{ + "ObjectIdentifier":{ + "shape":"ObjectIdentifier", + "documentation":"

The ObjectIdentifier of the index created by this operation.

" + } + }, + "documentation":"

Represents the output of a CreateIndex response operation.

" + }, + "BatchCreateObject":{ + "type":"structure", + "required":[ + "SchemaFacet", + "ObjectAttributeList" + ], + "members":{ + "SchemaFacet":{ + "shape":"SchemaFacetList", + "documentation":"

A list of FacetArns that will be associated with the object. For more information, see arns.

" + }, + "ObjectAttributeList":{ + "shape":"AttributeKeyAndValueList", + "documentation":"

An attribute map, which contains an attribute ARN as the key and attribute value as the map value.

" + }, + "ParentReference":{ + "shape":"ObjectReference", + "documentation":"

If specified, the parent reference to which this object will be attached.

" + }, + "LinkName":{ + "shape":"LinkName", + "documentation":"

The name of the link.

" + }, + "BatchReferenceName":{ + "shape":"BatchReferenceName", + "documentation":"

The batch reference name. See Batches for more information.

" + } + }, + "documentation":"

Represents the output of a CreateObject operation.

" + }, + "BatchCreateObjectResponse":{ + "type":"structure", + "members":{ + "ObjectIdentifier":{ + "shape":"ObjectIdentifier", + "documentation":"

The ID that is associated with the object.

" + } + }, + "documentation":"

Represents the output of a CreateObject response operation.

" + }, + "BatchDeleteObject":{ + "type":"structure", + "required":["ObjectReference"], + "members":{ + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

The reference that identifies the object.

" + } + }, + "documentation":"

Represents the output of a DeleteObject operation.

" + }, + "BatchDeleteObjectResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

Represents the output of a DeleteObject response operation.

" + }, + "BatchDetachFromIndex":{ + "type":"structure", + "required":[ + "IndexReference", + "TargetReference" + ], + "members":{ + "IndexReference":{ + "shape":"ObjectReference", + "documentation":"

A reference to the index object.

" + }, + "TargetReference":{ + "shape":"ObjectReference", + "documentation":"

A reference to the object being detached from the index.

" + } + }, + "documentation":"

Detaches the specified object from the specified index inside a BatchWrite operation. For more information, see DetachFromIndex and BatchWriteRequest$Operations.

" + }, + "BatchDetachFromIndexResponse":{ + "type":"structure", + "members":{ + "DetachedObjectIdentifier":{ + "shape":"ObjectIdentifier", + "documentation":"

The ObjectIdentifier of the object that was detached from the index.

" + } + }, + "documentation":"

Represents the output of a DetachFromIndex response operation.

" + }, + "BatchDetachObject":{ + "type":"structure", + "required":[ + "ParentReference", + "LinkName" + ], + "members":{ + "ParentReference":{ + "shape":"ObjectReference", + "documentation":"

Parent reference from which the object with the specified link name is detached.

" + }, + "LinkName":{ + "shape":"LinkName", + "documentation":"

The name of the link.

" + }, + "BatchReferenceName":{ + "shape":"BatchReferenceName", + "documentation":"

The batch reference name. See Batches for more information.

" + } + }, + "documentation":"

Represents the output of a DetachObject operation.

" + }, + "BatchDetachObjectResponse":{ + "type":"structure", + "members":{ + "detachedObjectIdentifier":{ + "shape":"ObjectIdentifier", + "documentation":"

The ObjectIdentifier of the detached object.

" + } + }, + "documentation":"

Represents the output of a DetachObject response operation.

" + }, + "BatchDetachPolicy":{ + "type":"structure", + "required":[ + "PolicyReference", + "ObjectReference" + ], + "members":{ + "PolicyReference":{ + "shape":"ObjectReference", + "documentation":"

Reference that identifies the policy object.

" + }, + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

Reference that identifies the object whose policy object will be detached.

" + } + }, + "documentation":"

Detaches the specified policy from the specified directory inside a BatchWrite operation. For more information, see DetachPolicy and BatchWriteRequest$Operations.

" + }, + "BatchDetachPolicyResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

Represents the output of a DetachPolicy response operation.

" + }, + "BatchDetachTypedLink":{ + "type":"structure", + "required":["TypedLinkSpecifier"], + "members":{ + "TypedLinkSpecifier":{ + "shape":"TypedLinkSpecifier", + "documentation":"

Used to accept a typed link specifier as input.

" + } + }, + "documentation":"

Detaches a typed link from a specified source and target object inside a BatchWrite operation. For more information, see DetachTypedLink and BatchWriteRequest$Operations.

" + }, + "BatchDetachTypedLinkResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

Represents the output of a DetachTypedLink response operation.

" + }, + "BatchGetLinkAttributes":{ + "type":"structure", + "required":[ + "TypedLinkSpecifier", + "AttributeNames" + ], + "members":{ + "TypedLinkSpecifier":{ + "shape":"TypedLinkSpecifier", + "documentation":"

Allows a typed link specifier to be accepted as input.

" + }, + "AttributeNames":{ + "shape":"AttributeNameList", + "documentation":"

A list of attribute names whose values will be retrieved.

" + } + }, + "documentation":"

Retrieves attributes that are associated with a typed link inside a BatchRead operation. For more information, see GetLinkAttributes and BatchReadRequest$Operations.

" + }, + "BatchGetLinkAttributesResponse":{ + "type":"structure", + "members":{ + "Attributes":{ + "shape":"AttributeKeyAndValueList", + "documentation":"

The attributes that are associated with the typed link.

" + } + }, + "documentation":"

Represents the output of a GetLinkAttributes response operation.

" + }, + "BatchGetObjectAttributes":{ + "type":"structure", + "required":[ + "ObjectReference", + "SchemaFacet", + "AttributeNames" + ], + "members":{ + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

Reference that identifies the object whose attributes will be retrieved.

" + }, + "SchemaFacet":{ + "shape":"SchemaFacet", + "documentation":"

Identifier for the facet whose attributes will be retrieved. See SchemaFacet for details.

" + }, + "AttributeNames":{ + "shape":"AttributeNameList", + "documentation":"

List of attribute names whose values will be retrieved.

" + } + }, + "documentation":"

Retrieves attributes within a facet that are associated with an object inside an BatchRead operation. For more information, see GetObjectAttributes and BatchReadRequest$Operations.

" + }, + "BatchGetObjectAttributesResponse":{ + "type":"structure", + "members":{ + "Attributes":{ + "shape":"AttributeKeyAndValueList", + "documentation":"

The attribute values that are associated with an object.

" + } + }, + "documentation":"

Represents the output of a GetObjectAttributes response operation.

" + }, + "BatchGetObjectInformation":{ + "type":"structure", + "required":["ObjectReference"], + "members":{ + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

A reference to the object.

" + } + }, + "documentation":"

Retrieves metadata about an object inside a BatchRead operation. For more information, see GetObjectInformation and BatchReadRequest$Operations.

" + }, + "BatchGetObjectInformationResponse":{ + "type":"structure", + "members":{ + "SchemaFacets":{ + "shape":"SchemaFacetList", + "documentation":"

The facets attached to the specified object.

" + }, + "ObjectIdentifier":{ + "shape":"ObjectIdentifier", + "documentation":"

The ObjectIdentifier of the specified object.

" + } + }, + "documentation":"

Represents the output of a GetObjectInformation response operation.

" + }, + "BatchListAttachedIndices":{ + "type":"structure", + "required":["TargetReference"], + "members":{ + "TargetReference":{ + "shape":"ObjectReference", + "documentation":"

A reference to the object that has indices attached.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + } + }, + "documentation":"

Lists indices attached to an object inside a BatchRead operation. For more information, see ListAttachedIndices and BatchReadRequest$Operations.

" + }, + "BatchListAttachedIndicesResponse":{ + "type":"structure", + "members":{ + "IndexAttachments":{ + "shape":"IndexAttachmentList", + "documentation":"

The indices attached to the specified object.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + }, + "documentation":"

Represents the output of a ListAttachedIndices response operation.

" + }, + "BatchListIncomingTypedLinks":{ + "type":"structure", + "required":["ObjectReference"], + "members":{ + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

The reference that identifies the object whose attributes will be listed.

" + }, + "FilterAttributeRanges":{ + "shape":"TypedLinkAttributeRangeList", + "documentation":"

Provides range filters for multiple attributes. When providing ranges to typed link selection, any inexact ranges must be specified at the end. Any attributes that do not have a range specified are presumed to match the entire range.

" + }, + "FilterTypedLink":{ + "shape":"TypedLinkSchemaAndFacetName", + "documentation":"

Filters are interpreted in the order of the attributes on the typed link facet, not the order in which they are supplied to any API calls.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + } + }, + "documentation":"

Returns a paginated list of all the incoming TypedLinkSpecifier information for an object inside a BatchRead operation. For more information, see ListIncomingTypedLinks and BatchReadRequest$Operations.

" + }, + "BatchListIncomingTypedLinksResponse":{ + "type":"structure", + "members":{ + "LinkSpecifiers":{ + "shape":"TypedLinkSpecifierList", + "documentation":"

Returns one or more typed link specifiers as output.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + }, + "documentation":"

Represents the output of a ListIncomingTypedLinks response operation.

" + }, + "BatchListIndex":{ + "type":"structure", + "required":["IndexReference"], + "members":{ + "RangesOnIndexedValues":{ + "shape":"ObjectAttributeRangeList", + "documentation":"

Specifies the ranges of indexed values that you want to query.

" + }, + "IndexReference":{ + "shape":"ObjectReference", + "documentation":"

The reference to the index to list.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + }, + "documentation":"

Lists objects attached to the specified index inside a BatchRead operation. For more information, see ListIndex and BatchReadRequest$Operations.

" + }, + "BatchListIndexResponse":{ + "type":"structure", + "members":{ + "IndexAttachments":{ + "shape":"IndexAttachmentList", + "documentation":"

The objects and indexed values attached to the index.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + }, + "documentation":"

Represents the output of a ListIndex response operation.

" + }, + "BatchListObjectAttributes":{ + "type":"structure", + "required":["ObjectReference"], + "members":{ + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

Reference of the object whose attributes need to be listed.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of items to be retrieved in a single call. This is an approximate number.

" + }, + "FacetFilter":{ + "shape":"SchemaFacet", + "documentation":"

Used to filter the list of object attributes that are associated with a certain facet.

" + } + }, + "documentation":"

Represents the output of a ListObjectAttributes operation.

" + }, + "BatchListObjectAttributesResponse":{ + "type":"structure", + "members":{ + "Attributes":{ + "shape":"AttributeKeyAndValueList", + "documentation":"

The attributes map that is associated with the object. AttributeArn is the key; attribute value is the value.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + }, + "documentation":"

Represents the output of a ListObjectAttributes response operation.

" + }, + "BatchListObjectChildren":{ + "type":"structure", + "required":["ObjectReference"], + "members":{ + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

Reference of the object for which child objects are being listed.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

Maximum number of items to be retrieved in a single call. This is an approximate number.

" + } + }, + "documentation":"

Represents the output of a ListObjectChildren operation.

" + }, + "BatchListObjectChildrenResponse":{ + "type":"structure", + "members":{ + "Children":{ + "shape":"LinkNameToObjectIdentifierMap", + "documentation":"

The children structure, which is a map with the key as the LinkName and ObjectIdentifier as the value.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + }, + "documentation":"

Represents the output of a ListObjectChildren response operation.

" + }, + "BatchListObjectParentPaths":{ + "type":"structure", + "required":["ObjectReference"], + "members":{ + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

The reference that identifies the object whose attributes will be listed.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + } + }, + "documentation":"

Retrieves all available parent paths for any object type such as node, leaf node, policy node, and index node objects inside a BatchRead operation. For more information, see ListObjectParentPaths and BatchReadRequest$Operations.

" + }, + "BatchListObjectParentPathsResponse":{ + "type":"structure", + "members":{ + "PathToObjectIdentifiersList":{ + "shape":"PathToObjectIdentifiersList", + "documentation":"

Returns the path to the ObjectIdentifiers that are associated with the directory.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + }, + "documentation":"

Represents the output of a ListObjectParentPaths response operation.

" + }, + "BatchListObjectPolicies":{ + "type":"structure", + "required":["ObjectReference"], + "members":{ + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

The reference that identifies the object whose attributes will be listed.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + } + }, + "documentation":"

Returns policies attached to an object in pagination fashion inside a BatchRead operation. For more information, see ListObjectPolicies and BatchReadRequest$Operations.

" + }, + "BatchListObjectPoliciesResponse":{ + "type":"structure", + "members":{ + "AttachedPolicyIds":{ + "shape":"ObjectIdentifierList", + "documentation":"

A list of policy ObjectIdentifiers, that are attached to the object.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + }, + "documentation":"

Represents the output of a ListObjectPolicies response operation.

" + }, + "BatchListOutgoingTypedLinks":{ + "type":"structure", + "required":["ObjectReference"], + "members":{ + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

The reference that identifies the object whose attributes will be listed.

" + }, + "FilterAttributeRanges":{ + "shape":"TypedLinkAttributeRangeList", + "documentation":"

Provides range filters for multiple attributes. When providing ranges to typed link selection, any inexact ranges must be specified at the end. Any attributes that do not have a range specified are presumed to match the entire range.

" + }, + "FilterTypedLink":{ + "shape":"TypedLinkSchemaAndFacetName", + "documentation":"

Filters are interpreted in the order of the attributes defined on the typed link facet, not the order they are supplied to any API calls.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + } + }, + "documentation":"

Returns a paginated list of all the outgoing TypedLinkSpecifier information for an object inside a BatchRead operation. For more information, see ListOutgoingTypedLinks and BatchReadRequest$Operations.

" + }, + "BatchListOutgoingTypedLinksResponse":{ + "type":"structure", + "members":{ + "TypedLinkSpecifiers":{ + "shape":"TypedLinkSpecifierList", + "documentation":"

Returns a typed link specifier as output.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + }, + "documentation":"

Represents the output of a ListOutgoingTypedLinks response operation.

" + }, + "BatchListPolicyAttachments":{ + "type":"structure", + "required":["PolicyReference"], + "members":{ + "PolicyReference":{ + "shape":"ObjectReference", + "documentation":"

The reference that identifies the policy object.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + } + }, + "documentation":"

Returns all of the ObjectIdentifiers to which a given policy is attached inside a BatchRead operation. For more information, see ListPolicyAttachments and BatchReadRequest$Operations.

" + }, + "BatchListPolicyAttachmentsResponse":{ + "type":"structure", + "members":{ + "ObjectIdentifiers":{ + "shape":"ObjectIdentifierList", + "documentation":"

A list of ObjectIdentifiers to which the policy is attached.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + }, + "documentation":"

Represents the output of a ListPolicyAttachments response operation.

" + }, + "BatchLookupPolicy":{ + "type":"structure", + "required":["ObjectReference"], + "members":{ + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

Reference that identifies the object whose policies will be looked up.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + } + }, + "documentation":"

Lists all policies from the root of the Directory to the object specified inside a BatchRead operation. For more information, see LookupPolicy and BatchReadRequest$Operations.

" + }, + "BatchLookupPolicyResponse":{ + "type":"structure", + "members":{ + "PolicyToPathList":{ + "shape":"PolicyToPathList", + "documentation":"

Provides list of path to policies. Policies contain PolicyId, ObjectIdentifier, and PolicyType. For more information, see Policies.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + }, + "documentation":"

Represents the output of a LookupPolicy response operation.

" + }, + "BatchOperationIndex":{"type":"integer"}, + "BatchReadException":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"BatchReadExceptionType", + "documentation":"

A type of exception, such as InvalidArnException.

" + }, + "Message":{ + "shape":"ExceptionMessage", + "documentation":"

An exception message that is associated with the failure.

" + } + }, + "documentation":"

The batch read exception structure, which contains the exception type and message.

" + }, + "BatchReadExceptionType":{ + "type":"string", + "enum":[ + "ValidationException", + "InvalidArnException", + "ResourceNotFoundException", + "InvalidNextTokenException", + "AccessDeniedException", + "NotNodeException", + "FacetValidationException", + "CannotListParentOfRootException", + "NotIndexException", + "NotPolicyException", + "DirectoryNotEnabledException", + "LimitExceededException", + "InternalServiceException" + ] + }, + "BatchReadOperation":{ + "type":"structure", + "members":{ + "ListObjectAttributes":{ + "shape":"BatchListObjectAttributes", + "documentation":"

Lists all attributes that are associated with an object.

" + }, + "ListObjectChildren":{ + "shape":"BatchListObjectChildren", + "documentation":"

Returns a paginated list of child objects that are associated with a given object.

" + }, + "ListAttachedIndices":{ + "shape":"BatchListAttachedIndices", + "documentation":"

Lists indices attached to an object.

" + }, + "ListObjectParentPaths":{ + "shape":"BatchListObjectParentPaths", + "documentation":"

Retrieves all available parent paths for any object type such as node, leaf node, policy node, and index node objects. For more information about objects, see Directory Structure.

" + }, + "GetObjectInformation":{ + "shape":"BatchGetObjectInformation", + "documentation":"

Retrieves metadata about an object.

" + }, + "GetObjectAttributes":{ + "shape":"BatchGetObjectAttributes", + "documentation":"

Retrieves attributes within a facet that are associated with an object.

" + }, + "ListObjectPolicies":{ + "shape":"BatchListObjectPolicies", + "documentation":"

Returns policies attached to an object in pagination fashion.

" + }, + "ListPolicyAttachments":{ + "shape":"BatchListPolicyAttachments", + "documentation":"

Returns all of the ObjectIdentifiers to which a given policy is attached.

" + }, + "LookupPolicy":{ + "shape":"BatchLookupPolicy", + "documentation":"

Lists all policies from the root of the Directory to the object specified. If there are no policies present, an empty list is returned. If policies are present, and if some objects don't have the policies attached, it returns the ObjectIdentifier for such objects. If policies are present, it returns ObjectIdentifier, policyId, and policyType. Paths that don't lead to the root from the target object are ignored. For more information, see Policies.

" + }, + "ListIndex":{ + "shape":"BatchListIndex", + "documentation":"

Lists objects attached to the specified index.

" + }, + "ListOutgoingTypedLinks":{ + "shape":"BatchListOutgoingTypedLinks", + "documentation":"

Returns a paginated list of all the outgoing TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed link.

" + }, + "ListIncomingTypedLinks":{ + "shape":"BatchListIncomingTypedLinks", + "documentation":"

Returns a paginated list of all the incoming TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed link.

" + }, + "GetLinkAttributes":{ + "shape":"BatchGetLinkAttributes", + "documentation":"

Retrieves attributes that are associated with a typed link.

" + } + }, + "documentation":"

Represents the output of a BatchRead operation.

" + }, + "BatchReadOperationList":{ + "type":"list", + "member":{"shape":"BatchReadOperation"} + }, + "BatchReadOperationResponse":{ + "type":"structure", + "members":{ + "SuccessfulResponse":{ + "shape":"BatchReadSuccessfulResponse", + "documentation":"

Identifies which operation in a batch has succeeded.

" + }, + "ExceptionResponse":{ + "shape":"BatchReadException", + "documentation":"

Identifies which operation in a batch has failed.

" + } + }, + "documentation":"

Represents the output of a BatchRead response operation.

" + }, + "BatchReadOperationResponseList":{ + "type":"list", + "member":{"shape":"BatchReadOperationResponse"} + }, + "BatchReadRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "Operations" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the Directory. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "Operations":{ + "shape":"BatchReadOperationList", + "documentation":"

A list of operations that are part of the batch.

" + }, + "ConsistencyLevel":{ + "shape":"ConsistencyLevel", + "documentation":"

Represents the manner and timing in which the successful write or update of an object is reflected in a subsequent read operation of that same object.

", + "location":"header", + "locationName":"x-amz-consistency-level" + } + } + }, + "BatchReadResponse":{ + "type":"structure", + "members":{ + "Responses":{ + "shape":"BatchReadOperationResponseList", + "documentation":"

A list of all the responses for each batch read.

" + } + } + }, + "BatchReadSuccessfulResponse":{ + "type":"structure", + "members":{ + "ListObjectAttributes":{ + "shape":"BatchListObjectAttributesResponse", + "documentation":"

Lists all attributes that are associated with an object.

" + }, + "ListObjectChildren":{ + "shape":"BatchListObjectChildrenResponse", + "documentation":"

Returns a paginated list of child objects that are associated with a given object.

" + }, + "GetObjectInformation":{ + "shape":"BatchGetObjectInformationResponse", + "documentation":"

Retrieves metadata about an object.

" + }, + "GetObjectAttributes":{ + "shape":"BatchGetObjectAttributesResponse", + "documentation":"

Retrieves attributes within a facet that are associated with an object.

" + }, + "ListAttachedIndices":{ + "shape":"BatchListAttachedIndicesResponse", + "documentation":"

Lists indices attached to an object.

" + }, + "ListObjectParentPaths":{ + "shape":"BatchListObjectParentPathsResponse", + "documentation":"

Retrieves all available parent paths for any object type such as node, leaf node, policy node, and index node objects. For more information about objects, see Directory Structure.

" + }, + "ListObjectPolicies":{ + "shape":"BatchListObjectPoliciesResponse", + "documentation":"

Returns policies attached to an object in pagination fashion.

" + }, + "ListPolicyAttachments":{ + "shape":"BatchListPolicyAttachmentsResponse", + "documentation":"

Returns all of the ObjectIdentifiers to which a given policy is attached.

" + }, + "LookupPolicy":{ + "shape":"BatchLookupPolicyResponse", + "documentation":"

Lists all policies from the root of the Directory to the object specified. If there are no policies present, an empty list is returned. If policies are present, and if some objects don't have the policies attached, it returns the ObjectIdentifier for such objects. If policies are present, it returns ObjectIdentifier, policyId, and policyType. Paths that don't lead to the root from the target object are ignored. For more information, see Policies.

" + }, + "ListIndex":{ + "shape":"BatchListIndexResponse", + "documentation":"

Lists objects attached to the specified index.

" + }, + "ListOutgoingTypedLinks":{ + "shape":"BatchListOutgoingTypedLinksResponse", + "documentation":"

Returns a paginated list of all the outgoing TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed link.

" + }, + "ListIncomingTypedLinks":{ + "shape":"BatchListIncomingTypedLinksResponse", + "documentation":"

Returns a paginated list of all the incoming TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed link.

" + }, + "GetLinkAttributes":{ + "shape":"BatchGetLinkAttributesResponse", + "documentation":"

The list of attributes to retrieve from the typed link.

" + } + }, + "documentation":"

Represents the output of a BatchRead success response operation.

" + }, + "BatchReferenceName":{"type":"string"}, + "BatchRemoveFacetFromObject":{ + "type":"structure", + "required":[ + "SchemaFacet", + "ObjectReference" + ], + "members":{ + "SchemaFacet":{ + "shape":"SchemaFacet", + "documentation":"

The facet to remove from the object.

" + }, + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

A reference to the object whose facet will be removed.

" + } + }, + "documentation":"

A batch operation to remove a facet from an object.

" + }, + "BatchRemoveFacetFromObjectResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

An empty result that represents success.

" + }, + "BatchUpdateLinkAttributes":{ + "type":"structure", + "required":[ + "TypedLinkSpecifier", + "AttributeUpdates" + ], + "members":{ + "TypedLinkSpecifier":{ + "shape":"TypedLinkSpecifier", + "documentation":"

Allows a typed link specifier to be accepted as input.

" + }, + "AttributeUpdates":{ + "shape":"LinkAttributeUpdateList", + "documentation":"

The attributes update structure.

" + } + }, + "documentation":"

Updates a given typed link’s attributes inside a BatchWrite operation. Attributes to be updated must not contribute to the typed link’s identity, as defined by its IdentityAttributeOrder. For more information, see UpdateLinkAttributes and BatchWriteRequest$Operations.

" + }, + "BatchUpdateLinkAttributesResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

Represents the output of a UpdateLinkAttributes response operation.

" + }, + "BatchUpdateObjectAttributes":{ + "type":"structure", + "required":[ + "ObjectReference", + "AttributeUpdates" + ], + "members":{ + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

Reference that identifies the object.

" + }, + "AttributeUpdates":{ + "shape":"ObjectAttributeUpdateList", + "documentation":"

Attributes update structure.

" + } + }, + "documentation":"

Represents the output of a BatchUpdate operation.

" + }, + "BatchUpdateObjectAttributesResponse":{ + "type":"structure", + "members":{ + "ObjectIdentifier":{ + "shape":"ObjectIdentifier", + "documentation":"

ID that is associated with the object.

" + } + }, + "documentation":"

Represents the output of a BatchUpdate response operation.

" + }, + "BatchWriteException":{ + "type":"structure", + "members":{ + "Index":{"shape":"BatchOperationIndex"}, + "Type":{"shape":"BatchWriteExceptionType"}, + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

A BatchWrite exception has occurred.

", + "exception":true + }, + "BatchWriteExceptionType":{ + "type":"string", + "enum":[ + "InternalServiceException", + "ValidationException", + "InvalidArnException", + "LinkNameAlreadyInUseException", + "StillContainsLinksException", + "FacetValidationException", + "ObjectNotDetachedException", + "ResourceNotFoundException", + "AccessDeniedException", + "InvalidAttachmentException", + "NotIndexException", + "NotNodeException", + "IndexedAttributeMissingException", + "ObjectAlreadyDetachedException", + "NotPolicyException", + "DirectoryNotEnabledException", + "LimitExceededException", + "UnsupportedIndexTypeException" + ] + }, + "BatchWriteOperation":{ + "type":"structure", + "members":{ + "CreateObject":{ + "shape":"BatchCreateObject", + "documentation":"

Creates an object.

" + }, + "AttachObject":{ + "shape":"BatchAttachObject", + "documentation":"

Attaches an object to a Directory.

" + }, + "DetachObject":{ + "shape":"BatchDetachObject", + "documentation":"

Detaches an object from a Directory.

" + }, + "UpdateObjectAttributes":{ + "shape":"BatchUpdateObjectAttributes", + "documentation":"

Updates a given object's attributes.

" + }, + "DeleteObject":{ + "shape":"BatchDeleteObject", + "documentation":"

Deletes an object in a Directory.

" + }, + "AddFacetToObject":{ + "shape":"BatchAddFacetToObject", + "documentation":"

A batch operation that adds a facet to an object.

" + }, + "RemoveFacetFromObject":{ + "shape":"BatchRemoveFacetFromObject", + "documentation":"

A batch operation that removes a facet from an object.

" + }, + "AttachPolicy":{ + "shape":"BatchAttachPolicy", + "documentation":"

Attaches a policy object to a regular object. An object can have a limited number of attached policies.

" + }, + "DetachPolicy":{ + "shape":"BatchDetachPolicy", + "documentation":"

Detaches a policy from a Directory.

" + }, + "CreateIndex":{ + "shape":"BatchCreateIndex", + "documentation":"

Creates an index object. See Indexing for more information.

" + }, + "AttachToIndex":{ + "shape":"BatchAttachToIndex", + "documentation":"

Attaches the specified object to the specified index.

" + }, + "DetachFromIndex":{ + "shape":"BatchDetachFromIndex", + "documentation":"

Detaches the specified object from the specified index.

" + }, + "AttachTypedLink":{ + "shape":"BatchAttachTypedLink", + "documentation":"

Attaches a typed link to a specified source and target object. For more information, see Typed link.

" + }, + "DetachTypedLink":{ + "shape":"BatchDetachTypedLink", + "documentation":"

Detaches a typed link from a specified source and target object. For more information, see Typed link.

" + }, + "UpdateLinkAttributes":{ + "shape":"BatchUpdateLinkAttributes", + "documentation":"

Updates a given object's attributes.

" + } + }, + "documentation":"

Represents the output of a BatchWrite operation.

" + }, + "BatchWriteOperationList":{ + "type":"list", + "member":{"shape":"BatchWriteOperation"} + }, + "BatchWriteOperationResponse":{ + "type":"structure", + "members":{ + "CreateObject":{ + "shape":"BatchCreateObjectResponse", + "documentation":"

Creates an object in a Directory.

" + }, + "AttachObject":{ + "shape":"BatchAttachObjectResponse", + "documentation":"

Attaches an object to a Directory.

" + }, + "DetachObject":{ + "shape":"BatchDetachObjectResponse", + "documentation":"

Detaches an object from a Directory.

" + }, + "UpdateObjectAttributes":{ + "shape":"BatchUpdateObjectAttributesResponse", + "documentation":"

Updates a given object's attributes.

" + }, + "DeleteObject":{ + "shape":"BatchDeleteObjectResponse", + "documentation":"

Deletes an object in a Directory.

" + }, + "AddFacetToObject":{ + "shape":"BatchAddFacetToObjectResponse", + "documentation":"

The result of an add facet to object batch operation.

" + }, + "RemoveFacetFromObject":{ + "shape":"BatchRemoveFacetFromObjectResponse", + "documentation":"

The result of a batch remove facet from object operation.

" + }, + "AttachPolicy":{ + "shape":"BatchAttachPolicyResponse", + "documentation":"

Attaches a policy object to a regular object. An object can have a limited number of attached policies.

" + }, + "DetachPolicy":{ + "shape":"BatchDetachPolicyResponse", + "documentation":"

Detaches a policy from a Directory.

" + }, + "CreateIndex":{ + "shape":"BatchCreateIndexResponse", + "documentation":"

Creates an index object. See Indexing for more information.

" + }, + "AttachToIndex":{ + "shape":"BatchAttachToIndexResponse", + "documentation":"

Attaches the specified object to the specified index.

" + }, + "DetachFromIndex":{ + "shape":"BatchDetachFromIndexResponse", + "documentation":"

Detaches the specified object from the specified index.

" + }, + "AttachTypedLink":{ + "shape":"BatchAttachTypedLinkResponse", + "documentation":"

Attaches a typed link to a specified source and target object. For more information, see Typed link.

" + }, + "DetachTypedLink":{ + "shape":"BatchDetachTypedLinkResponse", + "documentation":"

Detaches a typed link from a specified source and target object. For more information, see Typed link.

" + }, + "UpdateLinkAttributes":{ + "shape":"BatchUpdateLinkAttributesResponse", + "documentation":"

Represents the output of a UpdateLinkAttributes response operation.

" + } + }, + "documentation":"

Represents the output of a BatchWrite response operation.

" + }, + "BatchWriteOperationResponseList":{ + "type":"list", + "member":{"shape":"BatchWriteOperationResponse"} + }, + "BatchWriteRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "Operations" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the Directory. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "Operations":{ + "shape":"BatchWriteOperationList", + "documentation":"

A list of operations that are part of the batch.

" + } + } + }, + "BatchWriteResponse":{ + "type":"structure", + "members":{ + "Responses":{ + "shape":"BatchWriteOperationResponseList", + "documentation":"

A list of all the responses for each batch write.

" + } + } + }, + "BinaryAttributeValue":{"type":"blob"}, + "Bool":{"type":"boolean"}, + "BooleanAttributeValue":{"type":"boolean"}, + "CannotListParentOfRootException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Cannot list the parents of a Directory root.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ConsistencyLevel":{ + "type":"string", + "enum":[ + "SERIALIZABLE", + "EVENTUAL" + ] + }, + "CreateDirectoryRequest":{ + "type":"structure", + "required":[ + "Name", + "SchemaArn" + ], + "members":{ + "Name":{ + "shape":"DirectoryName", + "documentation":"

The name of the Directory. Should be unique per account, per region.

" + }, + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the published schema that will be copied into the data Directory. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + } + } + }, + "CreateDirectoryResponse":{ + "type":"structure", + "required":[ + "DirectoryArn", + "Name", + "ObjectIdentifier", + "AppliedSchemaArn" + ], + "members":{ + "DirectoryArn":{ + "shape":"DirectoryArn", + "documentation":"

The ARN that is associated with the Directory. For more information, see arns.

" + }, + "Name":{ + "shape":"DirectoryName", + "documentation":"

The name of the Directory.

" + }, + "ObjectIdentifier":{ + "shape":"ObjectIdentifier", + "documentation":"

The root object node of the created directory.

" + }, + "AppliedSchemaArn":{ + "shape":"Arn", + "documentation":"

The ARN of the published schema in the Directory. Once a published schema is copied into the directory, it has its own ARN, which is referred to applied schema ARN. For more information, see arns.

" + } + } + }, + "CreateFacetRequest":{ + "type":"structure", + "required":[ + "SchemaArn", + "Name" + ], + "members":{ + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The schema ARN in which the new Facet will be created. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "Name":{ + "shape":"FacetName", + "documentation":"

The name of the Facet, which is unique for a given schema.

" + }, + "Attributes":{ + "shape":"FacetAttributeList", + "documentation":"

The attributes that are associated with the Facet.

" + }, + "ObjectType":{ + "shape":"ObjectType", + "documentation":"

Specifies whether a given object created from this facet is of type node, leaf node, policy or index.

" + }, + "FacetStyle":{ + "shape":"FacetStyle", + "documentation":"

There are two different styles that you can define on any given facet, Static and Dynamic. For static facets, all attributes must be defined in the schema. For dynamic facets, attributes can be defined during data plane operations.

" + } + } + }, + "CreateFacetResponse":{ + "type":"structure", + "members":{ + } + }, + "CreateIndexRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "OrderedIndexedAttributeList", + "IsUnique" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The ARN of the directory where the index should be created.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "OrderedIndexedAttributeList":{ + "shape":"AttributeKeyList", + "documentation":"

Specifies the attributes that should be indexed on. Currently only a single attribute is supported.

" + }, + "IsUnique":{ + "shape":"Bool", + "documentation":"

Indicates whether the attribute that is being indexed has unique values or not.

" + }, + "ParentReference":{ + "shape":"ObjectReference", + "documentation":"

A reference to the parent object that contains the index object.

" + }, + "LinkName":{ + "shape":"LinkName", + "documentation":"

The name of the link between the parent object and the index object.

" + } + } + }, + "CreateIndexResponse":{ + "type":"structure", + "members":{ + "ObjectIdentifier":{ + "shape":"ObjectIdentifier", + "documentation":"

The ObjectIdentifier of the index created by this operation.

" + } + } + }, + "CreateObjectRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "SchemaFacets" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the Directory in which the object will be created. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "SchemaFacets":{ + "shape":"SchemaFacetList", + "documentation":"

A list of schema facets to be associated with the object. Do not provide minor version components. See SchemaFacet for details.

" + }, + "ObjectAttributeList":{ + "shape":"AttributeKeyAndValueList", + "documentation":"

The attribute map whose attribute ARN contains the key and attribute value as the map value.

" + }, + "ParentReference":{ + "shape":"ObjectReference", + "documentation":"

If specified, the parent reference to which this object will be attached.

" + }, + "LinkName":{ + "shape":"LinkName", + "documentation":"

The name of link that is used to attach this object to a parent.

" + } + } + }, + "CreateObjectResponse":{ + "type":"structure", + "members":{ + "ObjectIdentifier":{ + "shape":"ObjectIdentifier", + "documentation":"

The identifier that is associated with the object.

" + } + } + }, + "CreateSchemaRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"SchemaName", + "documentation":"

The name that is associated with the schema. This is unique to each account and in each region.

" + } + } + }, + "CreateSchemaResponse":{ + "type":"structure", + "members":{ + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the schema. For more information, see arns.

" + } + } + }, + "CreateTypedLinkFacetRequest":{ + "type":"structure", + "required":[ + "SchemaArn", + "Facet" + ], + "members":{ + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the schema. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "Facet":{ + "shape":"TypedLinkFacet", + "documentation":"

Facet structure that is associated with the typed link facet.

" + } + } + }, + "CreateTypedLinkFacetResponse":{ + "type":"structure", + "members":{ + } + }, + "Date":{"type":"timestamp"}, + "DatetimeAttributeValue":{"type":"timestamp"}, + "DeleteDirectoryRequest":{ + "type":"structure", + "required":["DirectoryArn"], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The ARN of the directory to delete.

", + "location":"header", + "locationName":"x-amz-data-partition" + } + } + }, + "DeleteDirectoryResponse":{ + "type":"structure", + "required":["DirectoryArn"], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The ARN of the deleted directory.

" + } + } + }, + "DeleteFacetRequest":{ + "type":"structure", + "required":[ + "SchemaArn", + "Name" + ], + "members":{ + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the Facet. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "Name":{ + "shape":"FacetName", + "documentation":"

The name of the facet to delete.

" + } + } + }, + "DeleteFacetResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteObjectRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "ObjectReference" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the Directory where the object resides. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

A reference that identifies the object.

" + } + } + }, + "DeleteObjectResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteSchemaRequest":{ + "type":"structure", + "required":["SchemaArn"], + "members":{ + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the development schema. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + } + } + }, + "DeleteSchemaResponse":{ + "type":"structure", + "members":{ + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The input ARN that is returned as part of the response. For more information, see arns.

" + } + } + }, + "DeleteTypedLinkFacetRequest":{ + "type":"structure", + "required":[ + "SchemaArn", + "Name" + ], + "members":{ + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the schema. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "Name":{ + "shape":"TypedLinkName", + "documentation":"

The unique name of the typed link facet.

" + } + } + }, + "DeleteTypedLinkFacetResponse":{ + "type":"structure", + "members":{ + } + }, + "DetachFromIndexRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "IndexReference", + "TargetReference" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the directory the index and object exist in.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "IndexReference":{ + "shape":"ObjectReference", + "documentation":"

A reference to the index object.

" + }, + "TargetReference":{ + "shape":"ObjectReference", + "documentation":"

A reference to the object being detached from the index.

" + } + } + }, + "DetachFromIndexResponse":{ + "type":"structure", + "members":{ + "DetachedObjectIdentifier":{ + "shape":"ObjectIdentifier", + "documentation":"

The ObjectIdentifier of the object that was detached from the index.

" + } + } + }, + "DetachObjectRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "ParentReference", + "LinkName" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the Directory where objects reside. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "ParentReference":{ + "shape":"ObjectReference", + "documentation":"

The parent reference from which the object with the specified link name is detached.

" + }, + "LinkName":{ + "shape":"LinkName", + "documentation":"

The link name associated with the object that needs to be detached.

" + } + } + }, + "DetachObjectResponse":{ + "type":"structure", + "members":{ + "DetachedObjectIdentifier":{ + "shape":"ObjectIdentifier", + "documentation":"

The ObjectIdentifier that was detached from the object.

" + } + } + }, + "DetachPolicyRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "PolicyReference", + "ObjectReference" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the Directory where both objects reside. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "PolicyReference":{ + "shape":"ObjectReference", + "documentation":"

Reference that identifies the policy object.

" + }, + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

Reference that identifies the object whose policy object will be detached.

" + } + } + }, + "DetachPolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "DetachTypedLinkRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "TypedLinkSpecifier" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the directory where you want to detach the typed link.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "TypedLinkSpecifier":{ + "shape":"TypedLinkSpecifier", + "documentation":"

Used to accept a typed link specifier as input.

" + } + } + }, + "Directory":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"DirectoryName", + "documentation":"

The name of the directory.

" + }, + "DirectoryArn":{ + "shape":"DirectoryArn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the directory. For more information, see arns.

" + }, + "State":{ + "shape":"DirectoryState", + "documentation":"

The state of the directory. Can be either Enabled, Disabled, or Deleted.

" + }, + "CreationDateTime":{ + "shape":"Date", + "documentation":"

The date and time when the directory was created.

" + } + }, + "documentation":"

Directory structure that includes the directory name and directory ARN.

" + }, + "DirectoryAlreadyExistsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Indicates that a Directory could not be created due to a naming conflict. Choose a different name and try again.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "DirectoryArn":{"type":"string"}, + "DirectoryDeletedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

A directory that has been deleted and to which access has been attempted. Note: The requested resource will eventually cease to exist.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "DirectoryList":{ + "type":"list", + "member":{"shape":"Directory"} + }, + "DirectoryName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9._-]*$" + }, + "DirectoryNotDisabledException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

An operation can only operate on a disabled directory.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "DirectoryNotEnabledException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Operations are only permitted on enabled directories.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "DirectoryState":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED", + "DELETED" + ] + }, + "DisableDirectoryRequest":{ + "type":"structure", + "required":["DirectoryArn"], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The ARN of the directory to disable.

", + "location":"header", + "locationName":"x-amz-data-partition" + } + } + }, + "DisableDirectoryResponse":{ + "type":"structure", + "required":["DirectoryArn"], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The ARN of the directory that has been disabled.

" + } + } + }, + "EnableDirectoryRequest":{ + "type":"structure", + "required":["DirectoryArn"], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The ARN of the directory to enable.

", + "location":"header", + "locationName":"x-amz-data-partition" + } + } + }, + "EnableDirectoryResponse":{ + "type":"structure", + "required":["DirectoryArn"], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The ARN of the enabled directory.

" + } + } + }, + "ExceptionMessage":{"type":"string"}, + "Facet":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"FacetName", + "documentation":"

The name of the Facet.

" + }, + "ObjectType":{ + "shape":"ObjectType", + "documentation":"

The object type that is associated with the facet. See CreateFacetRequest$ObjectType for more details.

" + }, + "FacetStyle":{ + "shape":"FacetStyle", + "documentation":"

There are two different styles that you can define on any given facet, Static and Dynamic. For static facets, all attributes must be defined in the schema. For dynamic facets, attributes can be defined during data plane operations.

" + } + }, + "documentation":"

A structure that contains Name, ARN, Attributes, Rules, and ObjectTypes. See Facets for more information.

" + }, + "FacetAlreadyExistsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

A facet with the same name already exists.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "FacetAttribute":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"AttributeName", + "documentation":"

The name of the facet attribute.

" + }, + "AttributeDefinition":{ + "shape":"FacetAttributeDefinition", + "documentation":"

A facet attribute consists of either a definition or a reference. This structure contains the attribute definition. See Attribute References for more information.

" + }, + "AttributeReference":{ + "shape":"FacetAttributeReference", + "documentation":"

An attribute reference that is associated with the attribute. See Attribute References for more information.

" + }, + "RequiredBehavior":{ + "shape":"RequiredAttributeBehavior", + "documentation":"

The required behavior of the FacetAttribute.

" + } + }, + "documentation":"

An attribute that is associated with the Facet.

" + }, + "FacetAttributeDefinition":{ + "type":"structure", + "required":["Type"], + "members":{ + "Type":{ + "shape":"FacetAttributeType", + "documentation":"

The type of the attribute.

" + }, + "DefaultValue":{ + "shape":"TypedAttributeValue", + "documentation":"

The default value of the attribute (if configured).

" + }, + "IsImmutable":{ + "shape":"Bool", + "documentation":"

Whether the attribute is mutable or not.

" + }, + "Rules":{ + "shape":"RuleMap", + "documentation":"

Validation rules attached to the attribute definition.

" + } + }, + "documentation":"

A facet attribute definition. See Attribute References for more information.

" + }, + "FacetAttributeList":{ + "type":"list", + "member":{"shape":"FacetAttribute"} + }, + "FacetAttributeReference":{ + "type":"structure", + "required":[ + "TargetFacetName", + "TargetAttributeName" + ], + "members":{ + "TargetFacetName":{ + "shape":"FacetName", + "documentation":"

The target facet name that is associated with the facet reference. See Attribute References for more information.

" + }, + "TargetAttributeName":{ + "shape":"AttributeName", + "documentation":"

The target attribute name that is associated with the facet reference. See Attribute References for more information.

" + } + }, + "documentation":"

The facet attribute reference that specifies the attribute definition that contains the attribute facet name and attribute name.

" + }, + "FacetAttributeType":{ + "type":"string", + "enum":[ + "STRING", + "BINARY", + "BOOLEAN", + "NUMBER", + "DATETIME", + "VARIANT" + ] + }, + "FacetAttributeUpdate":{ + "type":"structure", + "members":{ + "Attribute":{ + "shape":"FacetAttribute", + "documentation":"

The attribute to update.

" + }, + "Action":{ + "shape":"UpdateActionType", + "documentation":"

The action to perform when updating the attribute.

" + } + }, + "documentation":"

A structure that contains information used to update an attribute.

" + }, + "FacetAttributeUpdateList":{ + "type":"list", + "member":{"shape":"FacetAttributeUpdate"} + }, + "FacetInUseException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Occurs when deleting a facet that contains an attribute that is a target to an attribute reference in a different facet.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "FacetName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9._-]*$" + }, + "FacetNameList":{ + "type":"list", + "member":{"shape":"FacetName"} + }, + "FacetNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The specified Facet could not be found.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "FacetStyle":{ + "type":"string", + "enum":[ + "STATIC", + "DYNAMIC" + ] + }, + "FacetValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The Facet that you provided was not well formed or could not be validated with the schema.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "GetAppliedSchemaVersionRequest":{ + "type":"structure", + "required":["SchemaArn"], + "members":{ + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The ARN of the applied schema.

" + } + } + }, + "GetAppliedSchemaVersionResponse":{ + "type":"structure", + "members":{ + "AppliedSchemaArn":{ + "shape":"Arn", + "documentation":"

Current applied schema ARN, including the minor version in use if one was provided.

" + } + } + }, + "GetDirectoryRequest":{ + "type":"structure", + "required":["DirectoryArn"], + "members":{ + "DirectoryArn":{ + "shape":"DirectoryArn", + "documentation":"

The ARN of the directory.

", + "location":"header", + "locationName":"x-amz-data-partition" + } + } + }, + "GetDirectoryResponse":{ + "type":"structure", + "required":["Directory"], + "members":{ + "Directory":{ + "shape":"Directory", + "documentation":"

Metadata about the directory.

" + } + } + }, + "GetFacetRequest":{ + "type":"structure", + "required":[ + "SchemaArn", + "Name" + ], + "members":{ + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the Facet. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "Name":{ + "shape":"FacetName", + "documentation":"

The name of the facet to retrieve.

" + } + } + }, + "GetFacetResponse":{ + "type":"structure", + "members":{ + "Facet":{ + "shape":"Facet", + "documentation":"

The Facet structure that is associated with the facet.

" + } + } + }, + "GetLinkAttributesRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "TypedLinkSpecifier", + "AttributeNames" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the Directory where the typed link resides. For more information, see arns or Typed link.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "TypedLinkSpecifier":{ + "shape":"TypedLinkSpecifier", + "documentation":"

Allows a typed link specifier to be accepted as input.

" + }, + "AttributeNames":{ + "shape":"AttributeNameList", + "documentation":"

A list of attribute names whose values will be retrieved.

" + }, + "ConsistencyLevel":{ + "shape":"ConsistencyLevel", + "documentation":"

The consistency level at which to retrieve the attributes on a typed link.

" + } + } + }, + "GetLinkAttributesResponse":{ + "type":"structure", + "members":{ + "Attributes":{ + "shape":"AttributeKeyAndValueList", + "documentation":"

The attributes that are associated with the typed link.

" + } + } + }, + "GetObjectAttributesRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "ObjectReference", + "SchemaFacet", + "AttributeNames" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the Directory where the object resides.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

Reference that identifies the object whose attributes will be retrieved.

" + }, + "ConsistencyLevel":{ + "shape":"ConsistencyLevel", + "documentation":"

The consistency level at which to retrieve the attributes on an object.

", + "location":"header", + "locationName":"x-amz-consistency-level" + }, + "SchemaFacet":{ + "shape":"SchemaFacet", + "documentation":"

Identifier for the facet whose attributes will be retrieved. See SchemaFacet for details.

" + }, + "AttributeNames":{ + "shape":"AttributeNameList", + "documentation":"

List of attribute names whose values will be retrieved.

" + } + } + }, + "GetObjectAttributesResponse":{ + "type":"structure", + "members":{ + "Attributes":{ + "shape":"AttributeKeyAndValueList", + "documentation":"

The attributes that are associated with the object.

" + } + } + }, + "GetObjectInformationRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "ObjectReference" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The ARN of the directory being retrieved.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

A reference to the object.

" + }, + "ConsistencyLevel":{ + "shape":"ConsistencyLevel", + "documentation":"

The consistency level at which to retrieve the object information.

", + "location":"header", + "locationName":"x-amz-consistency-level" + } + } + }, + "GetObjectInformationResponse":{ + "type":"structure", + "members":{ + "SchemaFacets":{ + "shape":"SchemaFacetList", + "documentation":"

The facets attached to the specified object. Although the response does not include minor version information, the most recently applied minor version of each Facet is in effect. See GetAppliedSchemaVersion for details.

" + }, + "ObjectIdentifier":{ + "shape":"ObjectIdentifier", + "documentation":"

The ObjectIdentifier of the specified object.

" + } + } + }, + "GetSchemaAsJsonRequest":{ + "type":"structure", + "required":["SchemaArn"], + "members":{ + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The ARN of the schema to retrieve.

", + "location":"header", + "locationName":"x-amz-data-partition" + } + } + }, + "GetSchemaAsJsonResponse":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"SchemaName", + "documentation":"

The name of the retrieved schema.

" + }, + "Document":{ + "shape":"SchemaJsonDocument", + "documentation":"

The JSON representation of the schema document.

" + } + } + }, + "GetTypedLinkFacetInformationRequest":{ + "type":"structure", + "required":[ + "SchemaArn", + "Name" + ], + "members":{ + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the schema. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "Name":{ + "shape":"TypedLinkName", + "documentation":"

The unique name of the typed link facet.

" + } + } + }, + "GetTypedLinkFacetInformationResponse":{ + "type":"structure", + "members":{ + "IdentityAttributeOrder":{ + "shape":"AttributeNameList", + "documentation":"

The order of identity attributes for the facet, from most significant to least significant. The ability to filter typed links considers the order that the attributes are defined on the typed link facet. When providing ranges to typed link selection, any inexact ranges must be specified at the end. Any attributes that do not have a range specified are presumed to match the entire range. Filters are interpreted in the order of the attributes on the typed link facet, not the order in which they are supplied to any API calls. For more information about identity attributes, see Typed link.

" + } + } + }, + "IncompatibleSchemaException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Indicates a failure occurred while performing a check for backward compatibility between the specified schema and the schema that is currently applied to the directory.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "IndexAttachment":{ + "type":"structure", + "members":{ + "IndexedAttributes":{ + "shape":"AttributeKeyAndValueList", + "documentation":"

The indexed attribute values.

" + }, + "ObjectIdentifier":{ + "shape":"ObjectIdentifier", + "documentation":"

In response to ListIndex, the ObjectIdentifier of the object attached to the index. In response to ListAttachedIndices, the ObjectIdentifier of the index attached to the object. This field will always contain the ObjectIdentifier of the object on the opposite side of the attachment specified in the query.

" + } + }, + "documentation":"

Represents an index and an attached object.

" + }, + "IndexAttachmentList":{ + "type":"list", + "member":{"shape":"IndexAttachment"} + }, + "IndexedAttributeMissingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

An object has been attempted to be attached to an object that does not have the appropriate attribute value.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InternalServiceException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Indicates a problem that must be resolved by Amazon Web Services. This might be a transient error in which case you can retry your request until it succeeds. Otherwise, go to the AWS Service Health Dashboard site to see if there are any operational issues with the service.

", + "error":{"httpStatusCode":500}, + "exception":true + }, + "InvalidArnException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Indicates that the provided ARN value is not valid.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidAttachmentException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Indicates that an attempt to attach an object with the same link name or to apply a schema with the same name has occurred. Rename the link or the schema and then try again.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidFacetUpdateException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

An attempt to modify a Facet resulted in an invalid schema exception.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidNextTokenException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Indicates that the NextToken value is not valid.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRuleException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Occurs when any of the rule parameter keys or values are invalid.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidSchemaDocException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Indicates that the provided SchemaDoc value is not valid.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidTaggingRequestException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Can occur for multiple reasons such as when you tag a resource that doesn’t exist or if you specify a higher number of tags for a resource than the allowed limit. Allowed limit is 50 tags per resource.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Indicates that limits are exceeded. See Limits for more information.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "LinkAttributeAction":{ + "type":"structure", + "members":{ + "AttributeActionType":{ + "shape":"UpdateActionType", + "documentation":"

A type that can be either UPDATE_OR_CREATE or DELETE.

" + }, + "AttributeUpdateValue":{ + "shape":"TypedAttributeValue", + "documentation":"

The value that you want to update to.

" + } + }, + "documentation":"

The action to take on a typed link attribute value. Updates are only supported for attributes which don’t contribute to link identity.

" + }, + "LinkAttributeUpdate":{ + "type":"structure", + "members":{ + "AttributeKey":{ + "shape":"AttributeKey", + "documentation":"

The key of the attribute being updated.

" + }, + "AttributeAction":{ + "shape":"LinkAttributeAction", + "documentation":"

The action to perform as part of the attribute update.

" + } + }, + "documentation":"

Structure that contains attribute update information.

" + }, + "LinkAttributeUpdateList":{ + "type":"list", + "member":{"shape":"LinkAttributeUpdate"} + }, + "LinkName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[^\\/\\[\\]\\(\\):\\{\\}#@!?\\s\\\\;]+" + }, + "LinkNameAlreadyInUseException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Indicates that a link could not be created due to a naming conflict. Choose a different name and then try again.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "LinkNameToObjectIdentifierMap":{ + "type":"map", + "key":{"shape":"LinkName"}, + "value":{"shape":"ObjectIdentifier"} + }, + "ListAppliedSchemaArnsRequest":{ + "type":"structure", + "required":["DirectoryArn"], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The ARN of the directory you are listing.

" + }, + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The response for ListAppliedSchemaArns when this parameter is used will list all minor version ARNs for a major version.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + } + } + }, + "ListAppliedSchemaArnsResponse":{ + "type":"structure", + "members":{ + "SchemaArns":{ + "shape":"Arns", + "documentation":"

The ARNs of schemas that are applied to the directory.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + } + }, + "ListAttachedIndicesRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "TargetReference" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The ARN of the directory.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "TargetReference":{ + "shape":"ObjectReference", + "documentation":"

A reference to the object that has indices attached.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + }, + "ConsistencyLevel":{ + "shape":"ConsistencyLevel", + "documentation":"

The consistency level to use for this operation.

", + "location":"header", + "locationName":"x-amz-consistency-level" + } + } + }, + "ListAttachedIndicesResponse":{ + "type":"structure", + "members":{ + "IndexAttachments":{ + "shape":"IndexAttachmentList", + "documentation":"

The indices attached to the specified object.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + } + }, + "ListDevelopmentSchemaArnsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + } + } + }, + "ListDevelopmentSchemaArnsResponse":{ + "type":"structure", + "members":{ + "SchemaArns":{ + "shape":"Arns", + "documentation":"

The ARNs of retrieved development schemas.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + } + }, + "ListDirectoriesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + }, + "state":{ + "shape":"DirectoryState", + "documentation":"

The state of the directories in the list. Can be either Enabled, Disabled, or Deleted.

" + } + } + }, + "ListDirectoriesResponse":{ + "type":"structure", + "required":["Directories"], + "members":{ + "Directories":{ + "shape":"DirectoryList", + "documentation":"

Lists all directories that are associated with your account in pagination fashion.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + } + }, + "ListFacetAttributesRequest":{ + "type":"structure", + "required":[ + "SchemaArn", + "Name" + ], + "members":{ + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The ARN of the schema where the facet resides.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "Name":{ + "shape":"FacetName", + "documentation":"

The name of the facet whose attributes will be retrieved.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + } + } + }, + "ListFacetAttributesResponse":{ + "type":"structure", + "members":{ + "Attributes":{ + "shape":"FacetAttributeList", + "documentation":"

The attributes attached to the facet.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + } + }, + "ListFacetNamesRequest":{ + "type":"structure", + "required":["SchemaArn"], + "members":{ + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) to retrieve facet names from.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + } + } + }, + "ListFacetNamesResponse":{ + "type":"structure", + "members":{ + "FacetNames":{ + "shape":"FacetNameList", + "documentation":"

The names of facets that exist within the schema.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + } + }, + "ListIncomingTypedLinksRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "ObjectReference" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the directory where you want to list the typed links.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

Reference that identifies the object whose attributes will be listed.

" + }, + "FilterAttributeRanges":{ + "shape":"TypedLinkAttributeRangeList", + "documentation":"

Provides range filters for multiple attributes. When providing ranges to typed link selection, any inexact ranges must be specified at the end. Any attributes that do not have a range specified are presumed to match the entire range.

" + }, + "FilterTypedLink":{ + "shape":"TypedLinkSchemaAndFacetName", + "documentation":"

Filters are interpreted in the order of the attributes on the typed link facet, not the order in which they are supplied to any API calls.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + }, + "ConsistencyLevel":{ + "shape":"ConsistencyLevel", + "documentation":"

The consistency level to execute the request at.

" + } + } + }, + "ListIncomingTypedLinksResponse":{ + "type":"structure", + "members":{ + "LinkSpecifiers":{ + "shape":"TypedLinkSpecifierList", + "documentation":"

Returns one or more typed link specifiers as output.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + } + }, + "ListIndexRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "IndexReference" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The ARN of the directory that the index exists in.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "RangesOnIndexedValues":{ + "shape":"ObjectAttributeRangeList", + "documentation":"

Specifies the ranges of indexed values that you want to query.

" + }, + "IndexReference":{ + "shape":"ObjectReference", + "documentation":"

The reference to the index to list.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of objects in a single page to retrieve from the index during a request. For more information, see AWS Directory Service Limits.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "ConsistencyLevel":{ + "shape":"ConsistencyLevel", + "documentation":"

The consistency level to execute the request at.

", + "location":"header", + "locationName":"x-amz-consistency-level" + } + } + }, + "ListIndexResponse":{ + "type":"structure", + "members":{ + "IndexAttachments":{ + "shape":"IndexAttachmentList", + "documentation":"

The objects and indexed values attached to the index.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + } + }, + "ListManagedSchemaArnsRequest":{ + "type":"structure", + "members":{ + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The response for ListManagedSchemaArns. When this parameter is used, all minor version ARNs for a major version are listed.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + } + } + }, + "ListManagedSchemaArnsResponse":{ + "type":"structure", + "members":{ + "SchemaArns":{ + "shape":"Arns", + "documentation":"

The ARNs for all AWS managed schemas.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + } + }, + "ListObjectAttributesRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "ObjectReference" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the Directory where the object resides. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

The reference that identifies the object whose attributes will be listed.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of items to be retrieved in a single call. This is an approximate number.

" + }, + "ConsistencyLevel":{ + "shape":"ConsistencyLevel", + "documentation":"

Represents the manner and timing in which the successful write or update of an object is reflected in a subsequent read operation of that same object.

", + "location":"header", + "locationName":"x-amz-consistency-level" + }, + "FacetFilter":{ + "shape":"SchemaFacet", + "documentation":"

Used to filter the list of object attributes that are associated with a certain facet.

" + } + } + }, + "ListObjectAttributesResponse":{ + "type":"structure", + "members":{ + "Attributes":{ + "shape":"AttributeKeyAndValueList", + "documentation":"

Attributes map that is associated with the object. AttributeArn is the key, and attribute value is the value.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + } + }, + "ListObjectChildrenRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "ObjectReference" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the Directory where the object resides. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

The reference that identifies the object for which child objects are being listed.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of items to be retrieved in a single call. This is an approximate number.

" + }, + "ConsistencyLevel":{ + "shape":"ConsistencyLevel", + "documentation":"

Represents the manner and timing in which the successful write or update of an object is reflected in a subsequent read operation of that same object.

", + "location":"header", + "locationName":"x-amz-consistency-level" + } + } + }, + "ListObjectChildrenResponse":{ + "type":"structure", + "members":{ + "Children":{ + "shape":"LinkNameToObjectIdentifierMap", + "documentation":"

Children structure, which is a map with key as the LinkName and ObjectIdentifier as the value.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + } + }, + "ListObjectParentPathsRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "ObjectReference" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The ARN of the directory to which the parent path applies.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

The reference that identifies the object whose parent paths are listed.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of items to be retrieved in a single call. This is an approximate number.

" + } + } + }, + "ListObjectParentPathsResponse":{ + "type":"structure", + "members":{ + "PathToObjectIdentifiersList":{ + "shape":"PathToObjectIdentifiersList", + "documentation":"

Returns the path to the ObjectIdentifiers that are associated with the directory.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + } + }, + "ListObjectParentsRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "ObjectReference" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the Directory where the object resides. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

The reference that identifies the object for which parent objects are being listed.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of items to be retrieved in a single call. This is an approximate number.

" + }, + "ConsistencyLevel":{ + "shape":"ConsistencyLevel", + "documentation":"

Represents the manner and timing in which the successful write or update of an object is reflected in a subsequent read operation of that same object.

", + "location":"header", + "locationName":"x-amz-consistency-level" + } + } + }, + "ListObjectParentsResponse":{ + "type":"structure", + "members":{ + "Parents":{ + "shape":"ObjectIdentifierToLinkNameMap", + "documentation":"

The parent structure, which is a map with key as the ObjectIdentifier and LinkName as the value.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + } + }, + "ListObjectPoliciesRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "ObjectReference" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the Directory where objects reside. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

Reference that identifies the object for which policies will be listed.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of items to be retrieved in a single call. This is an approximate number.

" + }, + "ConsistencyLevel":{ + "shape":"ConsistencyLevel", + "documentation":"

Represents the manner and timing in which the successful write or update of an object is reflected in a subsequent read operation of that same object.

", + "location":"header", + "locationName":"x-amz-consistency-level" + } + } + }, + "ListObjectPoliciesResponse":{ + "type":"structure", + "members":{ + "AttachedPolicyIds":{ + "shape":"ObjectIdentifierList", + "documentation":"

A list of policy ObjectIdentifiers that are attached to the object.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + } + }, + "ListOutgoingTypedLinksRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "ObjectReference" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the directory where you want to list the typed links.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

A reference that identifies the object whose attributes will be listed.

" + }, + "FilterAttributeRanges":{ + "shape":"TypedLinkAttributeRangeList", + "documentation":"

Provides range filters for multiple attributes. When providing ranges to typed link selection, any inexact ranges must be specified at the end. Any attributes that do not have a range specified are presumed to match the entire range.

" + }, + "FilterTypedLink":{ + "shape":"TypedLinkSchemaAndFacetName", + "documentation":"

Filters are interpreted in the order of the attributes defined on the typed link facet, not the order they are supplied to any API calls.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + }, + "ConsistencyLevel":{ + "shape":"ConsistencyLevel", + "documentation":"

The consistency level to execute the request at.

" + } + } + }, + "ListOutgoingTypedLinksResponse":{ + "type":"structure", + "members":{ + "TypedLinkSpecifiers":{ + "shape":"TypedLinkSpecifierList", + "documentation":"

Returns a typed link specifier as output.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + } + }, + "ListPolicyAttachmentsRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "PolicyReference" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the Directory where objects reside. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "PolicyReference":{ + "shape":"ObjectReference", + "documentation":"

The reference that identifies the policy object.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of items to be retrieved in a single call. This is an approximate number.

" + }, + "ConsistencyLevel":{ + "shape":"ConsistencyLevel", + "documentation":"

Represents the manner and timing in which the successful write or update of an object is reflected in a subsequent read operation of that same object.

", + "location":"header", + "locationName":"x-amz-consistency-level" + } + } + }, + "ListPolicyAttachmentsResponse":{ + "type":"structure", + "members":{ + "ObjectIdentifiers":{ + "shape":"ObjectIdentifierList", + "documentation":"

A list of ObjectIdentifiers to which the policy is attached.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + } + }, + "ListPublishedSchemaArnsRequest":{ + "type":"structure", + "members":{ + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The response for ListPublishedSchemaArns when this parameter is used will list all minor version ARNs for a major version.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + } + } + }, + "ListPublishedSchemaArnsResponse":{ + "type":"structure", + "members":{ + "SchemaArns":{ + "shape":"Arns", + "documentation":"

The ARNs of published schemas.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the resource. Tagging is only supported for directories.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token. This is for future use. Currently pagination is not supported for tagging.

" + }, + "MaxResults":{ + "shape":"TagsNumberResults", + "documentation":"

The MaxResults parameter sets the maximum number of results returned in a single page. This is for future use and is not supported currently.

" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

A list of tag key value pairs that are associated with the response.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

" + } + } + }, + "ListTypedLinkFacetAttributesRequest":{ + "type":"structure", + "required":[ + "SchemaArn", + "Name" + ], + "members":{ + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the schema. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "Name":{ + "shape":"TypedLinkName", + "documentation":"

The unique name of the typed link facet.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + } + } + }, + "ListTypedLinkFacetAttributesResponse":{ + "type":"structure", + "members":{ + "Attributes":{ + "shape":"TypedLinkAttributeDefinitionList", + "documentation":"

An ordered set of attributes associated with the typed link.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + } + }, + "ListTypedLinkFacetNamesRequest":{ + "type":"structure", + "required":["SchemaArn"], + "members":{ + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the schema. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + } + } + }, + "ListTypedLinkFacetNamesResponse":{ + "type":"structure", + "members":{ + "FacetNames":{ + "shape":"TypedLinkNameList", + "documentation":"

The names of typed link facets that exist within the schema.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + } + }, + "LookupPolicyRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "ObjectReference" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the Directory. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

Reference that identifies the object whose policies will be looked up.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token to request the next page of results.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of items to be retrieved in a single call. This is an approximate number.

" + } + } + }, + "LookupPolicyResponse":{ + "type":"structure", + "members":{ + "PolicyToPathList":{ + "shape":"PolicyToPathList", + "documentation":"

Provides a list of paths to policies. Policies contain PolicyId, ObjectIdentifier, and PolicyType. For more information, see Policies.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + } + }, + "NextToken":{"type":"string"}, + "NotIndexException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Indicates that the requested operation can only operate on index objects.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "NotNodeException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Occurs when any invalid operations are performed on an object that is not a node, such as calling ListObjectChildren for a leaf node object.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "NotPolicyException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Indicates that the requested operation can only operate on policy objects.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "NumberAttributeValue":{"type":"string"}, + "NumberResults":{ + "type":"integer", + "min":1 + }, + "ObjectAlreadyDetachedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Indicates that the object is not attached to the index.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ObjectAttributeAction":{ + "type":"structure", + "members":{ + "ObjectAttributeActionType":{ + "shape":"UpdateActionType", + "documentation":"

A type that can be either Update or Delete.

" + }, + "ObjectAttributeUpdateValue":{ + "shape":"TypedAttributeValue", + "documentation":"

The value that you want to update to.

" + } + }, + "documentation":"

The action to take on the object attribute.

" + }, + "ObjectAttributeRange":{ + "type":"structure", + "members":{ + "AttributeKey":{ + "shape":"AttributeKey", + "documentation":"

The key of the attribute that the attribute range covers.

" + }, + "Range":{ + "shape":"TypedAttributeValueRange", + "documentation":"

The range of attribute values being selected.

" + } + }, + "documentation":"

A range of attributes.

" + }, + "ObjectAttributeRangeList":{ + "type":"list", + "member":{"shape":"ObjectAttributeRange"} + }, + "ObjectAttributeUpdate":{ + "type":"structure", + "members":{ + "ObjectAttributeKey":{ + "shape":"AttributeKey", + "documentation":"

The key of the attribute being updated.

" + }, + "ObjectAttributeAction":{ + "shape":"ObjectAttributeAction", + "documentation":"

The action to perform as part of the attribute update.

" + } + }, + "documentation":"

Structure that contains attribute update information.

" + }, + "ObjectAttributeUpdateList":{ + "type":"list", + "member":{"shape":"ObjectAttributeUpdate"} + }, + "ObjectIdentifier":{"type":"string"}, + "ObjectIdentifierList":{ + "type":"list", + "member":{"shape":"ObjectIdentifier"} + }, + "ObjectIdentifierToLinkNameMap":{ + "type":"map", + "key":{"shape":"ObjectIdentifier"}, + "value":{"shape":"LinkName"} + }, + "ObjectNotDetachedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Indicates that the requested operation cannot be completed because the object has not been detached from the tree.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ObjectReference":{ + "type":"structure", + "members":{ + "Selector":{ + "shape":"SelectorObjectReference", + "documentation":"

A path selector supports easy selection of an object by the parent/child links leading to it from the directory root. Use the link names from each parent/child link to construct the path. Path selectors start with a slash (/) and link names are separated by slashes. For more information about paths, see Accessing Objects. You can identify an object in one of the following ways:

" + } + }, + "documentation":"

The reference that identifies an object.

" + }, + "ObjectType":{ + "type":"string", + "enum":[ + "NODE", + "LEAF_NODE", + "POLICY", + "INDEX" + ] + }, + "PathString":{"type":"string"}, + "PathToObjectIdentifiers":{ + "type":"structure", + "members":{ + "Path":{ + "shape":"PathString", + "documentation":"

The path that is used to identify the object starting from directory root.

" + }, + "ObjectIdentifiers":{ + "shape":"ObjectIdentifierList", + "documentation":"

Lists ObjectIdentifiers starting from directory root to the object in the request.

" + } + }, + "documentation":"

Returns the path to the ObjectIdentifiers that is associated with the directory.

" + }, + "PathToObjectIdentifiersList":{ + "type":"list", + "member":{"shape":"PathToObjectIdentifiers"} + }, + "PolicyAttachment":{ + "type":"structure", + "members":{ + "PolicyId":{ + "shape":"ObjectIdentifier", + "documentation":"

The ID of PolicyAttachment.

" + }, + "ObjectIdentifier":{ + "shape":"ObjectIdentifier", + "documentation":"

The ObjectIdentifier that is associated with PolicyAttachment.

" + }, + "PolicyType":{ + "shape":"PolicyType", + "documentation":"

The type of policy that can be associated with PolicyAttachment.

" + } + }, + "documentation":"

Contains the PolicyType, PolicyId, and the ObjectIdentifier to which it is attached. For more information, see Policies.

" + }, + "PolicyAttachmentList":{ + "type":"list", + "member":{"shape":"PolicyAttachment"} + }, + "PolicyToPath":{ + "type":"structure", + "members":{ + "Path":{ + "shape":"PathString", + "documentation":"

The path that is referenced from the root.

" + }, + "Policies":{ + "shape":"PolicyAttachmentList", + "documentation":"

List of policy objects.

" + } + }, + "documentation":"

Used when a regular object exists in a Directory and you want to find all of the policies that are associated with that object and the parent to that object.

" + }, + "PolicyToPathList":{ + "type":"list", + "member":{"shape":"PolicyToPath"} + }, + "PolicyType":{"type":"string"}, + "PublishSchemaRequest":{ + "type":"structure", + "required":[ + "DevelopmentSchemaArn", + "Version" + ], + "members":{ + "DevelopmentSchemaArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the development schema. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "Version":{ + "shape":"Version", + "documentation":"

The major version under which the schema will be published. Schemas have both a major and minor version associated with them.

" + }, + "MinorVersion":{ + "shape":"Version", + "documentation":"

The minor version under which the schema will be published. This parameter is recommended. Schemas have both a major and minor version associated with them.

" + }, + "Name":{ + "shape":"SchemaName", + "documentation":"

The new name under which the schema will be published. If this is not provided, the development schema is considered.

" + } + } + }, + "PublishSchemaResponse":{ + "type":"structure", + "members":{ + "PublishedSchemaArn":{ + "shape":"Arn", + "documentation":"

The ARN that is associated with the published schema. For more information, see arns.

" + } + } + }, + "PutSchemaFromJsonRequest":{ + "type":"structure", + "required":[ + "SchemaArn", + "Document" + ], + "members":{ + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The ARN of the schema to update.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "Document":{ + "shape":"SchemaJsonDocument", + "documentation":"

The replacement JSON schema.

" + } + } + }, + "PutSchemaFromJsonResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

The ARN of the schema to update.

" + } + } + }, + "RangeMode":{ + "type":"string", + "enum":[ + "FIRST", + "LAST", + "LAST_BEFORE_MISSING_VALUES", + "INCLUSIVE", + "EXCLUSIVE" + ] + }, + "RemoveFacetFromObjectRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "SchemaFacet", + "ObjectReference" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The ARN of the directory in which the object resides.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "SchemaFacet":{ + "shape":"SchemaFacet", + "documentation":"

The facet to remove. See SchemaFacet for details.

" + }, + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

A reference to the object to remove the facet from.

" + } + } + }, + "RemoveFacetFromObjectResponse":{ + "type":"structure", + "members":{ + } + }, + "RequiredAttributeBehavior":{ + "type":"string", + "enum":[ + "REQUIRED_ALWAYS", + "NOT_REQUIRED" + ] + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The specified resource could not be found.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "RetryableConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Occurs when a conflict with a previous successful write is detected. For example, if a write operation occurs on an object and then an attempt is made to read the object using “SERIALIZABLE” consistency, this exception may result. This generally occurs when the previous write did not have time to propagate to the host serving the current request. A retry (with appropriate backoff logic) is the recommended response to this exception.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "Rule":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"RuleType", + "documentation":"

The type of attribute validation rule.

" + }, + "Parameters":{ + "shape":"RuleParameterMap", + "documentation":"

The minimum and maximum parameters that are associated with the rule.

" + } + }, + "documentation":"

Contains an Amazon Resource Name (ARN) and parameters that are associated with the rule.

" + }, + "RuleKey":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9._-]*$" + }, + "RuleMap":{ + "type":"map", + "key":{"shape":"RuleKey"}, + "value":{"shape":"Rule"} + }, + "RuleParameterKey":{"type":"string"}, + "RuleParameterMap":{ + "type":"map", + "key":{"shape":"RuleParameterKey"}, + "value":{"shape":"RuleParameterValue"} + }, + "RuleParameterValue":{"type":"string"}, + "RuleType":{ + "type":"string", + "enum":[ + "BINARY_LENGTH", + "NUMBER_COMPARISON", + "STRING_FROM_SET", + "STRING_LENGTH" + ] + }, + "SchemaAlreadyExistsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Indicates that a schema could not be created due to a naming conflict. Please select a different name and then try again.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "SchemaAlreadyPublishedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Indicates that a schema is already published.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "SchemaFacet":{ + "type":"structure", + "members":{ + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The ARN of the schema that contains the facet with no minor component. See arns and In-Place Schema Upgrade for a description of when to provide minor versions.

" + }, + "FacetName":{ + "shape":"FacetName", + "documentation":"

The name of the facet.

" + } + }, + "documentation":"

A facet.

" + }, + "SchemaFacetList":{ + "type":"list", + "member":{"shape":"SchemaFacet"} + }, + "SchemaJsonDocument":{"type":"string"}, + "SchemaName":{ + "type":"string", + "max":32, + "min":1, + "pattern":"^[a-zA-Z0-9._-]*$" + }, + "SelectorObjectReference":{"type":"string"}, + "StillContainsLinksException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The object could not be deleted because links still exist. Remove the links and then try the operation again.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "StringAttributeValue":{"type":"string"}, + "Tag":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

The key that is associated with the tag.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

The value that is associated with the tag.

" + } + }, + "documentation":"

The tag structure that contains a tag key and value.

" + }, + "TagKey":{"type":"string"}, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"} + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the resource. Tagging is only supported for directories.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of tag key-value pairs.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{"type":"string"}, + "TagsNumberResults":{ + "type":"integer", + "min":50 + }, + "TypedAttributeValue":{ + "type":"structure", + "members":{ + "StringValue":{ + "shape":"StringAttributeValue", + "documentation":"

A string data value.

" + }, + "BinaryValue":{ + "shape":"BinaryAttributeValue", + "documentation":"

A binary data value.

" + }, + "BooleanValue":{ + "shape":"BooleanAttributeValue", + "documentation":"

A Boolean data value.

" + }, + "NumberValue":{ + "shape":"NumberAttributeValue", + "documentation":"

A number data value.

" + }, + "DatetimeValue":{ + "shape":"DatetimeAttributeValue", + "documentation":"

A date and time value.

" + } + }, + "documentation":"

Represents the data for a typed attribute. You can set one, and only one, of the elements. Each attribute in an item is a name-value pair. Attributes have a single value.

" + }, + "TypedAttributeValueRange":{ + "type":"structure", + "required":[ + "StartMode", + "EndMode" + ], + "members":{ + "StartMode":{ + "shape":"RangeMode", + "documentation":"

The inclusive or exclusive range start.

" + }, + "StartValue":{ + "shape":"TypedAttributeValue", + "documentation":"

The value to start the range at.

" + }, + "EndMode":{ + "shape":"RangeMode", + "documentation":"

The inclusive or exclusive range end.

" + }, + "EndValue":{ + "shape":"TypedAttributeValue", + "documentation":"

The attribute value to terminate the range at.

" + } + }, + "documentation":"

A range of attribute values. For more information, see Range Filters.

" + }, + "TypedLinkAttributeDefinition":{ + "type":"structure", + "required":[ + "Name", + "Type", + "RequiredBehavior" + ], + "members":{ + "Name":{ + "shape":"AttributeName", + "documentation":"

The unique name of the typed link attribute.

" + }, + "Type":{ + "shape":"FacetAttributeType", + "documentation":"

The type of the attribute.

" + }, + "DefaultValue":{ + "shape":"TypedAttributeValue", + "documentation":"

The default value of the attribute (if configured).

" + }, + "IsImmutable":{ + "shape":"Bool", + "documentation":"

Whether the attribute is mutable or not.

" + }, + "Rules":{ + "shape":"RuleMap", + "documentation":"

Validation rules that are attached to the attribute definition.

" + }, + "RequiredBehavior":{ + "shape":"RequiredAttributeBehavior", + "documentation":"

The required behavior of the TypedLinkAttributeDefinition.

" + } + }, + "documentation":"

A typed link attribute definition.

" + }, + "TypedLinkAttributeDefinitionList":{ + "type":"list", + "member":{"shape":"TypedLinkAttributeDefinition"} + }, + "TypedLinkAttributeRange":{ + "type":"structure", + "required":["Range"], + "members":{ + "AttributeName":{ + "shape":"AttributeName", + "documentation":"

The unique name of the typed link attribute.

" + }, + "Range":{ + "shape":"TypedAttributeValueRange", + "documentation":"

The range of attribute values that are being selected.

" + } + }, + "documentation":"

Identifies the range of attributes that are used by a specified filter.

" + }, + "TypedLinkAttributeRangeList":{ + "type":"list", + "member":{"shape":"TypedLinkAttributeRange"} + }, + "TypedLinkFacet":{ + "type":"structure", + "required":[ + "Name", + "Attributes", + "IdentityAttributeOrder" + ], + "members":{ + "Name":{ + "shape":"TypedLinkName", + "documentation":"

The unique name of the typed link facet.

" + }, + "Attributes":{ + "shape":"TypedLinkAttributeDefinitionList", + "documentation":"

A set of key-value pairs associated with the typed link. Typed link attributes are used when you have data values that are related to the link itself, and not to one of the two objects being linked. Identity attributes also serve to distinguish the link from others of the same type between the same objects.

" + }, + "IdentityAttributeOrder":{ + "shape":"AttributeNameList", + "documentation":"

The set of attributes that distinguish links made from this facet from each other, in the order of significance. Listing typed links can filter on the values of these attributes. See ListOutgoingTypedLinks and ListIncomingTypedLinks for details.

" + } + }, + "documentation":"

Defines the typed links structure and its attributes. To create a typed link facet, use the CreateTypedLinkFacet API.

" + }, + "TypedLinkFacetAttributeUpdate":{ + "type":"structure", + "required":[ + "Attribute", + "Action" + ], + "members":{ + "Attribute":{ + "shape":"TypedLinkAttributeDefinition", + "documentation":"

The attribute to update.

" + }, + "Action":{ + "shape":"UpdateActionType", + "documentation":"

The action to perform when updating the attribute.

" + } + }, + "documentation":"

A typed link facet attribute update.

" + }, + "TypedLinkFacetAttributeUpdateList":{ + "type":"list", + "member":{"shape":"TypedLinkFacetAttributeUpdate"} + }, + "TypedLinkName":{ + "type":"string", + "pattern":"^[a-zA-Z0-9._-]*$" + }, + "TypedLinkNameList":{ + "type":"list", + "member":{"shape":"TypedLinkName"} + }, + "TypedLinkSchemaAndFacetName":{ + "type":"structure", + "required":[ + "SchemaArn", + "TypedLinkName" + ], + "members":{ + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the schema. For more information, see arns.

" + }, + "TypedLinkName":{ + "shape":"TypedLinkName", + "documentation":"

The unique name of the typed link facet.

" + } + }, + "documentation":"

Identifies the schema Amazon Resource Name (ARN) and facet name for the typed link.

" + }, + "TypedLinkSpecifier":{ + "type":"structure", + "required":[ + "TypedLinkFacet", + "SourceObjectReference", + "TargetObjectReference", + "IdentityAttributeValues" + ], + "members":{ + "TypedLinkFacet":{ + "shape":"TypedLinkSchemaAndFacetName", + "documentation":"

Identifies the typed link facet that is associated with the typed link.

" + }, + "SourceObjectReference":{ + "shape":"ObjectReference", + "documentation":"

Identifies the source object that the typed link will attach to.

" + }, + "TargetObjectReference":{ + "shape":"ObjectReference", + "documentation":"

Identifies the target object that the typed link will attach to.

" + }, + "IdentityAttributeValues":{ + "shape":"AttributeNameAndValueList", + "documentation":"

Identifies the attribute value to update.

" + } + }, + "documentation":"

Contains all the information that is used to uniquely identify a typed link. The parameters discussed in this topic are used to uniquely specify the typed link being operated on. The AttachTypedLink API returns a typed link specifier while the DetachTypedLink API accepts one as input. Similarly, the ListIncomingTypedLinks and ListOutgoingTypedLinks API operations provide typed link specifiers as output. You can also construct a typed link specifier from scratch.

" + }, + "TypedLinkSpecifierList":{ + "type":"list", + "member":{"shape":"TypedLinkSpecifier"} + }, + "UnsupportedIndexTypeException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Indicates that the requested index type is not supported.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the resource. Tagging is only supported for directories.

" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

Keys of the tag that need to be removed from the resource.

" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateActionType":{ + "type":"string", + "enum":[ + "CREATE_OR_UPDATE", + "DELETE" + ] + }, + "UpdateFacetRequest":{ + "type":"structure", + "required":[ + "SchemaArn", + "Name" + ], + "members":{ + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the Facet. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "Name":{ + "shape":"FacetName", + "documentation":"

The name of the facet.

" + }, + "AttributeUpdates":{ + "shape":"FacetAttributeUpdateList", + "documentation":"

List of attributes that need to be updated in a given schema Facet. Each attribute is followed by AttributeAction, which specifies the type of update operation to perform.

" + }, + "ObjectType":{ + "shape":"ObjectType", + "documentation":"

The object type that is associated with the facet. See CreateFacetRequest$ObjectType for more details.

" + } + } + }, + "UpdateFacetResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateLinkAttributesRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "TypedLinkSpecifier", + "AttributeUpdates" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the Directory where the updated typed link resides. For more information, see arns or Typed link.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "TypedLinkSpecifier":{ + "shape":"TypedLinkSpecifier", + "documentation":"

Allows a typed link specifier to be accepted as input.

" + }, + "AttributeUpdates":{ + "shape":"LinkAttributeUpdateList", + "documentation":"

The attributes update structure.

" + } + } + }, + "UpdateLinkAttributesResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateObjectAttributesRequest":{ + "type":"structure", + "required":[ + "DirectoryArn", + "ObjectReference", + "AttributeUpdates" + ], + "members":{ + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the Directory where the object resides. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

The reference that identifies the object.

" + }, + "AttributeUpdates":{ + "shape":"ObjectAttributeUpdateList", + "documentation":"

The attributes update structure.

" + } + } + }, + "UpdateObjectAttributesResponse":{ + "type":"structure", + "members":{ + "ObjectIdentifier":{ + "shape":"ObjectIdentifier", + "documentation":"

The ObjectIdentifier of the updated object.

" + } + } + }, + "UpdateSchemaRequest":{ + "type":"structure", + "required":[ + "SchemaArn", + "Name" + ], + "members":{ + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the development schema. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "Name":{ + "shape":"SchemaName", + "documentation":"

The name of the schema.

" + } + } + }, + "UpdateSchemaResponse":{ + "type":"structure", + "members":{ + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The ARN that is associated with the updated schema. For more information, see arns.

" + } + } + }, + "UpdateTypedLinkFacetRequest":{ + "type":"structure", + "required":[ + "SchemaArn", + "Name", + "AttributeUpdates", + "IdentityAttributeOrder" + ], + "members":{ + "SchemaArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that is associated with the schema. For more information, see arns.

", + "location":"header", + "locationName":"x-amz-data-partition" + }, + "Name":{ + "shape":"TypedLinkName", + "documentation":"

The unique name of the typed link facet.

" + }, + "AttributeUpdates":{ + "shape":"TypedLinkFacetAttributeUpdateList", + "documentation":"

Attributes update structure.

" + }, + "IdentityAttributeOrder":{ + "shape":"AttributeNameList", + "documentation":"

The order of identity attributes for the facet, from most significant to least significant. The ability to filter typed links considers the order that the attributes are defined on the typed link facet. When providing ranges to a typed link selection, any inexact ranges must be specified at the end. Any attributes that do not have a range specified are presumed to match the entire range. Filters are interpreted in the order of the attributes on the typed link facet, not the order in which they are supplied to any API calls. For more information about identity attributes, see Typed link.

" + } + } + }, + "UpdateTypedLinkFacetResponse":{ + "type":"structure", + "members":{ + } + }, + "UpgradeAppliedSchemaRequest":{ + "type":"structure", + "required":[ + "PublishedSchemaArn", + "DirectoryArn" + ], + "members":{ + "PublishedSchemaArn":{ + "shape":"Arn", + "documentation":"

The revision of the published schema to upgrade the directory to.

" + }, + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The ARN for the directory to which the upgraded schema will be applied.

" + }, + "DryRun":{ + "shape":"Bool", + "documentation":"

Used for testing whether the major version schemas are backward compatible or not. If schema compatibility fails, an exception would be thrown else the call would succeed but no changes will be saved. This parameter is optional.

" + } + } + }, + "UpgradeAppliedSchemaResponse":{ + "type":"structure", + "members":{ + "UpgradedSchemaArn":{ + "shape":"Arn", + "documentation":"

The ARN of the upgraded schema that is returned as part of the response.

" + }, + "DirectoryArn":{ + "shape":"Arn", + "documentation":"

The ARN of the directory that is returned as part of the response.

" + } + } + }, + "UpgradePublishedSchemaRequest":{ + "type":"structure", + "required":[ + "DevelopmentSchemaArn", + "PublishedSchemaArn", + "MinorVersion" + ], + "members":{ + "DevelopmentSchemaArn":{ + "shape":"Arn", + "documentation":"

The ARN of the development schema with the changes used for the upgrade.

" + }, + "PublishedSchemaArn":{ + "shape":"Arn", + "documentation":"

The ARN of the published schema to be upgraded.

" + }, + "MinorVersion":{ + "shape":"Version", + "documentation":"

Identifies the minor version of the published schema that will be created. This parameter is NOT optional.

" + }, + "DryRun":{ + "shape":"Bool", + "documentation":"

Used for testing whether the Development schema provided is backwards compatible, or not, with the publish schema provided by the user to be upgraded. If schema compatibility fails, an exception would be thrown else the call would succeed. This parameter is optional and defaults to false.

" + } + } + }, + "UpgradePublishedSchemaResponse":{ + "type":"structure", + "members":{ + "UpgradedSchemaArn":{ + "shape":"Arn", + "documentation":"

The ARN of the upgraded schema that is returned as part of the response.

" + } + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Indicates that your request is malformed in some manner. See the exception message.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "Version":{ + "type":"string", + "max":10, + "min":1, + "pattern":"^[a-zA-Z0-9._-]*$" + } + }, + "documentation":"Amazon Cloud Directory

Amazon Cloud Directory is a component of the AWS Directory Service that simplifies the development and management of cloud-scale web, mobile, and IoT applications. This guide describes the Cloud Directory operations that you can call programmatically and includes detailed information on data types and errors. For information about AWS Directory Services features, see AWS Directory Service and the AWS Directory Service Administration Guide.

" +} diff --git a/botocore/data/cloudformation/2010-05-15/service-2.json b/botocore/data/cloudformation/2010-05-15/service-2.json index e13482a6..db15133e 100644 --- a/botocore/data/cloudformation/2010-05-15/service-2.json +++ b/botocore/data/cloudformation/2010-05-15/service-2.json @@ -605,9 +605,10 @@ {"shape":"OperationInProgressException"}, {"shape":"OperationIdAlreadyExistsException"}, {"shape":"StaleRequestException"}, - {"shape":"InvalidOperationException"} + {"shape":"InvalidOperationException"}, + {"shape":"StackInstanceNotFoundException"} ], - "documentation":"

Updates the stack set and all associated stack instances.

Even if the stack set operation created by updating the stack set fails (completely or partially, below or above a specified failure tolerance), the stack set is updated with your changes. Subsequent CreateStackInstances calls on the specified stack set use the updated stack set.

" + "documentation":"

Updates the stack set, and associated stack instances in the specified accounts and regions.

Even if the stack set operation created by updating the stack set fails (completely or partially, below or above a specified failure tolerance), the stack set is updated with your changes. Subsequent CreateStackInstances calls on the specified stack set use the updated stack set.

" }, "UpdateTerminationProtection":{ "name":"UpdateTerminationProtection", @@ -1152,7 +1153,11 @@ }, "AdministrationRoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Number (ARN) of the IAM role to use to create this stack set.

Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Define Permissions for Multiple Administrators in the AWS CloudFormation User Guide.

" + "documentation":"

The Amazon Resource Number (ARN) of the IAM role to use to create this stack set.

Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Prerequisites: Granting Permissions for Stack Set Operations in the AWS CloudFormation User Guide.

" + }, + "ExecutionRoleName":{ + "shape":"ExecutionRoleName", + "documentation":"

The name of the IAM execution role to use to create the stack set. If you do not specify an execution role, AWS CloudFormation uses the AWSCloudFormationStackSetExecutionRole role for the stack set operation.

Specify an IAM role only if you are using customized execution roles to control which stack resources users and groups can include in their stack sets.

" }, "ClientRequestToken":{ "shape":"ClientRequestToken", @@ -1657,6 +1662,12 @@ }, "documentation":"

The output for the ExecuteChangeSet action.

" }, + "ExecutionRoleName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z_0-9+=,.@-]+" + }, "ExecutionStatus":{ "type":"string", "enum":[ @@ -3045,7 +3056,11 @@ }, "AdministrationRoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Number (ARN) of the IAM role used to create or update the stack set.

Use customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Define Permissions for Multiple Administrators in the AWS CloudFormation User Guide.

" + "documentation":"

The Amazon Resource Number (ARN) of the IAM role used to create or update the stack set.

Use customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Prerequisites: Granting Permissions for Stack Set Operations in the AWS CloudFormation User Guide.

" + }, + "ExecutionRoleName":{ + "shape":"ExecutionRoleName", + "documentation":"

The name of the IAM execution role used to create or update the stack set.

Use customized execution roles to control which stack resources users and groups can include in their stack sets.

" } }, "documentation":"

A structure that contains information about a stack set. A stack set enables you to provision stacks into AWS accounts and across regions by using a single CloudFormation template. In the stack set, you specify the template to use, as well as any parameters and capabilities that the template requires.

" @@ -3112,6 +3127,10 @@ "shape":"RoleARN", "documentation":"

The Amazon Resource Number (ARN) of the IAM role used to perform this stack set operation.

Use customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Define Permissions for Multiple Administrators in the AWS CloudFormation User Guide.

" }, + "ExecutionRoleName":{ + "shape":"ExecutionRoleName", + "documentation":"

The name of the IAM execution role used to create or update the stack set.

Use customized execution roles to control which stack resources users and groups can include in their stack sets.

" + }, "CreationTimestamp":{ "shape":"Timestamp", "documentation":"

The time at which the operation was initiated. Note that the creation times for the stack set operation might differ from the creation time of the individual stacks themselves. This is because AWS CloudFormation needs to perform preparatory work for the operation, such as dispatching the work to the requested regions, before actually creating the first stacks.

" @@ -3573,7 +3592,7 @@ ], "members":{ "StackSetName":{ - "shape":"StackSetName", + "shape":"StackSetNameOrId", "documentation":"

The name or unique ID of the stack set associated with the stack instances.

" }, "Accounts":{ @@ -3662,10 +3681,22 @@ "shape":"RoleARN", "documentation":"

The Amazon Resource Number (ARN) of the IAM role to use to update this stack set.

Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Define Permissions for Multiple Administrators in the AWS CloudFormation User Guide.

If you specify a customized administrator role, AWS CloudFormation uses that role to update the stack. If you do not specify a customized administrator role, AWS CloudFormation performs the update using the role previously associated with the stack set, so long as you have permissions to perform operations on the stack set.

" }, + "ExecutionRoleName":{ + "shape":"ExecutionRoleName", + "documentation":"

The name of the IAM execution role to use to update the stack set. If you do not specify an execution role, AWS CloudFormation uses the AWSCloudFormationStackSetExecutionRole role for the stack set operation.

Specify an IAM role only if you are using customized execution roles to control which stack resources users and groups can include in their stack sets.

If you specify a customized execution role, AWS CloudFormation uses that role to update the stack. If you do not specify a customized execution role, AWS CloudFormation performs the update using the role previously associated with the stack set, so long as you have permissions to perform operations on the stack set.

" + }, "OperationId":{ "shape":"ClientRequestToken", "documentation":"

The unique ID for this stack set operation.

The operation ID also functions as an idempotency token, to ensure that AWS CloudFormation performs the stack set operation only once, even if you retry the request multiple times. You might retry stack set operation requests to ensure that AWS CloudFormation successfully received them.

If you don't specify an operation ID, AWS CloudFormation generates one automatically.

Repeating this stack set operation with a new operation ID retries all stack instances whose status is OUTDATED.

", "idempotencyToken":true + }, + "Accounts":{ + "shape":"AccountList", + "documentation":"

The accounts in which to update associated stack instances. If you specify accounts, you must also specify the regions in which to update stack set instances.

To update all the stack instances associated with this stack set, do not specify the Accounts or Regions properties.

If the stack set update includes changes to the template (that is, if the TemplateBody or TemplateURL properties are specified), or the Parameters property, AWS CloudFormation marks all stack instances with a status of OUTDATED prior to updating the stack instances in the specified accounts and regions. If the stack set update does not include changes to the template or parameters, AWS CloudFormation updates the stack instances in the specified accounts and regions, while leaving all other stack instances with their existing stack instance status.

" + }, + "Regions":{ + "shape":"RegionList", + "documentation":"

The regions in which to update associated stack instances. If you specify regions, you must also specify accounts in which to update stack set instances.

To update all the stack instances associated with this stack set, do not specify the Accounts or Regions properties.

If the stack set update includes changes to the template (that is, if the TemplateBody or TemplateURL properties are specified), or the Parameters property, AWS CloudFormation marks all stack instances with a status of OUTDATED prior to updating the stack instances in the specified accounts and regions. If the stack set update does not include changes to the template or parameters, AWS CloudFormation updates the stack instances in the specified accounts and regions, while leaving all other stack instances with their existing stack instance status.

" } } }, diff --git a/botocore/data/cloudfront/2017-10-30/service-2.json b/botocore/data/cloudfront/2017-10-30/service-2.json index 42013775..3dad99bf 100644 --- a/botocore/data/cloudfront/2017-10-30/service-2.json +++ b/botocore/data/cloudfront/2017-10-30/service-2.json @@ -357,21 +357,6 @@ ], "documentation":"

Remove a public key you previously added to CloudFront.

" }, - "DeleteServiceLinkedRole":{ - "name":"DeleteServiceLinkedRole2017_10_30", - "http":{ - "method":"DELETE", - "requestUri":"/2017-10-30/service-linked-role/{RoleName}", - "responseCode":204 - }, - "input":{"shape":"DeleteServiceLinkedRoleRequest"}, - "errors":[ - {"shape":"InvalidArgument"}, - {"shape":"AccessDenied"}, - {"shape":"ResourceInUse"}, - {"shape":"NoSuchResource"} - ] - }, "DeleteStreamingDistribution":{ "name":"DeleteStreamingDistribution2017_10_30", "http":{ @@ -1894,17 +1879,6 @@ } } }, - "DeleteServiceLinkedRoleRequest":{ - "type":"structure", - "required":["RoleName"], - "members":{ - "RoleName":{ - "shape":"string", - "location":"uri", - "locationName":"RoleName" - } - } - }, "DeleteStreamingDistributionRequest":{ "type":"structure", "required":["Id"], @@ -4213,14 +4187,6 @@ "type":"string", "pattern":"arn:aws:cloudfront::[0-9]+:.*" }, - "ResourceInUse":{ - "type":"structure", - "members":{ - "Message":{"shape":"string"} - }, - "error":{"httpStatusCode":409}, - "exception":true - }, "Restrictions":{ "type":"structure", "required":["GeoRestriction"], diff --git a/botocore/data/cloudsearch/2013-01-01/service-2.json b/botocore/data/cloudsearch/2013-01-01/service-2.json index 3f2d3793..6c232754 100644 --- a/botocore/data/cloudsearch/2013-01-01/service-2.json +++ b/botocore/data/cloudsearch/2013-01-01/service-2.json @@ -4,6 +4,7 @@ "apiVersion":"2013-01-01", "endpointPrefix":"cloudsearch", "serviceFullName":"Amazon CloudSearch", + "serviceId":"CloudSearch", "signatureVersion":"v4", "xmlNamespace":"http://cloudsearch.amazonaws.com/doc/2013-01-01/", "protocol":"query", diff --git a/botocore/data/cloudsearchdomain/2013-01-01/service-2.json b/botocore/data/cloudsearchdomain/2013-01-01/service-2.json index 43e03a42..018b114d 100644 --- a/botocore/data/cloudsearchdomain/2013-01-01/service-2.json +++ b/botocore/data/cloudsearchdomain/2013-01-01/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"rest-json", "serviceFullName":"Amazon 
CloudSearch Domain", + "serviceId":"CloudSearch Domain", "signatureVersion":"v4", "signingName":"cloudsearch", "uid":"cloudsearchdomain-2013-01-01" diff --git a/botocore/data/cloudtrail/2013-11-01/service-2.json b/botocore/data/cloudtrail/2013-11-01/service-2.json index d3bf5873..cc0ba898 100644 --- a/botocore/data/cloudtrail/2013-11-01/service-2.json +++ b/botocore/data/cloudtrail/2013-11-01/service-2.json @@ -7,6 +7,7 @@ "protocol":"json", "serviceAbbreviation":"CloudTrail", "serviceFullName":"AWS CloudTrail", + "serviceId":"CloudTrail", "signatureVersion":"v4", "targetPrefix":"com.amazonaws.cloudtrail.v20131101.CloudTrail_20131101", "uid":"cloudtrail-2013-11-01" diff --git a/botocore/data/cloudwatch/2010-08-01/service-2.json b/botocore/data/cloudwatch/2010-08-01/service-2.json index fa0b9959..a572ec7b 100644 --- a/botocore/data/cloudwatch/2010-08-01/service-2.json +++ b/botocore/data/cloudwatch/2010-08-01/service-2.json @@ -6,6 +6,7 @@ "protocol":"query", "serviceAbbreviation":"CloudWatch", "serviceFullName":"Amazon CloudWatch", + "serviceId":"CloudWatch", "signatureVersion":"v4", "uid":"monitoring-2010-08-01", "xmlNamespace":"http://monitoring.amazonaws.com/doc/2010-08-01/" diff --git a/botocore/data/codebuild/2016-10-06/service-2.json b/botocore/data/codebuild/2016-10-06/service-2.json index 78eb9b53..bd4bad13 100644 --- a/botocore/data/codebuild/2016-10-06/service-2.json +++ b/botocore/data/codebuild/2016-10-06/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceFullName":"AWS CodeBuild", + "serviceId":"CodeBuild", "signatureVersion":"v4", "targetPrefix":"CodeBuild_20161006", "uid":"codebuild-2016-10-06" @@ -79,7 +80,7 @@ {"shape":"ResourceAlreadyExistsException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

For an existing AWS CodeBuild build project that has its source code stored in a GitHub repository, enables AWS CodeBuild to begin automatically rebuilding the source code every time a code change is pushed to the repository.

If you enable webhooks for an AWS CodeBuild project, and the project is used as a build step in AWS CodePipeline, then two identical builds will be created for each commit. One build is triggered through webhooks, and one through AWS CodePipeline. Because billing is on a per-build basis, you will be billed for both builds. Therefore, if you are using AWS CodePipeline, we recommend that you disable webhooks in CodeBuild. In the AWS CodeBuild console, clear the Webhook box. For more information, see step 9 in Change a Build Project's Settings.

" + "documentation":"

For an existing AWS CodeBuild build project that has its source code stored in a GitHub repository, enables AWS CodeBuild to begin automatically rebuilding the source code every time a code change is pushed to the repository.

If you enable webhooks for an AWS CodeBuild project, and the project is used as a build step in AWS CodePipeline, then two identical builds will be created for each commit. One build is triggered through webhooks, and one through AWS CodePipeline. Because billing is on a per-build basis, you will be billed for both builds. Therefore, if you are using AWS CodePipeline, we recommend that you disable webhooks in CodeBuild. In the AWS CodeBuild console, clear the Webhook box. For more information, see step 5 in Change a Build Project's Settings.

" }, "DeleteProject":{ "name":"DeleteProject", @@ -387,6 +388,10 @@ "shape":"ProjectEnvironment", "documentation":"

Information about the build environment for this build.

" }, + "serviceRole":{ + "shape":"NonEmptyString", + "documentation":"

The name of a service role used for this build.

" + }, "logs":{ "shape":"LogsLocation", "documentation":"

Information about the build's logs in Amazon CloudWatch Logs.

" @@ -705,7 +710,10 @@ }, "EnvironmentType":{ "type":"string", - "enum":["LINUX_CONTAINER"] + "enum":[ + "WINDOWS_CONTAINER", + "LINUX_CONTAINER" + ] }, "EnvironmentVariable":{ "type":"structure", @@ -957,7 +965,8 @@ "enum":[ "DEBIAN", "AMAZON_LINUX", - "UBUNTU" + "UBUNTU", + "WINDOWS_SERVER" ] }, "Project":{ @@ -1052,7 +1061,7 @@ }, "name":{ "shape":"String", - "documentation":"

Along with path and namespaceType, the pattern that AWS CodeBuild will use to name and store the output artifact, as follows:

For example, if path is set to MyArtifacts, namespaceType is set to BUILD_ID, and name is set to MyArtifact.zip, then the output artifact would be stored in MyArtifacts/build-ID/MyArtifact.zip.

" + "documentation":"

Along with path and namespaceType, the pattern that AWS CodeBuild will use to name and store the output artifact, as follows:

For example:

" }, "packaging":{ "shape":"ArtifactPackaging", @@ -1121,7 +1130,7 @@ }, "privilegedMode":{ "shape":"WrapperBoolean", - "documentation":"

Enables running the Docker daemon inside a Docker container. Set to true only if the build project is be used to build Docker images, and the specified build environment image is not provided by AWS CodeBuild with Docker support. Otherwise, all associated builds that attempt to interact with the Docker daemon will fail. Note that you must also start the Docker daemon so that builds can interact with it. One way to do this is to initialize the Docker daemon during the install phase of your build spec by running the following build commands. (Do not run the following build commands if the specified build environment image is provided by AWS CodeBuild with Docker support.)

- nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 --storage-driver=overlay& - timeout -t 15 sh -c \"until docker info; do echo .; sleep 1; done\"

" + "documentation":"

Enables running the Docker daemon inside a Docker container. Set to true only if the build project is used to build Docker images, and the specified build environment image is not provided by AWS CodeBuild with Docker support. Otherwise, all associated builds that attempt to interact with the Docker daemon will fail. Note that you must also start the Docker daemon so that builds can interact with it. One way to do this is to initialize the Docker daemon during the install phase of your build spec by running the following build commands. (Do not run the following build commands if the specified build environment image is provided by AWS CodeBuild with Docker support.)

If the operating system's base image is Ubuntu Linux:

- nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 --storage-driver=overlay& - timeout 15 sh -c \"until docker info; do echo .; sleep 1; done\"

If the operating system's base image is Alpine Linux, add the -t argument to timeout:

- nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 --storage-driver=overlay& - timeout -t 15 sh -c \"until docker info; do echo .; sleep 1; done\"

" }, "certificate":{ "shape":"String", @@ -1174,6 +1183,10 @@ "shape":"SourceAuth", "documentation":"

Information about the authorization settings for AWS CodeBuild to access the source code to be built.

This information is for the AWS CodeBuild console's use only. Your code should not get or set this information directly (unless the build project's source type value is BITBUCKET or GITHUB).

" }, + "reportBuildStatus":{ + "shape":"WrapperBoolean", + "documentation":"

Set to true to report the status of a build's start and finish to your source provider. This option is only valid when your source provider is GitHub. If this is set and you use a different source provider, an invalidInputException is thrown.

" + }, "insecureSsl":{ "shape":"WrapperBoolean", "documentation":"

Enable this flag to ignore SSL warnings while connecting to the project source code.

" @@ -1261,6 +1274,18 @@ "shape":"EnvironmentVariables", "documentation":"

A set of environment variables that overrides, for this build only, the latest ones already defined in the build project.

" }, + "sourceTypeOverride":{ + "shape":"SourceType", + "documentation":"

A source input type for this build that overrides the source input defined in the build project

" + }, + "sourceLocationOverride":{ + "shape":"String", + "documentation":"

A location that overrides for this build the source location for the one defined in the build project.

" + }, + "sourceAuthOverride":{ + "shape":"SourceAuth", + "documentation":"

An authorization type for this build that overrides the one defined in the build project. This override applies only if the build project's source is BitBucket or GitHub.

" + }, "gitCloneDepthOverride":{ "shape":"GitCloneDepth", "documentation":"

The user-defined depth of history, with a minimum value of 0, that overrides, for this build only, any previous depth of history defined in the build project.

" @@ -1269,9 +1294,49 @@ "shape":"String", "documentation":"

A build spec declaration that overrides, for this build only, the latest one already defined in the build project.

" }, + "insecureSslOverride":{ + "shape":"WrapperBoolean", + "documentation":"

Enable this flag to override the insecure SSL setting that is specified in the build project. The insecure SSL setting determines whether to ignore SSL warnings while connecting to the project source code. This override applies only if the build's source is GitHub Enterprise.

" + }, + "reportBuildStatusOverride":{ + "shape":"WrapperBoolean", + "documentation":"

Set to true to report to your source provider the status of a build's start and completion. If you use this option with a source provider other than GitHub, an invalidInputException is thrown.

" + }, + "environmentTypeOverride":{ + "shape":"EnvironmentType", + "documentation":"

A container type for this build that overrides the one specified in the build project.

" + }, + "imageOverride":{ + "shape":"NonEmptyString", + "documentation":"

The name of an image for this build that overrides the one specified in the build project.

" + }, + "computeTypeOverride":{ + "shape":"ComputeType", + "documentation":"

The name of a compute type for this build that overrides the one specified in the build project.

" + }, + "certificateOverride":{ + "shape":"String", + "documentation":"

The name of a certificate for this build that overrides the one specified in the build project.

" + }, + "cacheOverride":{ + "shape":"ProjectCache", + "documentation":"

A ProjectCache object specified for this build that overrides the one defined in the build project.

" + }, + "serviceRoleOverride":{ + "shape":"NonEmptyString", + "documentation":"

The name of a service role for this build that overrides the one specified in the build project.

" + }, + "privilegedModeOverride":{ + "shape":"WrapperBoolean", + "documentation":"

Enable this flag to override privileged mode in the build project.

" + }, "timeoutInMinutesOverride":{ "shape":"TimeOut", "documentation":"

The number of build timeout minutes, from 5 to 480 (8 hours), that overrides, for this build only, the latest setting already defined in the build project.

" + }, + "idempotencyToken":{ + "shape":"String", + "documentation":"

A unique, case sensitive identifier you provide to ensure the idempotency of the StartBuild request. The token is included in the StartBuild request and is valid for 12 hours. If you repeat the StartBuild request with the same token, but change a parameter, AWS CodeBuild returns a parameter mismatch error.

" } } }, diff --git a/botocore/data/codedeploy/2014-10-06/service-2.json b/botocore/data/codedeploy/2014-10-06/service-2.json index f6a622de..04992201 100644 --- a/botocore/data/codedeploy/2014-10-06/service-2.json +++ b/botocore/data/codedeploy/2014-10-06/service-2.json @@ -1135,7 +1135,7 @@ }, "terminationWaitTimeInMinutes":{ "shape":"Duration", - "documentation":"

The number of minutes to wait after a successful blue/green deployment before terminating instances from the original environment.

" + "documentation":"

The number of minutes to wait after a successful blue/green deployment before terminating instances from the original environment. The maximum setting is 2880 minutes (2 days).

" } }, "documentation":"

Information about whether instances in the original environment are terminated when a blue/green deployment is successful.

" @@ -3211,11 +3211,11 @@ "members":{ "elbInfoList":{ "shape":"ELBInfoList", - "documentation":"

An array containing information about the load balancer to use for load balancing in a deployment. In Elastic Load Balancing, load balancers are used with Classic Load Balancers.

" + "documentation":"

An array containing information about the load balancer to use for load balancing in a deployment. In Elastic Load Balancing, load balancers are used with Classic Load Balancers.

Adding more than one load balancer to the array is not supported.

" }, "targetGroupInfoList":{ "shape":"TargetGroupInfoList", - "documentation":"

An array containing information about the target group to use for load balancing in a deployment. In Elastic Load Balancing, target groups are used with Application Load Balancers.

" + "documentation":"

An array containing information about the target group to use for load balancing in a deployment. In Elastic Load Balancing, target groups are used with Application Load Balancers.

Adding more than one target group to the array is not supported.

" } }, "documentation":"

Information about the Elastic Load Balancing load balancer or target group used in a deployment.

" diff --git a/botocore/data/codepipeline/2015-07-09/service-2.json b/botocore/data/codepipeline/2015-07-09/service-2.json index cc826014..722b2b51 100644 --- a/botocore/data/codepipeline/2015-07-09/service-2.json +++ b/botocore/data/codepipeline/2015-07-09/service-2.json @@ -7,6 +7,7 @@ "protocol":"json", "serviceAbbreviation":"CodePipeline", "serviceFullName":"AWS CodePipeline", + "serviceId":"CodePipeline", "signatureVersion":"v4", "targetPrefix":"CodePipeline_20150709", "uid":"codepipeline-2015-07-09" @@ -481,7 +482,8 @@ {"shape":"InvalidStageDeclarationException"}, {"shape":"InvalidActionDeclarationException"}, {"shape":"InvalidBlockerDeclarationException"}, - {"shape":"InvalidStructureException"} + {"shape":"InvalidStructureException"}, + {"shape":"LimitExceededException"} ], "documentation":"

Updates a specified pipeline with edits or changes to its structure. Use a JSON file with the pipeline structure in conjunction with UpdatePipeline to provide the full structure of the pipeline. Updating the pipeline increases the version number of the pipeline by 1.

" } diff --git a/botocore/data/codestar/2017-04-19/service-2.json b/botocore/data/codestar/2017-04-19/service-2.json index 6557ee76..2c53e815 100644 --- a/botocore/data/codestar/2017-04-19/service-2.json +++ b/botocore/data/codestar/2017-04-19/service-2.json @@ -7,6 +7,7 @@ "protocol":"json", "serviceAbbreviation":"CodeStar", "serviceFullName":"AWS CodeStar", + "serviceId":"CodeStar", "signatureVersion":"v4", "targetPrefix":"CodeStar_20170419", "uid":"codestar-2017-04-19" diff --git a/botocore/data/cognito-identity/2014-06-30/service-2.json b/botocore/data/cognito-identity/2014-06-30/service-2.json index cb6a478f..2b255458 100644 --- a/botocore/data/cognito-identity/2014-06-30/service-2.json +++ b/botocore/data/cognito-identity/2014-06-30/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceFullName":"Amazon Cognito Identity", + "serviceId":"Cognito Identity", "signatureVersion":"v4", "targetPrefix":"AWSCognitoIdentityService", "uid":"cognito-identity-2014-06-30" diff --git a/botocore/data/cognito-idp/2016-04-18/service-2.json b/botocore/data/cognito-idp/2016-04-18/service-2.json index 45a28a33..f861dd24 100644 --- a/botocore/data/cognito-idp/2016-04-18/service-2.json +++ b/botocore/data/cognito-idp/2016-04-18/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceFullName":"Amazon Cognito Identity Provider", + "serviceId":"Cognito Identity Provider", "signatureVersion":"v4", "targetPrefix":"AWSCognitoIdentityProviderService", "uid":"cognito-idp-2016-04-18" @@ -1863,7 +1864,7 @@ {"shape":"SoftwareTokenMFANotFoundException"}, {"shape":"CodeMismatchException"} ], - "documentation":"

Use this API to register a user's entered TOTP code and mark the user's software token MFA status as \"verified\" if successful,

" + "documentation":"

Use this API to register a user's entered TOTP code and mark the user's software token MFA status as \"verified\" if successful. The request takes an access token or a session string, but not both.

" }, "VerifyUserAttribute":{ "name":"VerifyUserAttribute", @@ -2377,7 +2378,7 @@ "members":{ "ChallengeName":{ "shape":"ChallengeNameType", - "documentation":"

The name of the challenge which you are responding to with this call. This is returned to you in the AdminInitiateAuth response if you need to pass another challenge.

" + "documentation":"

The name of the challenge which you are responding to with this call. This is returned to you in the AdminInitiateAuth response if you need to pass another challenge.

" }, "Session":{ "shape":"SessionType", @@ -3041,7 +3042,7 @@ }, "ExpiresIn":{ "shape":"IntegerType", - "documentation":"

The expiration period of the authentication result.

" + "documentation":"

The expiration period of the authentication result in seconds.

" }, "TokenType":{ "shape":"StringType", @@ -3648,7 +3649,7 @@ }, "CallbackURLs":{ "shape":"CallbackURLsListType", - "documentation":"

A list of allowed callback URLs for the identity providers.

" + "documentation":"

A list of allowed redirect (callback) URLs for the identity providers.

A redirect URI must:

See OAuth 2.0 - Redirection Endpoint.

" }, "LogoutURLs":{ "shape":"LogoutURLsListType", @@ -3656,7 +3657,7 @@ }, "DefaultRedirectURI":{ "shape":"RedirectUrlType", - "documentation":"

The default redirect URI. Must be in the CallbackURLs list.

" + "documentation":"

The default redirect URI. Must be in the CallbackURLs list.

A redirect URI must:

See OAuth 2.0 - Redirection Endpoint.

" }, "AllowedOAuthFlows":{ "shape":"OAuthFlowsType", @@ -4911,7 +4912,8 @@ "SAML", "Facebook", "Google", - "LoginWithAmazon" + "LoginWithAmazon", + "OIDC" ] }, "IdpIdentifierType":{ @@ -6068,7 +6070,7 @@ }, "Mutable":{ "shape":"BooleanType", - "documentation":"

Specifies whether the attribute can be changed once it has been created.

", + "documentation":"

Specifies whether the value of the attribute can be changed.

", "box":true }, "Required":{ @@ -6856,7 +6858,7 @@ }, "CallbackURLs":{ "shape":"CallbackURLsListType", - "documentation":"

A list of allowed callback URLs for the identity providers.

" + "documentation":"

A list of allowed redirect (callback) URLs for the identity providers.

A redirect URI must:

See OAuth 2.0 - Redirection Endpoint.

" }, "LogoutURLs":{ "shape":"LogoutURLsListType", @@ -6864,7 +6866,7 @@ }, "DefaultRedirectURI":{ "shape":"RedirectUrlType", - "documentation":"

The default redirect URI. Must be in the CallbackURLs list.

" + "documentation":"

The default redirect URI. Must be in the CallbackURLs list.

A redirect URI must:

See OAuth 2.0 - Redirection Endpoint.

" }, "AllowedOAuthFlows":{ "shape":"OAuthFlowsType", @@ -7213,7 +7215,7 @@ }, "CallbackURLs":{ "shape":"CallbackURLsListType", - "documentation":"

A list of allowed callback URLs for the identity providers.

" + "documentation":"

A list of allowed redirect (callback) URLs for the identity providers.

A redirect URI must:

See OAuth 2.0 - Redirection Endpoint.

" }, "LogoutURLs":{ "shape":"LogoutURLsListType", @@ -7221,7 +7223,7 @@ }, "DefaultRedirectURI":{ "shape":"RedirectUrlType", - "documentation":"

The default redirect URI. Must be in the CallbackURLs list.

" + "documentation":"

The default redirect URI. Must be in the CallbackURLs list.

A redirect URI must:

See OAuth 2.0 - Redirection Endpoint.

" }, "AllowedOAuthFlows":{ "shape":"OAuthFlowsType", @@ -7337,7 +7339,7 @@ }, "LambdaConfig":{ "shape":"LambdaConfigType", - "documentation":"

The AWS Lambda triggers associated with tue user pool.

" + "documentation":"

The AWS Lambda triggers associated with the user pool.

" }, "Status":{ "shape":"StatusType", @@ -7430,6 +7432,10 @@ "UserPoolAddOns":{ "shape":"UserPoolAddOnsType", "documentation":"

The user pool add-ons.

" + }, + "Arn":{ + "shape":"ArnType", + "documentation":"

The Amazon Resource Name (ARN) for the user pool.

" } }, "documentation":"

A container for information about the user pool.

" diff --git a/botocore/data/cognito-sync/2014-06-30/service-2.json b/botocore/data/cognito-sync/2014-06-30/service-2.json index 75273ff2..68c5ab5b 100644 --- a/botocore/data/cognito-sync/2014-06-30/service-2.json +++ b/botocore/data/cognito-sync/2014-06-30/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"cognito-sync", "jsonVersion":"1.1", "serviceFullName":"Amazon Cognito Sync", + "serviceId":"Cognito Sync", "signatureVersion":"v4", "protocol":"rest-json", "uid":"cognito-sync-2014-06-30" diff --git a/botocore/data/comprehend/2017-11-27/service-2.json b/botocore/data/comprehend/2017-11-27/service-2.json index 949374e9..a479a369 100644 --- a/botocore/data/comprehend/2017-11-27/service-2.json +++ b/botocore/data/comprehend/2017-11-27/service-2.json @@ -44,7 +44,7 @@ {"shape":"BatchSizeLimitExceededException"}, {"shape":"InternalServerException"} ], - "documentation":"

Inspects the text of a batch of documents and returns information about them. For more information about entities, see how-entities

" + "documentation":"

Inspects the text of a batch of documents for named entities and returns information about them. For more information about named entities, see how-entities

" }, "BatchDetectKeyPhrases":{ "name":"BatchDetectKeyPhrases", @@ -80,6 +80,70 @@ ], "documentation":"

Inspects a batch of documents and returns an inference of the prevailing sentiment, POSITIVE, NEUTRAL, MIXED, or NEGATIVE, in each one.

" }, + "DescribeDominantLanguageDetectionJob":{ + "name":"DescribeDominantLanguageDetectionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDominantLanguageDetectionJobRequest"}, + "output":{"shape":"DescribeDominantLanguageDetectionJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"JobNotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Gets the properties associated with a dominant language detection job. Use this operation to get the status of a detection job.

" + }, + "DescribeEntitiesDetectionJob":{ + "name":"DescribeEntitiesDetectionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEntitiesDetectionJobRequest"}, + "output":{"shape":"DescribeEntitiesDetectionJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"JobNotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Gets the properties associated with an entities detection job. Use this operation to get the status of a detection job.

" + }, + "DescribeKeyPhrasesDetectionJob":{ + "name":"DescribeKeyPhrasesDetectionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeKeyPhrasesDetectionJobRequest"}, + "output":{"shape":"DescribeKeyPhrasesDetectionJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"JobNotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Gets the properties associated with a key phrases detection job. Use this operation to get the status of a detection job.

" + }, + "DescribeSentimentDetectionJob":{ + "name":"DescribeSentimentDetectionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSentimentDetectionJobRequest"}, + "output":{"shape":"DescribeSentimentDetectionJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"JobNotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Gets the properties associated with a sentiment detection job. Use this operation to get the status of a detection job.

" + }, "DescribeTopicsDetectionJob":{ "name":"DescribeTopicsDetectionJob", "http":{ @@ -125,7 +189,7 @@ {"shape":"UnsupportedLanguageException"}, {"shape":"InternalServerException"} ], - "documentation":"

Inspects text for entities, and returns information about them. For more information, about entities, see how-entities.

" + "documentation":"

Inspects text for named entities, and returns information about them. For more information, about named entities, see how-entities.

" }, "DetectKeyPhrases":{ "name":"DetectKeyPhrases", @@ -159,6 +223,70 @@ ], "documentation":"

Inspects text and returns an inference of the prevailing sentiment (POSITIVE, NEUTRAL, MIXED, or NEGATIVE).

" }, + "ListDominantLanguageDetectionJobs":{ + "name":"ListDominantLanguageDetectionJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDominantLanguageDetectionJobsRequest"}, + "output":{"shape":"ListDominantLanguageDetectionJobsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InvalidFilterException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Gets a list of the dominant language detection jobs that you have submitted.

" + }, + "ListEntitiesDetectionJobs":{ + "name":"ListEntitiesDetectionJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListEntitiesDetectionJobsRequest"}, + "output":{"shape":"ListEntitiesDetectionJobsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InvalidFilterException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Gets a list of the entity detection jobs that you have submitted.

" + }, + "ListKeyPhrasesDetectionJobs":{ + "name":"ListKeyPhrasesDetectionJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListKeyPhrasesDetectionJobsRequest"}, + "output":{"shape":"ListKeyPhrasesDetectionJobsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InvalidFilterException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Get a list of key phrase detection jobs that you have submitted.

" + }, + "ListSentimentDetectionJobs":{ + "name":"ListSentimentDetectionJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSentimentDetectionJobsRequest"}, + "output":{"shape":"ListSentimentDetectionJobsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InvalidFilterException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Gets a list of sentiment detection jobs that you have submitted.

" + }, "ListTopicsDetectionJobs":{ "name":"ListTopicsDetectionJobs", "http":{ @@ -175,6 +303,66 @@ ], "documentation":"

Gets a list of the topic detection jobs that you have submitted.

" }, + "StartDominantLanguageDetectionJob":{ + "name":"StartDominantLanguageDetectionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartDominantLanguageDetectionJobRequest"}, + "output":{"shape":"StartDominantLanguageDetectionJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Starts an asynchronous dominant language detection job for a collection of documents. Use the operation to track the status of a job.

" + }, + "StartEntitiesDetectionJob":{ + "name":"StartEntitiesDetectionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartEntitiesDetectionJobRequest"}, + "output":{"shape":"StartEntitiesDetectionJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Starts an asynchronous entity detection job for a collection of documents. Use the operation to track the status of a job.

" + }, + "StartKeyPhrasesDetectionJob":{ + "name":"StartKeyPhrasesDetectionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartKeyPhrasesDetectionJobRequest"}, + "output":{"shape":"StartKeyPhrasesDetectionJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Starts an asynchronous key phrase detection job for a collection of documents. Use the operation to track the status of a job.

" + }, + "StartSentimentDetectionJob":{ + "name":"StartSentimentDetectionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartSentimentDetectionJobRequest"}, + "output":{"shape":"StartSentimentDetectionJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Starts an asynchronous sentiment detection job for a collection of documents. Use the operation to track the status of a job.

" + }, "StartTopicsDetectionJob":{ "name":"StartTopicsDetectionJob", "http":{ @@ -189,6 +377,66 @@ {"shape":"InternalServerException"} ], "documentation":"

Starts an asynchronous topic detection job. Use the DescribeTopicDetectionJob operation to track the status of a job.

" + }, + "StopDominantLanguageDetectionJob":{ + "name":"StopDominantLanguageDetectionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopDominantLanguageDetectionJobRequest"}, + "output":{"shape":"StopDominantLanguageDetectionJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"JobNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Stops a dominant language detection job in progress.

If the job state is IN_PROGRESS the job will be marked for termination and put into the STOPPING state.

If the job is in the COMPLETED or FAILED state when you call the StopDominantLanguageDetectionJob operation, the operation will return a 400 Internal Request Exception.

When a job is stopped, any document that has already been processed will be written to the output location.

" + }, + "StopEntitiesDetectionJob":{ + "name":"StopEntitiesDetectionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopEntitiesDetectionJobRequest"}, + "output":{"shape":"StopEntitiesDetectionJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"JobNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Stops an entities detection job in progress.

If the job state is IN_PROGRESS the job will be marked for termination and put into the STOPPING state.

If the job is in the COMPLETED or FAILED state when you call the StopEntitiesDetectionJob operation, the operation will return a 400 Internal Request Exception.

When a job is stopped, any document that has already been processed will be written to the output location.

" + }, + "StopKeyPhrasesDetectionJob":{ + "name":"StopKeyPhrasesDetectionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopKeyPhrasesDetectionJobRequest"}, + "output":{"shape":"StopKeyPhrasesDetectionJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"JobNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Stops a key phrases detection job in progress.

If the job state is IN_PROGRESS the job will be marked for termination and put into the STOPPING state.

If the job is in the COMPLETED or FAILED state when you call the StopKeyPhrasesDetectionJob operation, the operation will return a 400 Internal Request Exception.

When a job is stopped, any document that has already been processed will be written to the output location.

" + }, + "StopSentimentDetectionJob":{ + "name":"StopSentimentDetectionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopSentimentDetectionJobRequest"}, + "output":{"shape":"StopSentimentDetectionJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"JobNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Stops a sentiment detection job in progress.

If the job state is IN_PROGRESS the job will be marked for termination and put into the STOPPING state.

If the job is in the COMPLETED or FAILED state when you call the StopSentimentDetectionJob operation, the operation will return a 400 Internal Request Exception.

When a job is stopped, any document that has already been processed will be written to the output location.

" } }, "shapes":{ @@ -260,8 +508,8 @@ "documentation":"

A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document must contain fewer than 5,000 bytes of UTF-8 encoded characters.

" }, "LanguageCode":{ - "shape":"String", - "documentation":"

The language of the input documents. All documents must be in the same language.

" + "shape":"LanguageCode", + "documentation":"

The language of the input documents. You can specify English (\"en\") or Spanish (\"es\"). All documents must be in the same language.

" } } }, @@ -308,8 +556,8 @@ "documentation":"

A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document must contain fewer than 5,000 bytes of UTF-8 encoded characters.

" }, "LanguageCode":{ - "shape":"String", - "documentation":"

The language of the input documents. All documents must be in the same language.

" + "shape":"LanguageCode", + "documentation":"

The language of the input documents. You can specify English (\"en\") or Spanish (\"es\"). All documents must be in the same language.

" } } }, @@ -360,8 +608,8 @@ "documentation":"

A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document must contain fewer than 5,000 bytes of UTF-8 encoded characters.

" }, "LanguageCode":{ - "shape":"String", - "documentation":"

The language of the input documents. All documents must be in the same language.

" + "shape":"LanguageCode", + "documentation":"

The language of the input documents. You can specify English (\"en\") or Spanish (\"es\"). All documents must be in the same language.

" } } }, @@ -418,6 +666,82 @@ "min":1, "pattern":"^[a-zA-Z0-9-]+$" }, + "DescribeDominantLanguageDetectionJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.

" + } + } + }, + "DescribeDominantLanguageDetectionJobResponse":{ + "type":"structure", + "members":{ + "DominantLanguageDetectionJobProperties":{ + "shape":"DominantLanguageDetectionJobProperties", + "documentation":"

An object that contains the properties associated with a dominant language detection job.

" + } + } + }, + "DescribeEntitiesDetectionJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.

" + } + } + }, + "DescribeEntitiesDetectionJobResponse":{ + "type":"structure", + "members":{ + "EntitiesDetectionJobProperties":{ + "shape":"EntitiesDetectionJobProperties", + "documentation":"

An object that contains the properties associated with an entities detection job.

" + } + } + }, + "DescribeKeyPhrasesDetectionJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.

" + } + } + }, + "DescribeKeyPhrasesDetectionJobResponse":{ + "type":"structure", + "members":{ + "KeyPhrasesDetectionJobProperties":{ + "shape":"KeyPhrasesDetectionJobProperties", + "documentation":"

An object that contains the properties associated with a key phrases detection job.

" + } + } + }, + "DescribeSentimentDetectionJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.

" + } + } + }, + "DescribeSentimentDetectionJobResponse":{ + "type":"structure", + "members":{ + "SentimentDetectionJobProperties":{ + "shape":"SentimentDetectionJobProperties", + "documentation":"

An object that contains the properties associated with a sentiment detection job.

" + } + } + }, "DescribeTopicsDetectionJobRequest":{ "type":"structure", "required":["JobId"], @@ -469,7 +793,7 @@ }, "LanguageCode":{ "shape":"LanguageCode", - "documentation":"

The RFC 5646 language code of the input text. If the request does not specify the language code, the service detects the dominant language. If you specify a language code that the service does not support, it returns UnsupportedLanguageException exception. For more information about RFC 5646, see Tags for Identifying Languages on the IETF Tools web site.

" + "documentation":"

The language of the input documents. You can specify English (\"en\") or Spanish (\"es\"). All documents must be in the same language.

" } } }, @@ -495,7 +819,7 @@ }, "LanguageCode":{ "shape":"LanguageCode", - "documentation":"

The RFC 5646 language code for the input text. If you don't specify a language code, Amazon Comprehend detects the dominant language. If you specify the code for a language that Amazon Comprehend does not support, it returns and UnsupportedLanguageException. For more information about RFC 5646, see Tags for Identifying Languages on the IETF Tools web site.

" + "documentation":"

The language of the input documents. You can specify English (\"en\") or Spanish (\"es\"). All documents must be in the same language.

" } } }, @@ -521,7 +845,7 @@ }, "LanguageCode":{ "shape":"LanguageCode", - "documentation":"

The RFC 5646 language code for the input text. If you don't specify a language code, Amazon Comprehend detects the dominant language. If you specify the code for a language that Amazon Comprehend does not support, it returns and UnsupportedLanguageException. For more information about RFC 5646, see Tags for Identifying Languages on the IETF Tools web site.

" + "documentation":"

The language of the input documents. You can specify English (\"en\") or Spanish (\"es\"). All documents must be in the same language.

" } } }, @@ -543,7 +867,7 @@ "members":{ "LanguageCode":{ "shape":"String", - "documentation":"

The RFC 5646 language code for the dominant language.

" + "documentation":"

The RFC 5646 language code for the dominant language. For more information about RFC 5646, see Tags for Identifying Languages on the IETF Tools web site.

" }, "Score":{ "shape":"Float", @@ -552,6 +876,138 @@ }, "documentation":"

Returns the code for the dominant language in the input text and the level of confidence that Amazon Comprehend has in the accuracy of the detection.

" }, + "DominantLanguageDetectionJobFilter":{ + "type":"structure", + "members":{ + "JobName":{ + "shape":"JobName", + "documentation":"

Filters on the name of the job.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

Filters the list of jobs based on job status. Returns only jobs with the specified status.

" + }, + "SubmitTimeBefore":{ + "shape":"Timestamp", + "documentation":"

Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.

" + }, + "SubmitTimeAfter":{ + "shape":"Timestamp", + "documentation":"

Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.

" + } + }, + "documentation":"

Provides information for filtering a list of dominant language detection jobs. For more information, see the operation.

" + }, + "DominantLanguageDetectionJobProperties":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier assigned to the dominant language detection job.

" + }, + "JobName":{ + "shape":"JobName", + "documentation":"

The name that you assigned to the dominant language detection job.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

The current status of the dominant language detection job. If the status is FAILED, the Message field shows the reason for the failure.

" + }, + "Message":{ + "shape":"AnyLengthString", + "documentation":"

A description for the status of a job.

" + }, + "SubmitTime":{ + "shape":"Timestamp", + "documentation":"

The time that the dominant language detection job was submitted for processing.

" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The time that the dominant language detection job completed.

" + }, + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

The input data configuration that you supplied when you created the dominant language detection job.

" + }, + "OutputDataConfig":{ + "shape":"OutputDataConfig", + "documentation":"

The output data configuration that you supplied when you created the dominant language detection job.

" + } + }, + "documentation":"

Provides information about a dominant language detection job.

" + }, + "DominantLanguageDetectionJobPropertiesList":{ + "type":"list", + "member":{"shape":"DominantLanguageDetectionJobProperties"} + }, + "EntitiesDetectionJobFilter":{ + "type":"structure", + "members":{ + "JobName":{ + "shape":"JobName", + "documentation":"

Filters on the name of the job.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

Filters the list of jobs based on job status. Returns only jobs with the specified status.

" + }, + "SubmitTimeBefore":{ + "shape":"Timestamp", + "documentation":"

Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.

" + }, + "SubmitTimeAfter":{ + "shape":"Timestamp", + "documentation":"

Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.

" + } + }, + "documentation":"

Provides information for filtering a list of entities detection jobs. For more information, see the operation.

" + }, + "EntitiesDetectionJobProperties":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier assigned to the entities detection job.

" + }, + "JobName":{ + "shape":"JobName", + "documentation":"

The name that you assigned the entities detection job.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

The current status of the entities detection job. If the status is FAILED, the Message field shows the reason for the failure.

" + }, + "Message":{ + "shape":"AnyLengthString", + "documentation":"

A description of the status of a job.

" + }, + "SubmitTime":{ + "shape":"Timestamp", + "documentation":"

The time that the entities detection job was submitted for processing.

" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The time that the entities detection job completed.

" + }, + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

The input data configuration that you supplied when you created the entities detection job.

" + }, + "OutputDataConfig":{ + "shape":"OutputDataConfig", + "documentation":"

The output data configuration that you supplied when you created the entities detection job.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language code of the input documents.

" + } + }, + "documentation":"

Provides information about an entities detection job.

" + }, + "EntitiesDetectionJobPropertiesList":{ + "type":"list", + "member":{"shape":"EntitiesDetectionJobProperties"} + }, "Entity":{ "type":"structure", "members":{ @@ -670,7 +1126,9 @@ "SUBMITTED", "IN_PROGRESS", "COMPLETED", - "FAILED" + "FAILED", + "STOP_REQUESTED", + "STOPPED" ] }, "KeyPhrase":{ @@ -695,6 +1153,74 @@ }, "documentation":"

Describes a key noun phrase.

" }, + "KeyPhrasesDetectionJobFilter":{ + "type":"structure", + "members":{ + "JobName":{ + "shape":"JobName", + "documentation":"

Filters on the name of the job.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

Filters the list of jobs based on job status. Returns only jobs with the specified status.

" + }, + "SubmitTimeBefore":{ + "shape":"Timestamp", + "documentation":"

Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.

" + }, + "SubmitTimeAfter":{ + "shape":"Timestamp", + "documentation":"

Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.

" + } + }, + "documentation":"

Provides information for filtering a list of key phrases detection jobs. For more information, see the operation.

" + }, + "KeyPhrasesDetectionJobProperties":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier assigned to the key phrases detection job.

" + }, + "JobName":{ + "shape":"JobName", + "documentation":"

The name that you assigned the key phrases detection job.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

The current status of the key phrases detection job. If the status is FAILED, the Message field shows the reason for the failure.

" + }, + "Message":{ + "shape":"AnyLengthString", + "documentation":"

A description of the status of a job.

" + }, + "SubmitTime":{ + "shape":"Timestamp", + "documentation":"

The time that the key phrases detection job was submitted for processing.

" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The time that the key phrases detection job completed.

" + }, + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

The input data configuration that you supplied when you created the key phrases detection job.

" + }, + "OutputDataConfig":{ + "shape":"OutputDataConfig", + "documentation":"

The output data configuration that you supplied when you created the key phrases detection job.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language code of the input documents.

" + } + }, + "documentation":"

Provides information about a key phrases detection job.

" + }, + "KeyPhrasesDetectionJobPropertiesList":{ + "type":"list", + "member":{"shape":"KeyPhrasesDetectionJobProperties"} + }, "LanguageCode":{ "type":"string", "enum":[ @@ -702,6 +1228,96 @@ "es" ] }, + "ListDominantLanguageDetectionJobsRequest":{ + "type":"structure", + "members":{ + "Filter":{ + "shape":"DominantLanguageDetectionJobFilter", + "documentation":"

Filters the jobs that are returned. You can filter jobs on their name, status, or the date and time that they were submitted. You can only set one filter at a time.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Identifies the next page of results to return.

" + }, + "MaxResults":{ + "shape":"MaxResultsInteger", + "documentation":"

The maximum number of results to return in each page. The default is 100.

" + } + } + }, + "ListDominantLanguageDetectionJobsResponse":{ + "type":"structure", + "members":{ + "DominantLanguageDetectionJobPropertiesList":{ + "shape":"DominantLanguageDetectionJobPropertiesList", + "documentation":"

A list containing the properties of each job that is returned.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Identifies the next page of results to return.

" + } + } + }, + "ListEntitiesDetectionJobsRequest":{ + "type":"structure", + "members":{ + "Filter":{ + "shape":"EntitiesDetectionJobFilter", + "documentation":"

Filters the jobs that are returned. You can filter jobs on their name, status, or the date and time that they were submitted. You can only set one filter at a time.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Identifies the next page of results to return.

" + }, + "MaxResults":{ + "shape":"MaxResultsInteger", + "documentation":"

The maximum number of results to return in each page. The default is 100.

" + } + } + }, + "ListEntitiesDetectionJobsResponse":{ + "type":"structure", + "members":{ + "EntitiesDetectionJobPropertiesList":{ + "shape":"EntitiesDetectionJobPropertiesList", + "documentation":"

A list containing the properties of each job that is returned.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Identifies the next page of results to return.

" + } + } + }, + "ListKeyPhrasesDetectionJobsRequest":{ + "type":"structure", + "members":{ + "Filter":{ + "shape":"KeyPhrasesDetectionJobFilter", + "documentation":"

Filters the jobs that are returned. You can filter jobs on their name, status, or the date and time that they were submitted. You can only set one filter at a time.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Identifies the next page of results to return.

" + }, + "MaxResults":{ + "shape":"MaxResultsInteger", + "documentation":"

The maximum number of results to return in each page. The default is 100.

" + } + } + }, + "ListKeyPhrasesDetectionJobsResponse":{ + "type":"structure", + "members":{ + "KeyPhrasesDetectionJobPropertiesList":{ + "shape":"KeyPhrasesDetectionJobPropertiesList", + "documentation":"

A list containing the properties of each job that is returned.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Identifies the next page of results to return.

" + } + } + }, "ListOfDetectDominantLanguageResult":{ "type":"list", "member":{"shape":"BatchDetectDominantLanguageItemResult"} @@ -730,6 +1346,36 @@ "type":"list", "member":{"shape":"KeyPhrase"} }, + "ListSentimentDetectionJobsRequest":{ + "type":"structure", + "members":{ + "Filter":{ + "shape":"SentimentDetectionJobFilter", + "documentation":"

Filters the jobs that are returned. You can filter jobs on their name, status, or the date and time that they were submitted. You can only set one filter at a time.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Identifies the next page of results to return.

" + }, + "MaxResults":{ + "shape":"MaxResultsInteger", + "documentation":"

The maximum number of results to return in each page. The default is 100.

" + } + } + }, + "ListSentimentDetectionJobsResponse":{ + "type":"structure", + "members":{ + "SentimentDetectionJobPropertiesList":{ + "shape":"SentimentDetectionJobPropertiesList", + "documentation":"

A list containing the properties of each job that is returned.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Identifies the next page of results to return.

" + } + } + }, "ListTopicsDetectionJobsRequest":{ "type":"structure", "members":{ @@ -743,7 +1389,7 @@ }, "MaxResults":{ "shape":"MaxResultsInteger", - "documentation":"

The maximum number of results to return in each page.

" + "documentation":"

The maximum number of results to return in each page. The default is 100.

" } } }, @@ -776,7 +1422,7 @@ "members":{ "S3Uri":{ "shape":"S3Uri", - "documentation":"

The Amazon S3 URI where you want to write the output data. The URI must be in the same region as the API endpoint that you are calling.

The service creates an output file called output.tar.gz. It is a compressed archive that contains two files, topic-terms.csv that lists the terms associated with each topic, and doc-topics.csv that lists the documents associated with each topic. For more information, see topic-modeling.

" + "documentation":"

When you use the OutputDataConfig object with asynchronous operations, you specify the Amazon S3 location where you want to write the output data. The URI must be in the same region as the API endpoint that you are calling. The location is used as the prefix for the actual location of the output file.

When the topic detection job is finished, the service creates an output file in a directory specific to the job. The S3Uri field contains the location of the output file, called output.tar.gz. It is a compressed archive that contains the output of the operation.

" } }, "documentation":"

Provides configuration parameters for the output of topic detection jobs.

" @@ -784,7 +1430,75 @@ "S3Uri":{ "type":"string", "max":1024, - "pattern":"s3://([^/]+)(/.*)?" + "pattern":"s3://[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9](/.*)?" + }, + "SentimentDetectionJobFilter":{ + "type":"structure", + "members":{ + "JobName":{ + "shape":"JobName", + "documentation":"

Filters on the name of the job.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

Filters the list of jobs based on job status. Returns only jobs with the specified status.

" + }, + "SubmitTimeBefore":{ + "shape":"Timestamp", + "documentation":"

Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.

" + }, + "SubmitTimeAfter":{ + "shape":"Timestamp", + "documentation":"

Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.

" + } + }, + "documentation":"

Provides information for filtering a list of sentiment detection jobs. For more information, see the operation.

" + }, + "SentimentDetectionJobProperties":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier assigned to the sentiment detection job.

" + }, + "JobName":{ + "shape":"JobName", + "documentation":"

The name that you assigned to the sentiment detection job.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

The current status of the sentiment detection job. If the status is FAILED, the Message field shows the reason for the failure.

" + }, + "Message":{ + "shape":"AnyLengthString", + "documentation":"

A description of the status of a job.

" + }, + "SubmitTime":{ + "shape":"Timestamp", + "documentation":"

The time that the sentiment detection job was submitted for processing.

" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The time that the sentiment detection job ended.

" + }, + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

The input data configuration that you supplied when you created the sentiment detection job.

" + }, + "OutputDataConfig":{ + "shape":"OutputDataConfig", + "documentation":"

The output data configuration that you supplied when you created the sentiment detection job.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language code of the input documents.

" + } + }, + "documentation":"

Provides information about a sentiment detection job.

" + }, + "SentimentDetectionJobPropertiesList":{ + "type":"list", + "member":{"shape":"SentimentDetectionJobProperties"} }, "SentimentScore":{ "type":"structure", @@ -817,7 +1531,7 @@ "MIXED" ] }, - "StartTopicsDetectionJobRequest":{ + "StartDominantLanguageDetectionJobRequest":{ "type":"structure", "required":[ "InputDataConfig", @@ -833,6 +1547,197 @@ "shape":"OutputDataConfig", "documentation":"

Specifies where to send the output files.

" }, + "DataAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.

" + }, + "JobName":{ + "shape":"JobName", + "documentation":"

An identifier for the job.

" + }, + "ClientRequestToken":{ + "shape":"ClientRequestTokenString", + "documentation":"

A unique identifier for the request. If you do not set the client request token, Amazon Comprehend generates one.

", + "idempotencyToken":true + } + } + }, + "StartDominantLanguageDetectionJobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier generated for the job. To get the status of a job, use this identifier with the operation.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

The status of the job.

" + } + } + }, + "StartEntitiesDetectionJobRequest":{ + "type":"structure", + "required":[ + "InputDataConfig", + "OutputDataConfig", + "DataAccessRoleArn", + "LanguageCode" + ], + "members":{ + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

Specifies the format and location of the input data for the job.

" + }, + "OutputDataConfig":{ + "shape":"OutputDataConfig", + "documentation":"

Specifies where to send the output files.

" + }, + "DataAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.

" + }, + "JobName":{ + "shape":"JobName", + "documentation":"

The identifier of the job.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language of the input documents. You can specify English (\"en\") or Spanish (\"es\"). All documents must be in the same language.

" + }, + "ClientRequestToken":{ + "shape":"ClientRequestTokenString", + "documentation":"

A unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one.

", + "idempotencyToken":true + } + } + }, + "StartEntitiesDetectionJobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier generated for the job. To get the status of job, use this identifier with the operation.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

The status of the job.

" + } + } + }, + "StartKeyPhrasesDetectionJobRequest":{ + "type":"structure", + "required":[ + "InputDataConfig", + "OutputDataConfig", + "DataAccessRoleArn", + "LanguageCode" + ], + "members":{ + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

Specifies the format and location of the input data for the job.

" + }, + "OutputDataConfig":{ + "shape":"OutputDataConfig", + "documentation":"

Specifies where to send the output files.

" + }, + "DataAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.

" + }, + "JobName":{ + "shape":"JobName", + "documentation":"

The identifier of the job.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language of the input documents. You can specify English (\"en\") or Spanish (\"es\"). All documents must be in the same language.

" + }, + "ClientRequestToken":{ + "shape":"ClientRequestTokenString", + "documentation":"

A unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one.

", + "idempotencyToken":true + } + } + }, + "StartKeyPhrasesDetectionJobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier generated for the job. To get the status of a job, use this identifier with the operation.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

The status of the job.

" + } + } + }, + "StartSentimentDetectionJobRequest":{ + "type":"structure", + "required":[ + "InputDataConfig", + "OutputDataConfig", + "DataAccessRoleArn", + "LanguageCode" + ], + "members":{ + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

Specifies the format and location of the input data for the job.

" + }, + "OutputDataConfig":{ + "shape":"OutputDataConfig", + "documentation":"

Specifies where to send the output files.

" + }, + "DataAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.

" + }, + "JobName":{ + "shape":"JobName", + "documentation":"

The identifier of the job.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language of the input documents. You can specify English (\"en\") or Spanish (\"es\"). All documents must be in the same language.

" + }, + "ClientRequestToken":{ + "shape":"ClientRequestTokenString", + "documentation":"

A unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one.

", + "idempotencyToken":true + } + } + }, + "StartSentimentDetectionJobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier generated for the job. To get the status of a job, use this identifier with the operation.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

The status of the job.

" + } + } + }, + "StartTopicsDetectionJobRequest":{ + "type":"structure", + "required":[ + "InputDataConfig", + "OutputDataConfig", + "DataAccessRoleArn" + ], + "members":{ + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

Specifies the format and location of the input data for the job.

" + }, + "OutputDataConfig":{ + "shape":"OutputDataConfig", + "documentation":"

Specifies where to send the output files. The output is a compressed archive with two files, topic-terms.csv that lists the terms associated with each topic, and doc-topics.csv that lists the documents associated with each topic

" + }, "DataAccessRoleArn":{ "shape":"IamRoleArn", "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.

" @@ -865,6 +1770,98 @@ } } }, + "StopDominantLanguageDetectionJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier of the dominant language detection job to stop.

" + } + } + }, + "StopDominantLanguageDetectionJobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier of the dominant language detection job to stop.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

Either STOPPING if the job is currently running, or STOPPED if the job was previously stopped with the StopDominantLanguageDetectionJob operation.

" + } + } + }, + "StopEntitiesDetectionJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier of the entities detection job to stop.

" + } + } + }, + "StopEntitiesDetectionJobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier of the entities detection job to stop.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

Either STOPPING if the job is currently running, or STOPPED if the job was previously stopped with the StopEntitiesDetectionJob operation.

" + } + } + }, + "StopKeyPhrasesDetectionJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier of the key phrases detection job to stop.

" + } + } + }, + "StopKeyPhrasesDetectionJobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier of the key phrases detection job to stop.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

Either STOPPING if the job is currently running, or STOPPED if the job was previously stopped with the StopKeyPhrasesDetectionJob operation.

" + } + } + }, + "StopSentimentDetectionJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier of the sentiment detection job to stop.

" + } + } + }, + "StopSentimentDetectionJobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier of the sentiment detection job to stop.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

Either STOPPING if the job is currently running, or STOPPED if the job was previously stopped with the StopSentimentDetectionJob operation.

" + } + } + }, "String":{ "type":"string", "min":1 diff --git a/botocore/data/config/2014-11-12/service-2.json b/botocore/data/config/2014-11-12/service-2.json index 663a199f..cefa015f 100644 --- a/botocore/data/config/2014-11-12/service-2.json +++ b/botocore/data/config/2014-11-12/service-2.json @@ -115,6 +115,19 @@ ], "documentation":"

Deletes pending authorization requests for a specified aggregator account in a specified region.

" }, + "DeleteRetentionConfiguration":{ + "name":"DeleteRetentionConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRetentionConfigurationRequest"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"NoSuchRetentionConfigurationException"} + ], + "documentation":"

Deletes the retention configuration.

" + }, "DeliverConfigSnapshot":{ "name":"DeliverConfigSnapshot", "http":{ @@ -318,6 +331,21 @@ ], "documentation":"

Returns a list of all pending aggregation requests.

" }, + "DescribeRetentionConfigurations":{ + "name":"DescribeRetentionConfigurations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRetentionConfigurationsRequest"}, + "output":{"shape":"DescribeRetentionConfigurationsResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"NoSuchRetentionConfigurationException"}, + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

Returns the details of one or more retention configurations. If the retention configuration name is not specified, this action returns the details for all the retention configurations for that account.

Currently, AWS Config supports only one retention configuration per region in your account.

" + }, "GetAggregateComplianceDetailsByConfigRule":{ "name":"GetAggregateComplianceDetailsByConfigRule", "http":{ @@ -431,7 +459,7 @@ {"shape":"NoAvailableConfigurationRecorderException"}, {"shape":"ResourceNotDiscoveredException"} ], - "documentation":"

Returns a list of configuration items for the specified resource. The list contains details about each state of the resource during the specified time interval.

The response is paginated. By default, AWS Config returns a limit of 10 configuration items per page. You can customize this number with the limit parameter. The response includes a nextToken string. To get the next page of results, run the request again and specify the string for the nextToken parameter.

Each call to the API is limited to span a duration of seven days. It is likely that the number of records returned is smaller than the specified limit. In such cases, you can make another call, using the nextToken.

" + "documentation":"

Returns a list of configuration items for the specified resource. The list contains details about each state of the resource during the specified time interval. If you specified a retention period to retain your ConfigurationItems between a minimum of 30 days and a maximum of 7 years (2557 days), AWS Config returns the ConfigurationItems for the specified retention period.

The response is paginated. By default, AWS Config returns a limit of 10 configuration items per page. You can customize this number with the limit parameter. The response includes a nextToken string. To get the next page of results, run the request again and specify the string for the nextToken parameter.

Each call to the API is limited to span a duration of seven days. It is likely that the number of records returned is smaller than the specified limit. In such cases, you can make another call, using the nextToken.

" }, "ListDiscoveredResources":{ "name":"ListDiscoveredResources", @@ -544,6 +572,20 @@ ], "documentation":"

Used by an AWS Lambda function to deliver evaluation results to AWS Config. This action is required in every AWS Lambda function that is invoked by an AWS Config rule.

" }, + "PutRetentionConfiguration":{ + "name":"PutRetentionConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutRetentionConfigurationRequest"}, + "output":{"shape":"PutRetentionConfigurationResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"MaxNumberOfRetentionConfigurationsExceededException"} + ], + "documentation":"

Creates and updates the retention configuration with details about retention period (number of days) that AWS Config stores your historical information. The API creates the RetentionConfiguration object and names the object as default. When you have a RetentionConfiguration object named default, calling the API modifies the default object.

Currently, AWS Config supports only one retention configuration per region in your account.

" + }, "StartConfigRulesEvaluation":{ "name":"StartConfigRulesEvaluation", "http":{ @@ -598,7 +640,7 @@ }, "AllAwsRegions":{ "shape":"Boolean", - "documentation":"

If true, aggreagate existing AWS Config regions and future regions.

" + "documentation":"

If true, aggregate existing AWS Config regions and future regions.

" }, "AwsRegions":{ "shape":"AggregatorRegionList", @@ -1567,6 +1609,16 @@ } } }, + "DeleteRetentionConfigurationRequest":{ + "type":"structure", + "required":["RetentionConfigurationName"], + "members":{ + "RetentionConfigurationName":{ + "shape":"RetentionConfigurationName", + "documentation":"

The name of the retention configuration to delete.

" + } + } + }, "DeliverConfigSnapshotRequest":{ "type":"structure", "required":["deliveryChannelName"], @@ -1876,7 +1928,7 @@ "members":{ "AggregatedSourceStatusList":{ "shape":"AggregatedSourceStatusList", - "documentation":"

Retuns an AggregatedSourceStatus object.

" + "documentation":"

Returns an AggregatedSourceStatus object.

" }, "NextToken":{ "shape":"String", @@ -2025,6 +2077,32 @@ } } }, + "DescribeRetentionConfigurationsRequest":{ + "type":"structure", + "members":{ + "RetentionConfigurationNames":{ + "shape":"RetentionConfigurationNameList", + "documentation":"

A list of names of retention configurations for which you want details. If you do not specify a name, AWS Config returns details for all the retention configurations for that account.

Currently, AWS Config supports only one retention configuration per region in your account.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" + } + } + }, + "DescribeRetentionConfigurationsResponse":{ + "type":"structure", + "members":{ + "RetentionConfigurations":{ + "shape":"RetentionConfigurationList", + "documentation":"

Returns a retention configuration object.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" + } + } + }, "EarlierTime":{"type":"timestamp"}, "EmptiableStringWithCharLimit256":{ "type":"string", @@ -2612,6 +2690,13 @@ "documentation":"

You have reached the limit of the number of delivery channels you can create.

", "exception":true }, + "MaxNumberOfRetentionConfigurationsExceededException":{ + "type":"structure", + "members":{ + }, + "documentation":"

Failed to add the retention configuration because a retention configuration with that name already exists.

", + "exception":true + }, "MaximumExecutionFrequency":{ "type":"string", "enum":[ @@ -2696,6 +2781,13 @@ "documentation":"

You have specified a delivery channel that does not exist.

", "exception":true }, + "NoSuchRetentionConfigurationException":{ + "type":"structure", + "members":{ + }, + "documentation":"

You have specified a retention configuration that does not exist.

", + "exception":true + }, "OrderingTimestamp":{"type":"timestamp"}, "OrganizationAccessDeniedException":{ "type":"structure", @@ -2718,7 +2810,7 @@ }, "AllAwsRegions":{ "shape":"Boolean", - "documentation":"

If true, aggreagate existing AWS Config regions and future regions.

" + "documentation":"

If true, aggregate existing AWS Config regions and future regions.

" } }, "documentation":"

This object contains regions to setup the aggregator and an IAM role to retrieve organization details.

" @@ -2869,6 +2961,25 @@ }, "documentation":"

" }, + "PutRetentionConfigurationRequest":{ + "type":"structure", + "required":["RetentionPeriodInDays"], + "members":{ + "RetentionPeriodInDays":{ + "shape":"RetentionPeriodInDays", + "documentation":"

Number of days AWS Config stores your historical information.

Currently, only applicable to the configuration item history.

" + } + } + }, + "PutRetentionConfigurationResponse":{ + "type":"structure", + "members":{ + "RetentionConfiguration":{ + "shape":"RetentionConfiguration", + "documentation":"

Returns a retention configuration object.

" + } + } + }, "RecorderName":{ "type":"string", "max":256, @@ -3087,7 +3198,8 @@ "AWS::ElasticBeanstalk::Application", "AWS::ElasticBeanstalk::ApplicationVersion", "AWS::ElasticBeanstalk::Environment", - "AWS::ElasticLoadBalancing::LoadBalancer" + "AWS::ElasticLoadBalancing::LoadBalancer", + "AWS::XRay::EncryptionConfig" ] }, "ResourceTypeList":{ @@ -3100,6 +3212,45 @@ "max":20, "min":0 }, + "RetentionConfiguration":{ + "type":"structure", + "required":[ + "Name", + "RetentionPeriodInDays" + ], + "members":{ + "Name":{ + "shape":"RetentionConfigurationName", + "documentation":"

The name of the retention configuration object.

" + }, + "RetentionPeriodInDays":{ + "shape":"RetentionPeriodInDays", + "documentation":"

Number of days AWS Config stores your historical information.

Currently, only applicable to the configuration item history.

" + } + }, + "documentation":"

An object with the name of the retention configuration and the retention period in days. The object stores the configuration for data retention in AWS Config.

" + }, + "RetentionConfigurationList":{ + "type":"list", + "member":{"shape":"RetentionConfiguration"} + }, + "RetentionConfigurationName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[\\w\\-]+" + }, + "RetentionConfigurationNameList":{ + "type":"list", + "member":{"shape":"RetentionConfigurationName"}, + "max":1, + "min":0 + }, + "RetentionPeriodInDays":{ + "type":"integer", + "max":2557, + "min":30 + }, "RuleLimit":{ "type":"integer", "max":50, diff --git a/botocore/data/cur/2017-01-06/service-2.json b/botocore/data/cur/2017-01-06/service-2.json index 300a9331..4ed705a8 100644 --- a/botocore/data/cur/2017-01-06/service-2.json +++ b/botocore/data/cur/2017-01-06/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceFullName":"AWS Cost and Usage Report Service", + "serviceId":"Cost and Usage Report Service", "signatureVersion":"v4", "signingName":"cur", "targetPrefix":"AWSOrigamiServiceGatewayService", diff --git a/botocore/data/datapipeline/2012-10-29/service-2.json b/botocore/data/datapipeline/2012-10-29/service-2.json index 8fdea8f3..55298952 100644 --- a/botocore/data/datapipeline/2012-10-29/service-2.json +++ b/botocore/data/datapipeline/2012-10-29/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"datapipeline", "jsonVersion":"1.1", "serviceFullName":"AWS Data Pipeline", + "serviceId":"Data Pipeline", "signatureVersion":"v4", "targetPrefix":"DataPipeline", "protocol":"json", diff --git a/botocore/data/dax/2017-04-19/service-2.json b/botocore/data/dax/2017-04-19/service-2.json index 1dab8c8c..1c2cf5fc 100644 --- a/botocore/data/dax/2017-04-19/service-2.json +++ b/botocore/data/dax/2017-04-19/service-2.json @@ -7,6 +7,7 @@ "protocol":"json", "serviceAbbreviation":"Amazon DAX", "serviceFullName":"Amazon DynamoDB Accelerator (DAX)", + "serviceId":"DAX", "signatureVersion":"v4", "targetPrefix":"AmazonDAXV3", "uid":"dax-2017-04-19" diff --git a/botocore/data/devicefarm/2015-06-23/service-2.json 
b/botocore/data/devicefarm/2015-06-23/service-2.json index 3f563dc7..b81bc1dc 100644 --- a/botocore/data/devicefarm/2015-06-23/service-2.json +++ b/botocore/data/devicefarm/2015-06-23/service-2.json @@ -1386,6 +1386,10 @@ "billingMethod":{ "shape":"BillingMethod", "documentation":"

The billing method for the remote access session.

" + }, + "vpceConfigurationArns":{ + "shape":"AmazonResourceNames", + "documentation":"

An array of Amazon Resource Names (ARNs) included in the VPC endpoint configuration.

" } }, "documentation":"

Configuration settings for a remote access session, including billing method.

" diff --git a/botocore/data/directconnect/2012-10-25/service-2.json b/botocore/data/directconnect/2012-10-25/service-2.json index dd91dc19..15f3a185 100644 --- a/botocore/data/directconnect/2012-10-25/service-2.json +++ b/botocore/data/directconnect/2012-10-25/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceFullName":"AWS Direct Connect", + "serviceId":"Direct Connect", "signatureVersion":"v4", "targetPrefix":"OvertureService", "uid":"directconnect-2012-10-25" diff --git a/botocore/data/dms/2016-01-01/service-2.json b/botocore/data/dms/2016-01-01/service-2.json index 6892e071..f64e545c 100644 --- a/botocore/data/dms/2016-01-01/service-2.json +++ b/botocore/data/dms/2016-01-01/service-2.json @@ -895,9 +895,13 @@ "shape":"S3Settings", "documentation":"

Settings in JSON format for the target Amazon S3 endpoint. For more information about the available settings, see the Extra Connection Attributes section at Using Amazon S3 as a Target for AWS Database Migration Service.

" }, + "DmsTransferSettings":{ + "shape":"DmsTransferSettings", + "documentation":"

The settings in JSON format for the DMS Transfer type source endpoint.

Attributes include:

Shorthand syntax: ServiceAccessRoleArn=string ,BucketName=string,CompressionType=string

JSON syntax:

{ \"ServiceAccessRoleArn\": \"string\", \"BucketName\": \"string\", \"CompressionType\": \"none\"|\"gzip\" }

" + }, "MongoDbSettings":{ "shape":"MongoDbSettings", - "documentation":"

Settings in JSON format for the source MongoDB endpoint. For more information about the available settings, see the Configuration Properties When Using MongoDB as a Source for AWS Database Migration Service section at Using Amazon S3 as a Target for AWS Database Migration Service.

" + "documentation":"

Settings in JSON format for the source MongoDB endpoint. For more information about the available settings, see the Configuration Properties When Using MongoDB as a Source for AWS Database Migration Service section at Using MongoDB as a Target for AWS Database Migration Service.

" } }, "documentation":"

" @@ -1110,7 +1114,7 @@ }, "CdcStartTime":{ "shape":"TStamp", - "documentation":"

Indicates the start time for a change data capture (CDC) operation. Use either CdcStartTime or CdcStartPosition to specify when you want a CDC operation to start. Specifying both values results in an error.

" + "documentation":"

Indicates the start time for a change data capture (CDC) operation. Use either CdcStartTime or CdcStartPosition to specify when you want a CDC operation to start. Specifying both values results in an error.

Timestamp Example: --cdc-start-time “2018-03-08T12:12:12”

" }, "CdcStartPosition":{ "shape":"String", @@ -1814,6 +1818,20 @@ "verify-full" ] }, + "DmsTransferSettings":{ + "type":"structure", + "members":{ + "ServiceAccessRoleArn":{ + "shape":"String", + "documentation":"

The IAM role that has permission to access the Amazon S3 bucket.

" + }, + "BucketName":{ + "shape":"String", + "documentation":"

The name of the S3 bucket to use.

" + } + }, + "documentation":"

The settings in JSON format for the DMS Transfer type source endpoint.

" + }, "DynamoDbSettings":{ "type":"structure", "required":["ServiceAccessRoleArn"], @@ -1904,6 +1922,10 @@ "shape":"S3Settings", "documentation":"

The settings for the S3 target endpoint. For more information, see the S3Settings structure.

" }, + "DmsTransferSettings":{ + "shape":"DmsTransferSettings", + "documentation":"

The settings in JSON format for the DMS Transfer type source endpoint.

Attributes include:

Shorthand syntax: ServiceAccessRoleArn=string ,BucketName=string,CompressionType=string

JSON syntax:

{ \"ServiceAccessRoleArn\": \"string\", \"BucketName\": \"string\", \"CompressionType\": \"none\"|\"gzip\" }

" + }, "MongoDbSettings":{ "shape":"MongoDbSettings", "documentation":"

The settings for the MongoDB source endpoint. For more information, see the MongoDbSettings structure.

" @@ -2227,6 +2249,10 @@ "shape":"S3Settings", "documentation":"

Settings in JSON format for the target S3 endpoint. For more information about the available settings, see the Extra Connection Attributes section at Using Amazon S3 as a Target for AWS Database Migration Service.

" }, + "DmsTransferSettings":{ + "shape":"DmsTransferSettings", + "documentation":"

The settings in JSON format for the DMS Transfer type source endpoint.

Attributes include:

Shorthand syntax: ServiceAccessRoleArn=string ,BucketName=string,CompressionType=string

JSON syntax:

{ \"ServiceAccessRoleArn\": \"string\", \"BucketName\": \"string\", \"CompressionType\": \"none\"|\"gzip\" }

" + }, "MongoDbSettings":{ "shape":"MongoDbSettings", "documentation":"

Settings in JSON format for the source MongoDB endpoint. For more information about the available settings, see the Configuration Properties When Using MongoDB as a Source for AWS Database Migration Service section at Using Amazon S3 as a Target for AWS Database Migration Service.

" @@ -2400,7 +2426,7 @@ }, "CdcStartTime":{ "shape":"TStamp", - "documentation":"

Indicates the start time for a change data capture (CDC) operation. Use either CdcStartTime or CdcStartPosition to specify when you want a CDC operation to start. Specifying both values results in an error.

" + "documentation":"

Indicates the start time for a change data capture (CDC) operation. Use either CdcStartTime or CdcStartPosition to specify when you want a CDC operation to start. Specifying both values results in an error.

Timestamp Example: --cdc-start-time “2018-03-08T12:12:12”

" }, "CdcStartPosition":{ "shape":"String", @@ -2607,6 +2633,13 @@ "refreshing" ] }, + "ReloadOptionValue":{ + "type":"string", + "enum":[ + "data-reload", + "validate-only" + ] + }, "ReloadTablesMessage":{ "type":"structure", "required":[ @@ -2616,11 +2649,15 @@ "members":{ "ReplicationTaskArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the replication instance.

" + "documentation":"

The Amazon Resource Name (ARN) of the replication task.

" }, "TablesToReload":{ "shape":"TableListToReload", "documentation":"

The name and schema of the table to be reloaded.

" + }, + "ReloadOption":{ + "shape":"ReloadOptionValue", + "documentation":"

Options for reload. Specify data-reload to reload the data and re-validate it if validation is enabled. Specify validate-only to re-validate the table. This option applies only when validation is enabled for the task.

Valid values: data-reload, validate-only

Default value is data-reload.

" } } }, @@ -2862,7 +2899,7 @@ "members":{ "ReplicationTaskIdentifier":{ "shape":"String", - "documentation":"

The replication task identifier.

Constraints:

" + "documentation":"

The user-assigned replication task identifier or name.

Constraints:

" }, "SourceEndpointArn":{ "shape":"String", @@ -3146,7 +3183,7 @@ }, "CdcStartTime":{ "shape":"TStamp", - "documentation":"

Indicates the start time for a change data capture (CDC) operation. Use either CdcStartTime or CdcStartPosition to specify when you want a CDC operation to start. Specifying both values results in an error.

" + "documentation":"

Indicates the start time for a change data capture (CDC) operation. Use either CdcStartTime or CdcStartPosition to specify when you want a CDC operation to start. Specifying both values results in an error.

Timestamp Example: --cdc-start-time “2018-03-08T12:12:12”

" }, "CdcStartPosition":{ "shape":"String", @@ -3340,6 +3377,10 @@ "ValidationState":{ "shape":"String", "documentation":"

The validation state of the table.

The parameter can have the following values

" + }, + "ValidationStateDetails":{ + "shape":"String", + "documentation":"

Additional details about the state of validation.

" } }, "documentation":"

" diff --git a/botocore/data/ds/2015-04-16/service-2.json b/botocore/data/ds/2015-04-16/service-2.json index 622a0382..dd153440 100644 --- a/botocore/data/ds/2015-04-16/service-2.json +++ b/botocore/data/ds/2015-04-16/service-2.json @@ -585,6 +585,25 @@ ], "documentation":"

Removes tags from a directory.

" }, + "ResetUserPassword":{ + "name":"ResetUserPassword", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetUserPasswordRequest"}, + "output":{"shape":"ResetUserPasswordResult"}, + "errors":[ + {"shape":"DirectoryUnavailableException"}, + {"shape":"UserDoesNotExistException"}, + {"shape":"InvalidPasswordException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"EntityDoesNotExistException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ], + "documentation":"

Resets the password for any user in your AWS Managed Microsoft AD or Simple AD directory.

" + }, "RestoreFromSnapshot":{ "name":"RestoreFromSnapshot", "http":{ @@ -1204,6 +1223,12 @@ "documentation":"

The result of a CreateTrust request.

" }, "CreatedDateTime":{"type":"timestamp"}, + "CustomerUserName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^(?!.*\\\\|.*\"|.*\\/|.*\\[|.*\\]|.*:|.*;|.*\\||.*=|.*,|.*\\+|.*\\*|.*\\?|.*<|.*>|.*@).*$" + }, "DeleteAssociatedConditionalForwarder":{"type":"boolean"}, "DeleteConditionalForwarderRequest":{ "type":"structure", @@ -2114,6 +2139,15 @@ "documentation":"

One or more parameters are not valid.

", "exception":true }, + "InvalidPasswordException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "documentation":"

The new password provided by the user does not meet the password complexity requirements defined in your directory.

", + "exception":true + }, "IpAddr":{ "type":"string", "pattern":"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$" @@ -2477,6 +2511,33 @@ "documentation":"

The AWS request identifier.

", "pattern":"^([A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12})$" }, + "ResetUserPasswordRequest":{ + "type":"structure", + "required":[ + "DirectoryId", + "UserName", + "NewPassword" + ], + "members":{ + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

Identifier of the AWS Managed Microsoft AD or Simple AD directory in which the user resides.

" + }, + "UserName":{ + "shape":"CustomerUserName", + "documentation":"

The username of the user whose password will be reset.

" + }, + "NewPassword":{ + "shape":"UserPassword", + "documentation":"

The new password that will be reset.

" + } + } + }, + "ResetUserPasswordResult":{ + "type":"structure", + "members":{ + } + }, "ResourceId":{ "type":"string", "pattern":"^[d]-[0-9a-f]{10}$" @@ -2968,11 +3029,26 @@ }, "UpdateSecurityGroupForDirectoryControllers":{"type":"boolean"}, "UseSameUsername":{"type":"boolean"}, + "UserDoesNotExistException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "documentation":"

The user provided a username that does not exist in your directory.

", + "exception":true + }, "UserName":{ "type":"string", "min":1, "pattern":"[a-zA-Z0-9._-]+" }, + "UserPassword":{ + "type":"string", + "max":127, + "min":1, + "sensitive":true + }, "VerifyTrustRequest":{ "type":"structure", "required":["TrustId"], diff --git a/botocore/data/dynamodb/2012-08-10/service-2.json b/botocore/data/dynamodb/2012-08-10/service-2.json index 03acc805..182b0c6b 100644 --- a/botocore/data/dynamodb/2012-08-10/service-2.json +++ b/botocore/data/dynamodb/2012-08-10/service-2.json @@ -76,7 +76,7 @@ {"shape":"GlobalTableAlreadyExistsException"}, {"shape":"TableNotFoundException"} ], - "documentation":"

Creates a global table from an existing table. A global table creates a replication relationship between two or more DynamoDB tables with the same table name in the provided regions.

Tables can only be added as the replicas of a global table group under the following conditions:

If global secondary indexes are specified, then the following conditions must also be met:

" + "documentation":"

Creates a global table from an existing table. A global table creates a replication relationship between two or more DynamoDB tables with the same table name in the provided regions.

If you want to add a new replica table to a global table, each of the following conditions must be true:

If global secondary indexes are specified, then the following conditions must also be met:

Write capacity settings should be set consistently across your replica tables and secondary indexes. DynamoDB strongly recommends enabling auto scaling to manage the write capacity settings for all of your global tables replicas and indexes.

If you prefer to manage write capacity settings manually, you should provision equal replicated write capacity units to your replica tables. You should also provision equal replicated write capacity units to matching secondary indexes across your global table.

" }, "CreateTable":{ "name":"CreateTable", @@ -1305,7 +1305,7 @@ "members":{ "ContinuousBackupsDescription":{ "shape":"ContinuousBackupsDescription", - "documentation":"

ContinuousBackupsDescription can be one of the following : ENABLED, DISABLED.

" + "documentation":"

Represents the continuous backups and point in time recovery settings on the table.

" } } }, @@ -1826,6 +1826,7 @@ "type":"list", "member":{"shape":"AttributeMap"} }, + "KMSMasterKeyArn":{"type":"string"}, "Key":{ "type":"map", "key":{"shape":"AttributeName"}, @@ -1914,7 +1915,7 @@ "documentation":"

Too many operations for a given subscriber.

" } }, - "documentation":"

Up to 50 CreateBackup operations are allowed per second, per account. There is no limit to the number of daily on-demand backups that can be taken.

Up to 10 simultaneous table operations are allowed per account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, and RestoreTableToPointInTime.

For tables with secondary indexes, only one of those tables can be in the CREATING state at any point in time. Do not attempt to create more than one such table simultaneously.

The total limit of tables in the ACTIVE state is 250.

", + "documentation":"

There is no limit to the number of daily on-demand backups that can be taken.

Up to 10 simultaneous table operations are allowed per account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, and RestoreTableToPointInTime.

For tables with secondary indexes, only one of those tables can be in the CREATING state at any point in time. Do not attempt to create more than one such table simultaneously.

The total limit of tables in the ACTIVE state is 250.

", "exception":true }, "ListAttributeValue":{ @@ -1942,7 +1943,7 @@ }, "ExclusiveStartBackupArn":{ "shape":"BackupArn", - "documentation":"

LastEvaluatedBackupARN returned by the previous ListBackups call.

" + "documentation":"

LastEvaluatedBackupArn is the ARN of the backup last evaluated when the current page of results was returned, inclusive of the current page of results. This value may be specified as the ExclusiveStartBackupArn of a new ListBackups operation in order to fetch the next page of results.

" } } }, @@ -1955,7 +1956,7 @@ }, "LastEvaluatedBackupArn":{ "shape":"BackupArn", - "documentation":"

Last evaluated BackupARN.

" + "documentation":"

The ARN of the backup last evaluated when the current page of results was returned, inclusive of the current page of results. This value may be specified as the ExclusiveStartBackupArn of a new ListBackups operation in order to fetch the next page of results.

If LastEvaluatedBackupArn is empty, then the last page of results has been processed and there are no more results to be retrieved.

If LastEvaluatedBackupArn is not empty, this may or may not indicate there is more data to be returned. All results are guaranteed to have been returned if and only if no value for LastEvaluatedBackupArn is returned.

" } } }, @@ -2787,6 +2788,14 @@ "Status":{ "shape":"SSEStatus", "documentation":"

The current state of server-side encryption:

" + }, + "SSEType":{ + "shape":"SSEType", + "documentation":"

Server-side encryption type:

" + }, + "KMSMasterKeyArn":{ + "shape":"KMSMasterKeyArn", + "documentation":"

The KMS master key ARN used for the KMS encryption.

" } }, "documentation":"

The description of the server-side encryption status on the specified table.

" @@ -2812,6 +2821,13 @@ "DISABLED" ] }, + "SSEType":{ + "type":"string", + "enum":[ + "AES256", + "KMS" + ] + }, "ScalarAttributeType":{ "type":"string", "enum":[ diff --git a/botocore/data/dynamodbstreams/2012-08-10/service-2.json b/botocore/data/dynamodbstreams/2012-08-10/service-2.json index 699e8aec..c8112d65 100644 --- a/botocore/data/dynamodbstreams/2012-08-10/service-2.json +++ b/botocore/data/dynamodbstreams/2012-08-10/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.0", "protocol":"json", "serviceFullName":"Amazon DynamoDB Streams", + "serviceId":"DynamoDB Streams", "signatureVersion":"v4", "signingName":"dynamodb", "targetPrefix":"DynamoDBStreams_20120810", diff --git a/botocore/data/ec2/2016-11-15/service-2.json b/botocore/data/ec2/2016-11-15/service-2.json index c92e9fba..9383bbc9 100644 --- a/botocore/data/ec2/2016-11-15/service-2.json +++ b/botocore/data/ec2/2016-11-15/service-2.json @@ -60,7 +60,7 @@ }, "input":{"shape":"AllocateHostsRequest"}, "output":{"shape":"AllocateHostsResult"}, - "documentation":"

Allocates a Dedicated Host to your account. At minimum you need to specify the instance size type, Availability Zone, and quantity of hosts you want to allocate.

" + "documentation":"

Allocates a Dedicated Host to your account. At a minimum, specify the instance size type, Availability Zone, and quantity of hosts to allocate.

" }, "AssignIpv6Addresses":{ "name":"AssignIpv6Addresses", @@ -177,7 +177,7 @@ }, "input":{"shape":"AttachVolumeRequest"}, "output":{"shape":"VolumeAttachment"}, - "documentation":"

Attaches an EBS volume to a running or stopped instance and exposes it to the instance with the specified device name.

Encrypted EBS volumes may only be attached to instances that support Amazon EBS encryption. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

For a list of supported device names, see Attaching an EBS Volume to an Instance. Any device names that aren't reserved for instance store volumes can be used for EBS volumes. For more information, see Amazon EC2 Instance Store in the Amazon Elastic Compute Cloud User Guide.

If a volume has an AWS Marketplace product code:

For an overview of the AWS Marketplace, see Introducing AWS Marketplace.

For more information about EBS volumes, see Attaching Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Attaches an EBS volume to a running or stopped instance and exposes it to the instance with the specified device name.

Encrypted EBS volumes may only be attached to instances that support Amazon EBS encryption. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

For a list of supported device names, see Attaching an EBS Volume to an Instance. Any device names that aren't reserved for instance store volumes can be used for EBS volumes. For more information, see Amazon EC2 Instance Store in the Amazon Elastic Compute Cloud User Guide.

If a volume has an AWS Marketplace product code:

For more information about EBS volumes, see Attaching Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

" }, "AttachVpnGateway":{ "name":"AttachVpnGateway", @@ -283,7 +283,7 @@ }, "input":{"shape":"CancelSpotInstanceRequestsRequest"}, "output":{"shape":"CancelSpotInstanceRequestsResult"}, - "documentation":"

Cancels one or more Spot Instance requests. Spot Instances are instances that Amazon EC2 starts on your behalf when the maximum price that you specify exceeds the current Spot price. For more information, see Spot Instance Requests in the Amazon EC2 User Guide for Linux Instances.

Canceling a Spot Instance request does not terminate running Spot Instances associated with the request.

" + "documentation":"

Cancels one or more Spot Instance requests.

Canceling a Spot Instance request does not terminate running Spot Instances associated with the request.

" }, "ConfirmProductInstance":{ "name":"ConfirmProductInstance", @@ -323,7 +323,7 @@ }, "input":{"shape":"CopySnapshotRequest"}, "output":{"shape":"CopySnapshotResult"}, - "documentation":"

Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. You can copy the snapshot within the same region or from one region to another. You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs). The snapshot is copied to the regional endpoint that you send the HTTP request to.

Copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted snapshots remain unencrypted, unless the Encrypted flag is specified during the snapshot copy operation. By default, encrypted snapshot copies use the default AWS Key Management Service (AWS KMS) customer master key (CMK); however, you can specify a non-default CMK with the KmsKeyId parameter.

To copy an encrypted snapshot that has been shared from another account, you must have permissions for the CMK used to encrypt the snapshot.

Snapshots created by the CopySnapshot action have an arbitrary volume ID that should not be used for any purpose.

For more information, see Copying an Amazon EBS Snapshot in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. You can copy the snapshot within the same region or from one region to another. You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs). The snapshot is copied to the regional endpoint that you send the HTTP request to.

Copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted snapshots remain unencrypted, unless the Encrypted flag is specified during the snapshot copy operation. By default, encrypted snapshot copies use the default AWS Key Management Service (AWS KMS) customer master key (CMK); however, you can specify a non-default CMK with the KmsKeyId parameter.

To copy an encrypted snapshot that has been shared from another account, you must have permissions for the CMK used to encrypt the snapshot.

Snapshots created by copying another snapshot have an arbitrary volume ID that should not be used for any purpose.

For more information, see Copying an Amazon EBS Snapshot in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateCustomerGateway":{ "name":"CreateCustomerGateway", @@ -383,7 +383,7 @@ }, "input":{"shape":"CreateFleetRequest"}, "output":{"shape":"CreateFleetResult"}, - "documentation":"

Launches an EC2 Fleet.

You can create a single EC2 Fleet that includes multiple launch specifications that vary by instance type, AMI, Availability Zone, or subnet.

For more information, see Launching an EC2 Fleet in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Launches an EC2 Fleet.

You can create a single EC2 Fleet that includes multiple launch specifications that vary by instance type, AMI, Availability Zone, or subnet.

For more information, see Launching an EC2 Fleet in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateFlowLogs":{ "name":"CreateFlowLogs", @@ -473,7 +473,7 @@ }, "input":{"shape":"CreateNatGatewayRequest"}, "output":{"shape":"CreateNatGatewayResult"}, - "documentation":"

Creates a NAT gateway in the specified subnet. A NAT gateway can be used to enable instances in a private subnet to connect to the Internet. This action creates a network interface in the specified subnet with a private IP address from the IP address range of the subnet. For more information, see NAT Gateways in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a NAT gateway in the specified public subnet. This action creates a network interface in the specified subnet with a private IP address from the IP address range of the subnet. Internet-bound traffic from a private subnet can be routed to the NAT gateway, therefore enabling instances in the private subnet to connect to the internet. For more information, see NAT Gateways in the Amazon Virtual Private Cloud User Guide.

" }, "CreateNetworkAcl":{ "name":"CreateNetworkAcl", @@ -512,7 +512,7 @@ }, "input":{"shape":"CreateNetworkInterfacePermissionRequest"}, "output":{"shape":"CreateNetworkInterfacePermissionResult"}, - "documentation":"

Grants an AWS authorized partner account permission to attach the specified network interface to an instance in their account.

You can grant permission to a single AWS account only, and only one account at a time.

" + "documentation":"

Grants an AWS-authorized account permission to attach the specified network interface to an instance in their account.

You can grant permission to a single AWS account only, and only one account at a time.

" }, "CreatePlacementGroup":{ "name":"CreatePlacementGroup", @@ -571,7 +571,7 @@ }, "input":{"shape":"CreateSnapshotRequest"}, "output":{"shape":"Snapshot"}, - "documentation":"

Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance.

When a snapshot is created, any AWS Marketplace product codes that are associated with the source volume are propagated to the snapshot.

You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your EBS volume at the time the snapshot command is issued; this may exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending.

To create a snapshot for EBS volumes that serve as root devices, you should stop the instance before taking the snapshot.

Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected.

You can tag your snapshots during creation. For more information, see Tagging Your Amazon EC2 Resources.

For more information, see Amazon Elastic Block Store and Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance.

When a snapshot is created, any AWS Marketplace product codes that are associated with the source volume are propagated to the snapshot.

You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your EBS volume at the time the snapshot command is issued; this may exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending.

To create a snapshot for EBS volumes that serve as root devices, you should stop the instance before taking the snapshot.

Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected.

You can tag your snapshots during creation. For more information, see Tagging Your Amazon EC2 Resources in the Amazon Elastic Compute Cloud User Guide.

For more information, see Amazon Elastic Block Store and Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateSpotDatafeedSubscription":{ "name":"CreateSpotDatafeedSubscription", @@ -610,7 +610,7 @@ }, "input":{"shape":"CreateVolumeRequest"}, "output":{"shape":"Volume"}, - "documentation":"

Creates an EBS volume that can be attached to an instance in the same Availability Zone. The volume is created in the regional endpoint that you send the HTTP request to. For more information see Regions and Endpoints.

You can create a new empty volume or restore a volume from an EBS snapshot. Any AWS Marketplace product codes from the snapshot are propagated to the volume.

You can create encrypted volumes with the Encrypted parameter. Encrypted volumes may only be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also automatically encrypted. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

You can tag your volumes during creation. For more information, see Tagging Your Amazon EC2 Resources.

For more information, see Creating an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Creates an EBS volume that can be attached to an instance in the same Availability Zone. The volume is created in the regional endpoint that you send the HTTP request to. For more information see Regions and Endpoints.

You can create a new empty volume or restore a volume from an EBS snapshot. Any AWS Marketplace product codes from the snapshot are propagated to the volume.

You can create encrypted volumes with the Encrypted parameter. Encrypted volumes may only be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also automatically encrypted. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

You can tag your volumes during creation. For more information, see Tagging Your Amazon EC2 Resources in the Amazon Elastic Compute Cloud User Guide.

For more information, see Creating an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateVpc":{ "name":"CreateVpc", @@ -727,7 +727,7 @@ }, "input":{"shape":"DeleteFleetsRequest"}, "output":{"shape":"DeleteFleetsResult"}, - "documentation":"

Deletes the specified EC2 Fleet.

After you delete an EC2 Fleet, the EC2 Fleet launches no new instances. You must specify whether the EC2 Fleet should also terminate its instances. If you terminate the instances, the EC2 Fleet enters the deleted_terminating state. Otherwise, the EC2 Fleet enters the deleted_running state, and the instances continue to run until they are interrupted or you terminate them manually.

" + "documentation":"

Deletes the specified EC2 Fleet.

After you delete an EC2 Fleet, it launches no new instances. You must specify whether an EC2 Fleet should also terminate its instances. If you terminate the instances, the EC2 Fleet enters the deleted_terminating state. Otherwise, the EC2 Fleet enters the deleted_running state, and the instances continue to run until they are interrupted or you terminate them manually.

" }, "DeleteFlowLogs":{ "name":"DeleteFlowLogs", @@ -913,7 +913,7 @@ "requestUri":"/" }, "input":{"shape":"DeleteVolumeRequest"}, - "documentation":"

Deletes the specified EBS volume. The volume must be in the available state (not attached to an instance).

The volume may remain in the deleting state for several minutes.

For more information, see Deleting an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Deletes the specified EBS volume. The volume must be in the available state (not attached to an instance).

The volume can remain in the deleting state for several minutes.

For more information, see Deleting an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

" }, "DeleteVpc":{ "name":"DeleteVpc", @@ -1148,7 +1148,7 @@ }, "input":{"shape":"DescribeFleetsRequest"}, "output":{"shape":"DescribeFleetsResult"}, - "documentation":"

Describes the specified EC2 Fleet.

" + "documentation":"

Describes one or more of your EC2 Fleets.

" }, "DescribeFlowLogs":{ "name":"DescribeFlowLogs", @@ -1438,7 +1438,7 @@ }, "input":{"shape":"DescribePrefixListsRequest"}, "output":{"shape":"DescribePrefixListsResult"}, - "documentation":"

Describes available AWS services in a prefix list format, which includes the prefix list name and prefix list ID of the service and the IP address range for the service. A prefix list ID is required for creating an outbound security group rule that allows traffic from a VPC to access an AWS service through a gateway VPC endpoint.

" + "documentation":"

Describes available AWS services in a prefix list format, which includes the prefix list name and prefix list ID of the service and the IP address range for the service. A prefix list ID is required for creating an outbound security group rule that allows traffic from a VPC to access an AWS service through a gateway VPC endpoint. Currently, the services that support this action are Amazon S3 and Amazon DynamoDB.

" }, "DescribePrincipalIdFormat":{ "name":"DescribePrincipalIdFormat", @@ -1618,7 +1618,7 @@ }, "input":{"shape":"DescribeSpotInstanceRequestsRequest"}, "output":{"shape":"DescribeSpotInstanceRequestsResult"}, - "documentation":"

Describes the Spot Instance requests that belong to your account. Spot Instances are instances that Amazon EC2 launches when the Spot price that you specify exceeds the current Spot price. For more information, see Spot Instance Requests in the Amazon EC2 User Guide for Linux Instances.

You can use DescribeSpotInstanceRequests to find a running Spot Instance by examining the response. If the status of the Spot Instance is fulfilled, the instance ID appears in the response and contains the identifier of the instance. Alternatively, you can use DescribeInstances with a filter to look for instances where the instance lifecycle is spot.

Spot Instance requests are deleted four hours after they are canceled and their instances are terminated.

" + "documentation":"

Describes the specified Spot Instance requests.

You can use DescribeSpotInstanceRequests to find a running Spot Instance by examining the response. If the status of the Spot Instance is fulfilled, the instance ID appears in the response and contains the identifier of the instance. Alternatively, you can use DescribeInstances with a filter to look for instances where the instance lifecycle is spot.

Spot Instance requests are deleted four hours after they are canceled and their instances are terminated.

" }, "DescribeSpotPriceHistory":{ "name":"DescribeSpotPriceHistory", @@ -1678,7 +1678,7 @@ }, "input":{"shape":"DescribeVolumeStatusRequest"}, "output":{"shape":"DescribeVolumeStatusResult"}, - "documentation":"

Describes the status of the specified volumes. Volume status provides the result of the checks performed on your volumes to determine events that can impair the performance of your volumes. The performance of a volume can be affected if an issue occurs on the volume's underlying host. If the volume's underlying host experiences a power outage or system issue, after the system is restored, there could be data inconsistencies on the volume. Volume events notify you if this occurs. Volume actions notify you if any action needs to be taken in response to the event.

The DescribeVolumeStatus operation provides the following information about the specified volumes:

Status: Reflects the current status of the volume. The possible values are ok, impaired , warning, or insufficient-data. If all checks pass, the overall status of the volume is ok. If the check fails, the overall status is impaired. If the status is insufficient-data, then the checks may still be taking place on your volume at the time. We recommend that you retry the request. For more information on volume status, see Monitoring the Status of Your Volumes.

Events: Reflect the cause of a volume status and may require you to take action. For example, if your volume returns an impaired status, then the volume event might be potential-data-inconsistency. This means that your volume has been affected by an issue with the underlying host, has all I/O operations disabled, and may have inconsistent data.

Actions: Reflect the actions you may have to take in response to an event. For example, if the status of the volume is impaired and the volume event shows potential-data-inconsistency, then the action shows enable-volume-io. This means that you may want to enable the I/O operations for the volume by calling the EnableVolumeIO action and then check the volume for data consistency.

Volume status is based on the volume status checks, and does not reflect the volume state. Therefore, volume status does not indicate volumes in the error state (for example, when a volume is incapable of accepting I/O.)

" + "documentation":"

Describes the status of the specified volumes. Volume status provides the result of the checks performed on your volumes to determine events that can impair the performance of your volumes. The performance of a volume can be affected if an issue occurs on the volume's underlying host. If the volume's underlying host experiences a power outage or system issue, after the system is restored, there could be data inconsistencies on the volume. Volume events notify you if this occurs. Volume actions notify you if any action needs to be taken in response to the event.

The DescribeVolumeStatus operation provides the following information about the specified volumes:

Status: Reflects the current status of the volume. The possible values are ok, impaired, warning, or insufficient-data. If all checks pass, the overall status of the volume is ok. If the check fails, the overall status is impaired. If the status is insufficient-data, then the checks may still be taking place on your volume at the time. We recommend that you retry the request. For more information about volume status, see Monitoring the Status of Your Volumes in the Amazon Elastic Compute Cloud User Guide.

Events: Reflect the cause of a volume status and may require you to take action. For example, if your volume returns an impaired status, then the volume event might be potential-data-inconsistency. This means that your volume has been affected by an issue with the underlying host, has all I/O operations disabled, and may have inconsistent data.

Actions: Reflect the actions you may have to take in response to an event. For example, if the status of the volume is impaired and the volume event shows potential-data-inconsistency, then the action shows enable-volume-io. This means that you may want to enable the I/O operations for the volume by calling the EnableVolumeIO action and then check the volume for data consistency.

Volume status is based on the volume status checks, and does not reflect the volume state. Therefore, volume status does not indicate volumes in the error state (for example, when a volume is incapable of accepting I/O.)

" }, "DescribeVolumes":{ "name":"DescribeVolumes", @@ -1698,7 +1698,7 @@ }, "input":{"shape":"DescribeVolumesModificationsRequest"}, "output":{"shape":"DescribeVolumesModificationsResult"}, - "documentation":"

Reports the current modification status of EBS volumes.

Current-generation EBS volumes support modification of attributes including type, size, and (for io1 volumes) IOPS provisioning while either attached to or detached from an instance. Following an action from the API or the console to modify a volume, the status of the modification may be modifying, optimizing, completed, or failed. If a volume has never been modified, then certain elements of the returned VolumeModification objects are null.

You can also use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. For more information, see Monitoring Volume Modifications\".

" + "documentation":"

Reports the current modification status of EBS volumes.

Current-generation EBS volumes support modification of attributes including type, size, and (for io1 volumes) IOPS provisioning while either attached to or detached from an instance. Following an action from the API or the console to modify a volume, the status of the modification may be modifying, optimizing, completed, or failed. If a volume has never been modified, then certain elements of the returned VolumeModification objects are null.

You can also use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. For more information, see Monitoring Volume Modifications in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeVpcAttribute":{ "name":"DescribeVpcAttribute", @@ -2000,7 +2000,7 @@ }, "input":{"shape":"GetConsoleOutputRequest"}, "output":{"shape":"GetConsoleOutputResult"}, - "documentation":"

Gets the console output for the specified instance.

Instances do not have a physical monitor through which you can view their console output. They also lack physical controls that allow you to power up, reboot, or shut them down. To allow these actions, we provide them through the Amazon EC2 API and command line interface.

Instance console output is buffered and posted shortly after instance boot, reboot, and termination. Amazon EC2 preserves the most recent 64 KB output, which is available for at least one hour after the most recent post.

For Linux instances, the instance console output displays the exact console output that would normally be displayed on a physical monitor attached to a computer. This output is buffered because the instance produces it and then posts it to a store where the instance's owner can retrieve it.

For Windows instances, the instance console output includes output from the EC2Config service.

" + "documentation":"

Gets the console output for the specified instance. For Linux instances, the instance console output displays the exact console output that would normally be displayed on a physical monitor attached to a computer. For Windows instances, the instance console output includes output from the EC2Config service.

GetConsoleOutput returns up to 64 KB of console output shortly after it's generated by the instance.

By default, the console output returns buffered information that was posted shortly after an instance transition state (start, stop, reboot, or terminate). This information is available for at least one hour after the most recent post.

You can optionally retrieve the latest serial console output at any time during the instance lifecycle. This option is only supported on C5, M5, and i3.metal instances.

" }, "GetConsoleScreenshot":{ "name":"GetConsoleScreenshot", @@ -2224,7 +2224,7 @@ "requestUri":"/" }, "input":{"shape":"ModifySnapshotAttributeRequest"}, - "documentation":"

Adds or removes permission settings for the specified snapshot. You may add or remove specified AWS account IDs from a snapshot's list of create volume permissions, but you cannot do both in a single API call. If you need to both add and remove account IDs for a snapshot, you must use multiple API calls.

Encrypted snapshots and snapshots with AWS Marketplace product codes cannot be made public. Snapshots encrypted with your default CMK cannot be shared with other accounts.

For more information on modifying snapshot permissions, see Sharing Snapshots in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Adds or removes permission settings for the specified snapshot. You may add or remove specified AWS account IDs from a snapshot's list of create volume permissions, but you cannot do both in a single API call. If you need to both add and remove account IDs for a snapshot, you must use multiple API calls.

Encrypted snapshots and snapshots with AWS Marketplace product codes cannot be made public. Snapshots encrypted with your default CMK cannot be shared with other accounts.

For more information about modifying snapshot permissions, see Sharing Snapshots in the Amazon Elastic Compute Cloud User Guide.

" }, "ModifySpotFleetRequest":{ "name":"ModifySpotFleetRequest", @@ -2253,7 +2253,7 @@ }, "input":{"shape":"ModifyVolumeRequest"}, "output":{"shape":"ModifyVolumeResult"}, - "documentation":"

You can modify several parameters of an existing EBS volume, including volume size, volume type, and IOPS capacity. If your EBS volume is attached to a current-generation EC2 instance type, you may be able to apply these changes without stopping the instance or detaching the volume from it. For more information about modifying an EBS volume running Linux, see Modifying the Size, IOPS, or Type of an EBS Volume on Linux. For more information about modifying an EBS volume running Windows, see Modifying the Size, IOPS, or Type of an EBS Volume on Windows.

When you complete a resize operation on your volume, you need to extend the volume's file-system size to take advantage of the new storage capacity. For information about extending a Linux file system, see Extending a Linux File System. For information about extending a Windows file system, see Extending a Windows File System.

You can use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. You can also track the status of a modification using the DescribeVolumesModifications API. For information about tracking status changes using either method, see Monitoring Volume Modifications.

With previous-generation instance types, resizing an EBS volume may require detaching and reattaching the volume or stopping and restarting the instance. For more information about modifying an EBS volume running Linux, see Modifying the Size, IOPS, or Type of an EBS Volume on Linux. For more information about modifying an EBS volume running Windows, see Modifying the Size, IOPS, or Type of an EBS Volume on Windows.

If you reach the maximum volume modification rate per volume limit, you will need to wait at least six hours before applying further modifications to the affected EBS volume.

" + "documentation":"

You can modify several parameters of an existing EBS volume, including volume size, volume type, and IOPS capacity. If your EBS volume is attached to a current-generation EC2 instance type, you may be able to apply these changes without stopping the instance or detaching the volume from it. For more information about modifying an EBS volume running Linux, see Modifying the Size, IOPS, or Type of an EBS Volume on Linux. For more information about modifying an EBS volume running Windows, see Modifying the Size, IOPS, or Type of an EBS Volume on Windows.

When you complete a resize operation on your volume, you need to extend the volume's file-system size to take advantage of the new storage capacity. For information about extending a Linux file system, see Extending a Linux File System. For information about extending a Windows file system, see Extending a Windows File System.

You can use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. You can also track the status of a modification using the DescribeVolumesModifications API. For information about tracking status changes using either method, see Monitoring Volume Modifications.

With previous-generation instance types, resizing an EBS volume may require detaching and reattaching the volume or stopping and restarting the instance. For more information, see Modifying the Size, IOPS, or Type of an EBS Volume on Linux and Modifying the Size, IOPS, or Type of an EBS Volume on Windows.

If you reach the maximum volume modification rate per volume limit, you will need to wait at least six hours before applying further modifications to the affected EBS volume.

" }, "ModifyVolumeAttribute":{ "name":"ModifyVolumeAttribute", @@ -2516,7 +2516,7 @@ }, "input":{"shape":"RequestSpotInstancesRequest"}, "output":{"shape":"RequestSpotInstancesResult"}, - "documentation":"

Creates a Spot Instance request. Spot Instances are instances that Amazon EC2 launches when the maximum price that you specify exceeds the current Spot price. For more information, see Spot Instance Requests in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

Creates a Spot Instance request.

For more information, see Spot Instance Requests in the Amazon EC2 User Guide for Linux Instances.

" }, "ResetFpgaImageAttribute":{ "name":"ResetFpgaImageAttribute", @@ -2562,7 +2562,7 @@ "requestUri":"/" }, "input":{"shape":"ResetSnapshotAttributeRequest"}, - "documentation":"

Resets permission settings for the specified snapshot.

For more information on modifying snapshot permissions, see Sharing Snapshots in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Resets permission settings for the specified snapshot.

For more information about modifying snapshot permissions, see Sharing Snapshots in the Amazon Elastic Compute Cloud User Guide.

" }, "RestoreAddressToClassic":{ "name":"RestoreAddressToClassic", @@ -3006,17 +3006,17 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

", "locationName":"clientToken" }, "InstanceType":{ "shape":"String", - "documentation":"

Specify the instance type that you want your Dedicated Hosts to be configured for. When you specify the instance type, that is the only instance type that you can launch onto that host.

", + "documentation":"

Specify the instance type for which to configure your Dedicated Hosts. When you specify the instance type, that is the only instance type that you can launch onto that host.

", "locationName":"instanceType" }, "Quantity":{ "shape":"Integer", - "documentation":"

The number of Dedicated Hosts you want to allocate to your account with these parameters.

", + "documentation":"

The number of Dedicated Hosts to allocate to your account with these parameters.

", "locationName":"quantity" } }, @@ -3027,7 +3027,7 @@ "members":{ "HostIds":{ "shape":"ResponseHostIdList", - "documentation":"

The ID of the allocated Dedicated Host. This is used when you want to launch an instance onto a specific host.

", + "documentation":"

The ID of the allocated Dedicated Host. This is used to launch an instance onto a specific host.

", "locationName":"hostIdSet" } }, @@ -4503,10 +4503,6 @@ }, "ConversionTask":{ "type":"structure", - "required":[ - "ConversionTaskId", - "State" - ], "members":{ "ConversionTaskId":{ "shape":"String", @@ -4668,7 +4664,7 @@ }, "DestinationRegion":{ "shape":"String", - "documentation":"

The destination region to use in the PresignedUrl parameter of a snapshot copy operation. This parameter is only valid for specifying the destination region in a PresignedUrl parameter, where it is required.

CopySnapshot sends the snapshot copy to the regional endpoint that you send the HTTP request to, such as ec2.us-east-1.amazonaws.com (in the AWS CLI, this is specified with the --region parameter or the default region in your AWS configuration file).

", + "documentation":"

The destination region to use in the PresignedUrl parameter of a snapshot copy operation. This parameter is only valid for specifying the destination region in a PresignedUrl parameter, where it is required.

The snapshot copy is sent to the regional endpoint that you sent the HTTP request to (for example, ec2.us-east-1.amazonaws.com). With the AWS CLI, this is specified using the --region parameter or the default region in your AWS configuration file.

", "locationName":"destinationRegion" }, "Encrypted":{ @@ -4713,6 +4709,36 @@ }, "documentation":"

Contains the output of CopySnapshot.

" }, + "CpuOptions":{ + "type":"structure", + "members":{ + "CoreCount":{ + "shape":"Integer", + "documentation":"

The number of CPU cores for the instance.

", + "locationName":"coreCount" + }, + "ThreadsPerCore":{ + "shape":"Integer", + "documentation":"

The number of threads per CPU core.

", + "locationName":"threadsPerCore" + } + }, + "documentation":"

The CPU options for the instance.

" + }, + "CpuOptionsRequest":{ + "type":"structure", + "members":{ + "CoreCount":{ + "shape":"Integer", + "documentation":"

The number of CPU cores for the instance.

" + }, + "ThreadsPerCore":{ + "shape":"Integer", + "documentation":"

The number of threads per CPU core. To disable Intel Hyper-Threading Technology for the instance, specify a value of 1. Otherwise, specify the default value of 2.

" + } + }, + "documentation":"

The CPU options for the instance. Both the core count and threads per core must be specified in the request.

" + }, "CreateCustomerGatewayRequest":{ "type":"structure", "required":[ @@ -4912,7 +4938,7 @@ }, "TagSpecifications":{ "shape":"TagSpecificationList", - "documentation":"

The tags for an EC2 Fleet resource.

", + "documentation":"

The key-value pair for tagging the EC2 Fleet request on creation. The value for ResourceType must be fleet, otherwise the fleet request fails. To tag instances at launch, specify the tags in the launch template. For information about tagging after launch, see Tagging Your Resources.

", "locationName":"TagSpecification" } } @@ -5326,7 +5352,7 @@ }, "Protocol":{ "shape":"String", - "documentation":"

The protocol. A value of -1 or all means all protocols. If you specify all, -1, or a protocol number other than tcp, udp, or icmp, traffic on all ports is allowed, regardless of any ports or ICMP types or codes you specify. If you specify protocol 58 (ICMPv6) and specify an IPv4 CIDR block, traffic for all ICMP types and codes allowed, regardless of any that you specify. If you specify protocol 58 (ICMPv6) and specify an IPv6 CIDR block, you must specify an ICMP type and code.

", + "documentation":"

The protocol. A value of -1 or all means all protocols. If you specify all, -1, or a protocol number other than 6 (tcp), 17 (udp), or 1 (icmp), traffic on all ports is allowed, regardless of any ports or ICMP types or codes you specify. If you specify protocol 58 (ICMPv6) and specify an IPv4 CIDR block, traffic for all ICMP types and codes is allowed, regardless of any that you specify. If you specify protocol 58 (ICMPv6) and specify an IPv6 CIDR block, you must specify an ICMP type and code.

", "locationName":"protocol" }, "RuleAction":{ @@ -5854,7 +5880,7 @@ }, "Iops":{ "shape":"Integer", - "documentation":"

The number of I/O operations per second (IOPS) to provision for the volume, with a maximum ratio of 50 IOPS/GiB. Range is 100 to 32000 IOPS for volumes in most regions. For exceptions, see Amazon EBS Volume Types.

This parameter is valid only for Provisioned IOPS SSD (io1) volumes.

" + "documentation":"

The number of I/O operations per second (IOPS) to provision for the volume, with a maximum ratio of 50 IOPS/GiB. Range is 100 to 32000 IOPS for volumes in most regions. For exceptions, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

This parameter is valid only for Provisioned IOPS SSD (io1) volumes.

" }, "KmsKeyId":{ "shape":"String", @@ -7178,7 +7204,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters. Filter names and values are case-sensitive.

", + "documentation":"

One or more filters. Filter names and values are case-sensitive.

", "locationName":"Filter" }, "PublicIps":{ @@ -7303,7 +7329,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "DryRun":{ @@ -7389,7 +7415,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "DryRun":{ @@ -7421,7 +7447,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "DryRun":{ @@ -7634,7 +7660,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" } } @@ -7681,7 +7707,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" } } @@ -7696,7 +7722,7 @@ }, "Fleets":{ "shape":"FleetSet", - "documentation":"

The EC2 Fleets.

", + "documentation":"

Information about the EC2 Fleets.

", "locationName":"fleetSet" } } @@ -7790,7 +7816,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "NextToken":{ @@ -7903,7 +7929,7 @@ "members":{ "Filter":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"filter" }, "HostIds":{ @@ -8060,7 +8086,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "ImageIds":{ @@ -8300,7 +8326,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "InstanceIds":{ @@ -8315,7 +8341,7 @@ }, "MaxResults":{ "shape":"Integer", - "documentation":"

The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value. This value can be between 5 and 1000. You cannot specify this parameter and the instance IDs parameter or tag filters in the same call.

", + "documentation":"

The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value. This value can be between 5 and 1000. You cannot specify this parameter and the instance IDs parameter in the same call.

", "locationName":"maxResults" }, "NextToken":{ @@ -8347,7 +8373,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "DryRun":{ @@ -8440,7 +8466,7 @@ }, "MaxResults":{ "shape":"Integer", - "documentation":"

The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value. This value can be between 5 and 1000.

" + "documentation":"

The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value. This value can be between 1 and 200.

" }, "Filters":{ "shape":"FilterList", @@ -8483,7 +8509,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "NextToken":{ @@ -8563,7 +8589,7 @@ "members":{ "Filter":{ "shape":"FilterList", - "documentation":"

One or more filters.

" + "documentation":"

One or more filters.

" }, "MaxResults":{ "shape":"Integer", @@ -8602,7 +8628,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "DryRun":{ @@ -8727,7 +8753,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"filter" }, "DryRun":{ @@ -9062,7 +9088,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "OfferingClass":{ @@ -9103,7 +9129,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "DryRun":{ @@ -9266,7 +9292,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters. If using multiple filters for rules, the results include security groups for which any combination of rules - not necessarily a single rule - match all filters.

", + "documentation":"

One or more filters. If using multiple filters for rules, the results include security groups for which any combination of rules - not necessarily a single rule - match all filters.

", "locationName":"Filter" }, "GroupIds":{ @@ -9360,7 +9386,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "MaxResults":{ @@ -9609,7 +9635,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "DryRun":{ @@ -9745,7 +9771,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "SubnetIds":{ @@ -9782,7 +9808,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "MaxResults":{ @@ -9947,7 +9973,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "VolumeIds":{ @@ -10074,7 +10100,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "DryRun":{ @@ -10361,7 +10387,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "DryRun":{ @@ -10393,7 +10419,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "VpcIds":{ @@ -10425,7 +10451,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "VpnConnectionIds":{ @@ -10457,7 +10483,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "VpnGatewayIds":{ @@ -10890,11 +10916,6 @@ }, "DiskImageDescription":{ "type":"structure", - "required":[ - "Format", - "ImportManifestUrl", - "Size" - ], "members":{ "Checksum":{ "shape":"String", @@ -10959,7 +10980,6 @@ }, "DiskImageVolumeDescription":{ "type":"structure", - "required":["Id"], "members":{ "Id":{ "shape":"String", @@ -11025,7 +11045,7 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

Identifier (key ID, key alias, ID ARN, or alias ARN) for a user-managed CMK under which the EBS volume is encrypted.

Note: This parameter is only supported on BlockDeviceMapping objects called by RunInstances, RequestSpotFleet, and RequestSpotInstances.

" + "documentation":"

Identifier (key ID, key alias, ID ARN, or alias ARN) for a user-managed CMK under which the EBS volume is encrypted.

This parameter is only supported on BlockDeviceMapping objects called by RunInstances, RequestSpotFleet, and RequestSpotInstances.

" }, "SnapshotId":{ "shape":"String", @@ -12084,6 +12104,10 @@ "shape":"Boolean", "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "locationName":"dryRun" + }, + "Latest":{ + "shape":"Boolean", + "documentation":"

When enabled, retrieves the latest console output for the instance.

Default: disabled (false)

" } }, "documentation":"

Contains the parameters for GetConsoleOutput.

" @@ -12098,12 +12122,12 @@ }, "Output":{ "shape":"String", - "documentation":"

The console output, Base64-encoded. If using a command line tool, the tool decodes the output for you.

", + "documentation":"

The console output, base64-encoded. If you are using a command line tool, the tool decodes the output for you.

", "locationName":"output" }, "Timestamp":{ "shape":"DateTime", - "documentation":"

The time the output was last updated.

", + "documentation":"

The time at which the output was last updated.

", "locationName":"timestamp" } }, @@ -12415,7 +12439,7 @@ "locationName":"timestamp" } }, - "documentation":"

Describes an event in the history of the EC2 Fleet.

" + "documentation":"

Describes an event in the history of an EC2 Fleet.

" }, "HistoryRecordSet":{ "type":"list", @@ -12488,6 +12512,11 @@ "shape":"DateTime", "documentation":"

The time that the Dedicated Host was released.

", "locationName":"releaseTime" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Any tags assigned to the Dedicated Host.

", + "locationName":"tagSet" } }, "documentation":"

Describes the properties of the Dedicated Host.

" @@ -13355,7 +13384,6 @@ }, "ImportInstanceTaskDetails":{ "type":"structure", - "required":["Volumes"], "members":{ "Description":{ "shape":"String", @@ -13611,12 +13639,6 @@ }, "ImportVolumeTaskDetails":{ "type":"structure", - "required":[ - "AvailabilityZone", - "BytesConverted", - "Image", - "Volume" - ], "members":{ "AvailabilityZone":{ "shape":"String", @@ -13843,6 +13865,11 @@ "shape":"VirtualizationType", "documentation":"

The virtualization type of the instance.

", "locationName":"virtualizationType" + }, + "CpuOptions":{ + "shape":"CpuOptions", + "documentation":"

The CPU options for the instance.

", + "locationName":"cpuOptions" } }, "documentation":"

Describes an instance.

" @@ -14703,6 +14730,7 @@ "i3.4xlarge", "i3.8xlarge", "i3.16xlarge", + "i3.metal", "hi1.4xlarge", "hs1.8xlarge", "c1.medium", @@ -14723,6 +14751,12 @@ "c5.4xlarge", "c5.9xlarge", "c5.18xlarge", + "c5d.large", + "c5d.xlarge", + "c5d.2xlarge", + "c5d.4xlarge", + "c5d.9xlarge", + "c5d.18xlarge", "cc1.4xlarge", "cc2.8xlarge", "g2.2xlarge", @@ -14749,6 +14783,12 @@ "m5.4xlarge", "m5.12xlarge", "m5.24xlarge", + "m5d.large", + "m5d.xlarge", + "m5d.2xlarge", + "m5d.4xlarge", + "m5d.12xlarge", + "m5d.24xlarge", "h1.2xlarge", "h1.4xlarge", "h1.8xlarge", @@ -15246,6 +15286,36 @@ "locationName":"item" } }, + "LaunchTemplateCpuOptions":{ + "type":"structure", + "members":{ + "CoreCount":{ + "shape":"Integer", + "documentation":"

The number of CPU cores for the instance.

", + "locationName":"coreCount" + }, + "ThreadsPerCore":{ + "shape":"Integer", + "documentation":"

The number of threads per CPU core.

", + "locationName":"threadsPerCore" + } + }, + "documentation":"

The CPU options for the instance.

" + }, + "LaunchTemplateCpuOptionsRequest":{ + "type":"structure", + "members":{ + "CoreCount":{ + "shape":"Integer", + "documentation":"

The number of CPU cores for the instance.

" + }, + "ThreadsPerCore":{ + "shape":"Integer", + "documentation":"

The number of threads per CPU core. To disable Intel Hyper-Threading Technology for the instance, specify a value of 1. Otherwise, specify the default value of 2.

" + } + }, + "documentation":"

The CPU options for the instance. Both the core count and threads per core must be specified in the request.

" + }, "LaunchTemplateEbsBlockDevice":{ "type":"structure", "members":{ @@ -15754,7 +15824,7 @@ "members":{ "ResourceType":{ "shape":"ResourceType", - "documentation":"

The type of resource to tag. Currently, the resource types that support tagging on creation are instance and volume.

" + "documentation":"

The type of resource to tag. Currently, the resource types that support tagging on creation are instance and volume. To tag a resource after it has been created, see CreateTags.

" }, "Tags":{ "shape":"TagList", @@ -16449,7 +16519,7 @@ "members":{ "Attribute":{ "shape":"SnapshotAttributeName", - "documentation":"

The snapshot attribute to modify.

Only volume creation permissions may be modified at the customer level.

" + "documentation":"

The snapshot attribute to modify. Only volume creation permissions can be modified.

" }, "CreateVolumePermission":{ "shape":"CreateVolumePermissionModifications", @@ -16568,15 +16638,15 @@ }, "Size":{ "shape":"Integer", - "documentation":"

Target size in GiB of the volume to be modified. Target volume size must be greater than or equal to than the existing size of the volume. For information about available EBS volume sizes, see http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html.

Default: If no size is specified, the existing size is retained.

" + "documentation":"

The target size of the volume, in GiB. The target volume size must be greater than or equal to the existing size of the volume. For information about available EBS volume sizes, see Amazon EBS Volume Types.

Default: If no size is specified, the existing size is retained.

" }, "VolumeType":{ "shape":"VolumeType", - "documentation":"

Target EBS volume type of the volume to be modified

The API does not support modifications for volume type standard. You also cannot change the type of a volume to standard.

Default: If no type is specified, the existing type is retained.

" + "documentation":"

The target EBS volume type of the volume.

Default: If no type is specified, the existing type is retained.

" }, "Iops":{ "shape":"Integer", - "documentation":"

Target IOPS rate of the volume to be modified.

Only valid for Provisioned IOPS SSD (io1) volumes. For more information about io1 IOPS configuration, see http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html#EBSVolumeTypes_piops.

Default: If no IOPS value is specified, the existing value is retained.

" + "documentation":"

The target IOPS rate of the volume.

This is only valid for Provisioned IOPS SSD (io1) volumes. For more information, see Provisioned IOPS SSD (io1) Volumes.

Default: If no IOPS value is specified, the existing value is retained.

" } } }, @@ -16585,7 +16655,7 @@ "members":{ "VolumeModification":{ "shape":"VolumeModification", - "documentation":"

A VolumeModification object.

", + "documentation":"

Information about the volume modification.

", "locationName":"volumeModification" } } @@ -17985,7 +18055,6 @@ }, "PrivateIpAddressSpecification":{ "type":"structure", - "required":["PrivateIpAddress"], "members":{ "Primary":{ "shape":"Boolean", @@ -18969,7 +19038,7 @@ }, "TagSpecifications":{ "shape":"LaunchTemplateTagSpecificationRequestList", - "documentation":"

The tags to apply to the resources during launch. You can tag instances and volumes. The specified tags are applied to all instances or volumes that are created during launch.

", + "documentation":"

The tags to apply to the resources during launch. You can only tag instances and volumes on launch. The specified tags are applied to all instances or volumes that are created during launch. To tag a resource after it has been created, see CreateTags.

", "locationName":"TagSpecification" }, "ElasticGpuSpecifications":{ @@ -18994,6 +19063,10 @@ "CreditSpecification":{ "shape":"CreditSpecificationRequest", "documentation":"

The credit option for CPU usage of the instance. Valid for T2 instances only.

" + }, + "CpuOptions":{ + "shape":"LaunchTemplateCpuOptionsRequest", + "documentation":"

The CPU options for the instance. For more information, see Optimizing CPU Options in the Amazon Elastic Compute Cloud User Guide.

" } }, "documentation":"

The information to include in the launch template.

" @@ -20014,6 +20087,11 @@ "shape":"CreditSpecification", "documentation":"

The credit option for CPU usage of the instance.

", "locationName":"creditSpecification" + }, + "CpuOptions":{ + "shape":"LaunchTemplateCpuOptions", + "documentation":"

The CPU options for the instance. For more information, see Optimizing CPU Options in the Amazon Elastic Compute Cloud User Guide.

", + "locationName":"cpuOptions" } }, "documentation":"

The information for a launch template.

" @@ -20463,7 +20541,7 @@ }, "TagSpecifications":{ "shape":"TagSpecificationList", - "documentation":"

The tags to apply to the resources during launch. You can tag instances and volumes. The specified tags are applied to all instances or volumes that are created during launch.

", + "documentation":"

The tags to apply to the resources during launch. You can only tag instances and volumes on launch. The specified tags are applied to all instances or volumes that are created during launch. To tag a resource after it has been created, see CreateTags.

", "locationName":"TagSpecification" }, "LaunchTemplate":{ @@ -20477,6 +20555,10 @@ "CreditSpecification":{ "shape":"CreditSpecificationRequest", "documentation":"

The credit option for CPU usage of the instance. Valid values are standard and unlimited. To change this attribute after launch, use ModifyInstanceCreditSpecification. For more information, see T2 Instances in the Amazon Elastic Compute Cloud User Guide.

Default: standard

" + }, + "CpuOptions":{ + "shape":"CpuOptionsRequest", + "documentation":"

The CPU options for the instance. For more information, see Optimizing CPU Options in the Amazon Elastic Compute Cloud User Guide.

" } }, "documentation":"

Contains the parameters for RunInstances.

" @@ -22337,7 +22419,7 @@ }, "Message":{ "shape":"String", - "documentation":"

The message for the state change.

", + "documentation":"

The message for the state change.

", "locationName":"message" } }, @@ -22648,7 +22730,7 @@ "members":{ "ResourceType":{ "shape":"ResourceType", - "documentation":"

The type of resource to tag. Currently, the resource types that support tagging on creation are instance and volume.

", + "documentation":"

The type of resource to tag. Currently, the resource types that support tagging on creation are fleet, instance, snapshot, and volume. To tag a resource after it has been created, see CreateTags.

", "locationName":"resourceType" }, "Tags":{ @@ -23339,7 +23421,7 @@ }, "Iops":{ "shape":"Integer", - "documentation":"

The number of I/O operations per second (IOPS) that the volume supports. For Provisioned IOPS SSD volumes, this represents the number of IOPS that are provisioned for the volume. For General Purpose SSD volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information on General Purpose SSD baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

Constraint: Range is 100-32000 IOPS for io1 volumes and 100-10000 IOPS for gp2 volumes.

Condition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.

", + "documentation":"

The number of I/O operations per second (IOPS) that the volume supports. For Provisioned IOPS SSD volumes, this represents the number of IOPS that are provisioned for the volume. For General Purpose SSD volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information about General Purpose SSD baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

Constraint: Range is 100-32000 IOPS for io1 volumes and 100-10000 IOPS for gp2 volumes.

Condition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.

", "locationName":"iops" }, "Tags":{ @@ -23446,62 +23528,62 @@ "members":{ "VolumeId":{ "shape":"String", - "documentation":"

ID of the volume being modified.

", + "documentation":"

The ID of the volume.

", "locationName":"volumeId" }, "ModificationState":{ "shape":"VolumeModificationState", - "documentation":"

Current state of modification. Modification state is null for unmodified volumes.

", + "documentation":"

The current modification state. The modification state is null for unmodified volumes.

", "locationName":"modificationState" }, "StatusMessage":{ "shape":"String", - "documentation":"

Generic status message on modification progress or failure.

", + "documentation":"

A status message about the modification progress or failure.

", "locationName":"statusMessage" }, "TargetSize":{ "shape":"Integer", - "documentation":"

Target size of the volume being modified.

", + "documentation":"

The target size of the volume, in GiB.

", "locationName":"targetSize" }, "TargetIops":{ "shape":"Integer", - "documentation":"

Target IOPS rate of the volume being modified.

", + "documentation":"

The target IOPS rate of the volume.

", "locationName":"targetIops" }, "TargetVolumeType":{ "shape":"VolumeType", - "documentation":"

Target EBS volume type of the volume being modified.

", + "documentation":"

The target EBS volume type of the volume.

", "locationName":"targetVolumeType" }, "OriginalSize":{ "shape":"Integer", - "documentation":"

Original size of the volume being modified.

", + "documentation":"

The original size of the volume.

", "locationName":"originalSize" }, "OriginalIops":{ "shape":"Integer", - "documentation":"

Original IOPS rate of the volume being modified.

", + "documentation":"

The original IOPS rate of the volume.

", "locationName":"originalIops" }, "OriginalVolumeType":{ "shape":"VolumeType", - "documentation":"

Original EBS volume type of the volume being modified.

", + "documentation":"

The original EBS volume type of the volume.

", "locationName":"originalVolumeType" }, "Progress":{ "shape":"Long", - "documentation":"

Modification progress from 0 to 100%.

", + "documentation":"

The modification progress, from 0 to 100 percent complete.

", "locationName":"progress" }, "StartTime":{ "shape":"DateTime", - "documentation":"

Modification start time

", + "documentation":"

The modification start time.

", "locationName":"startTime" }, "EndTime":{ "shape":"DateTime", - "documentation":"

Modification completion or failure time.

", + "documentation":"

The modification completion or failure time.

", "locationName":"endTime" } }, diff --git a/botocore/data/ecr/2015-09-21/service-2.json b/botocore/data/ecr/2015-09-21/service-2.json index 0ca318fe..333936b2 100644 --- a/botocore/data/ecr/2015-09-21/service-2.json +++ b/botocore/data/ecr/2015-09-21/service-2.json @@ -7,6 +7,7 @@ "protocol":"json", "serviceAbbreviation":"Amazon ECR", "serviceFullName":"Amazon EC2 Container Registry", + "serviceId":"ECR", "signatureVersion":"v4", "targetPrefix":"AmazonEC2ContainerRegistry_V20150921", "uid":"ecr-2015-09-21" diff --git a/botocore/data/ecs/2014-11-13/service-2.json b/botocore/data/ecs/2014-11-13/service-2.json index 17ff272c..b14c5c7e 100644 --- a/botocore/data/ecs/2014-11-13/service-2.json +++ b/botocore/data/ecs/2014-11-13/service-2.json @@ -46,7 +46,7 @@ {"shape":"PlatformTaskDefinitionIncompatibilityException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Runs and maintains a desired number of tasks from a specified task definition. If the number of tasks running in a service drops below desiredCount, Amazon ECS spawns another copy of the task in the specified cluster. To update an existing service, see UpdateService.

In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind a load balancer. The load balancer distributes traffic across the tasks that are associated with the service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.

You can optionally specify a deployment configuration for your service. During a deployment, the service scheduler uses the minimumHealthyPercent and maximumPercent parameters to determine the deployment strategy. The deployment is triggered by changing the task definition or the desired count of a service with an UpdateService operation.

The minimumHealthyPercent represents a lower limit on the number of your service's tasks that must remain in the RUNNING state during a deployment, as a percentage of the desiredCount (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a desiredCount of four tasks and a minimumHealthyPercent of 50%, the scheduler can stop two existing tasks to free up cluster capacity before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they are in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they are in the RUNNING state and the container instance they are hosted on is reported as healthy by the load balancer. The default value for minimumHealthyPercent is 50% in the console and 100% for the AWS CLI, the AWS SDKs, and the APIs.

The maximumPercent parameter represents an upper limit on the number of your service's tasks that are allowed in the RUNNING or PENDING state during a deployment, as a percentage of the desiredCount (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service has a desiredCount of four tasks and a maximumPercent value of 200%, the scheduler can start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximumPercent is 200%.

When the service scheduler launches new tasks, it determines task placement in your cluster using the following logic:

" + "documentation":"

Runs and maintains a desired number of tasks from a specified task definition. If the number of tasks running in a service drops below desiredCount, Amazon ECS spawns another copy of the task in the specified cluster. To update an existing service, see UpdateService.

In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind a load balancer. The load balancer distributes traffic across the tasks that are associated with the service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.

You can optionally specify a deployment configuration for your service. During a deployment, the service scheduler uses the minimumHealthyPercent and maximumPercent parameters to determine the deployment strategy. The deployment is triggered by changing the task definition or the desired count of a service with an UpdateService operation.

The minimumHealthyPercent represents a lower limit on the number of your service's tasks that must remain in the RUNNING state during a deployment, as a percentage of the desiredCount (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a desiredCount of four tasks and a minimumHealthyPercent of 50%, the scheduler can stop two existing tasks to free up cluster capacity before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they are in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they are in the RUNNING state and the container instance they are hosted on is reported as healthy by the load balancer. The default value for a replica service for minimumHealthyPercent is 50% in the console and 100% for the AWS CLI, the AWS SDKs, and the APIs. The default value for a daemon service for minimumHealthyPercent is 0% for the AWS CLI, the AWS SDKs, and the APIs and 50% for the console.

The maximumPercent parameter represents an upper limit on the number of your service's tasks that are allowed in the RUNNING or PENDING state during a deployment, as a percentage of the desiredCount (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your replica service has a desiredCount of four tasks and a maximumPercent value of 200%, the scheduler can start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for a replica service for maximumPercent is 200%. If you are using a daemon service type, the maximumPercent should remain at 100%, which is the default value.

When the service scheduler launches new tasks, it determines task placement in your cluster using the following logic:

" }, "DeleteAttributes":{ "name":"DeleteAttributes", @@ -870,7 +870,7 @@ }, "hostname":{ "shape":"String", - "documentation":"

The hostname to use for your container. This parameter maps to Hostname in the Create a container section of the Docker Remote API and the --hostname option to docker run.

" + "documentation":"

The hostname to use for your container. This parameter maps to Hostname in the Create a container section of the Docker Remote API and the --hostname option to docker run.

The hostname parameter is not supported if using the awsvpc networkMode.

" }, "user":{ "shape":"String", @@ -952,7 +952,7 @@ }, "remainingResources":{ "shape":"Resources", - "documentation":"

For CPU and memory resource types, this parameter describes the remaining CPU and memory on the that has not already been allocated to tasks (and is therefore available for new tasks). For port resource types, this parameter describes the ports that were reserved by the Amazon ECS container agent (at instance registration time) and any task containers that have reserved port mappings on the host (with the host or bridge network mode). Any port that is not specified here is available for new tasks.

" + "documentation":"

For CPU and memory resource types, this parameter describes the remaining CPU and memory that has not already been allocated to tasks and is therefore available for new tasks. For port resource types, this parameter describes the ports that were reserved by the Amazon ECS container agent (at instance registration time) and any task containers that have reserved port mappings on the host (with the host or bridge network mode). Any port that is not specified here is available for new tasks.

" }, "registeredResources":{ "shape":"Resources", @@ -1094,8 +1094,7 @@ "type":"structure", "required":[ "serviceName", - "taskDefinition", - "desiredCount" + "taskDefinition" ], "members":{ "cluster":{ @@ -1112,11 +1111,11 @@ }, "loadBalancers":{ "shape":"LoadBalancers", - "documentation":"

A load balancer object representing the load balancer to use with your service. Currently, you are limited to one load balancer or target group per service. After you create a service, the load balancer name or target group ARN, container name, and container port specified in the service definition are immutable.

For Classic Load Balancers, this object must contain the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer. When a task from this service is placed on a container instance, the container instance is registered with the load balancer specified here.

For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN, the container name (as it appears in a container definition), and the container port to access from the load balancer. When a task from this service is placed on a container instance, the container instance and port combination is registered as a target in the target group specified here.

" + "documentation":"

A load balancer object representing the load balancer to use with your service. Currently, you are limited to one load balancer or target group per service. After you create a service, the load balancer name or target group ARN, container name, and container port specified in the service definition are immutable.

For Classic Load Balancers, this object must contain the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer. When a task from this service is placed on a container instance, the container instance is registered with the load balancer specified here.

For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN, the container name (as it appears in a container definition), and the container port to access from the load balancer. When a task from this service is placed on a container instance, the container instance and port combination is registered as a target in the target group specified here.

Services with tasks that use the awsvpc network mode (for example, those with the Fargate launch type) only support Application Load Balancers and Network Load Balancers; Classic Load Balancers are not supported. Also, when you create any target groups for these services, you must choose ip as the target type, not instance, because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

" }, "serviceRegistries":{ "shape":"ServiceRegistries", - "documentation":"

The details of the service discovery registries you want to assign to this service. For more information, see Service Discovery.

" + "documentation":"

The details of the service discovery registries you want to assign to this service. For more information, see Service Discovery.

Service discovery is supported for Fargate tasks if using platform version v1.1.0 or later. For more information, see AWS Fargate Platform Versions.

" }, "desiredCount":{ "shape":"BoxedInteger", @@ -1124,7 +1123,7 @@ }, "clientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. Up to 32 ASCII characters are allowed.

" + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 32 ASCII characters are allowed.

" }, "launchType":{ "shape":"LaunchType", @@ -1157,6 +1156,10 @@ "healthCheckGracePeriodSeconds":{ "shape":"BoxedInteger", "documentation":"

The period of time, in seconds, that the Amazon ECS service scheduler should ignore unhealthy Elastic Load Balancing target health checks after a task has first started. This is only valid if your service is configured to use a load balancer. If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can specify a health check grace period of up to 1,800 seconds during which the ECS service scheduler ignores health check status. This grace period can prevent the ECS service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.

" + }, + "schedulingStrategy":{ + "shape":"SchedulingStrategy", + "documentation":"

The scheduling strategy to use for the service. For more information, see Services.

There are two service scheduler strategies available:

" } } }, @@ -1222,6 +1225,10 @@ "service":{ "shape":"String", "documentation":"

The name of the service to delete.

" + }, + "force":{ + "shape":"BoxedBoolean", + "documentation":"

If true, allows you to delete a service even if it has not been scaled down to zero tasks. It is only necessary to use this if the service is using the REPLICA scheduling strategy.

" } } }, @@ -1384,7 +1391,7 @@ }, "containerInstances":{ "shape":"StringList", - "documentation":"

A list of container instance IDs or full ARN entries.

" + "documentation":"

A list of up to 100 container instance IDs or full Amazon Resource Name (ARN) entries.

" } } }, @@ -1698,11 +1705,11 @@ }, "sharedMemorySize":{ "shape":"BoxedInteger", - "documentation":"

The value for the size of the /dev/shm volume. This parameter maps to the --shm-size option to docker run.

" + "documentation":"

The value for the size (in MiB) of the /dev/shm volume. This parameter maps to the --shm-size option to docker run.

If you are using tasks that use the Fargate launch type, the sharedMemorySize parameter is not supported.

" }, "tmpfs":{ "shape":"TmpfsList", - "documentation":"

The container path, mount options, and size of the tmpfs mount. This parameter maps to the --tmpfs option to docker run.

" + "documentation":"

The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the --tmpfs option to docker run.

If you are using tasks that use the Fargate launch type, the tmpfs parameter is not supported.

" } }, "documentation":"

Linux-specific options that are applied to the container, such as Linux KernelCapabilities.

" @@ -1832,6 +1839,10 @@ "launchType":{ "shape":"LaunchType", "documentation":"

The launch type for services you want to list.

" + }, + "schedulingStrategy":{ + "shape":"SchedulingStrategy", + "documentation":"

The scheduling strategy for services to list.

" } } }, @@ -1979,7 +1990,7 @@ "members":{ "targetGroupArn":{ "shape":"String", - "documentation":"

The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group associated with a service.

" + "documentation":"

The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group associated with a service.

If your service's task definition uses the awsvpc network mode (which is required for the Fargate launch type), you must choose ip as the target type, not instance, because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

" }, "loadBalancerName":{ "shape":"String", @@ -1994,7 +2005,7 @@ "documentation":"

The port on the container to associate with the load balancer. This port must correspond to a containerPort in the service's task definition. Your container instances must allow ingress traffic on the hostPort of the port mapping.

" } }, - "documentation":"

Details on a load balancer that is used with a service.

" + "documentation":"

Details on a load balancer that is used with a service.

Services with tasks that use the awsvpc network mode (for example, those with the Fargate launch type) only support Application Load Balancers and Network Load Balancers; Classic Load Balancers are not supported. Also, when you create any target groups for these services, you must choose ip as the target type, not instance, because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

" }, "LoadBalancers":{ "type":"list", @@ -2355,7 +2366,7 @@ "members":{ "name":{ "shape":"String", - "documentation":"

The name of the resource, such as cpu, memory, ports, or a user-defined resource.

" + "documentation":"

The name of the resource, such as CPU, MEMORY, PORTS, PORTS_UDP, or a user-defined resource.

" }, "type":{ "shape":"String", @@ -2447,6 +2458,13 @@ } } }, + "SchedulingStrategy":{ + "type":"string", + "enum":[ + "REPLICA", + "DAEMON" + ] + }, "ServerException":{ "type":"structure", "members":{ @@ -2473,7 +2491,7 @@ }, "loadBalancers":{ "shape":"LoadBalancers", - "documentation":"

A list of Elastic Load Balancing load balancer objects, containing the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer.

" + "documentation":"

A list of Elastic Load Balancing load balancer objects, containing the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer.

Services with tasks that use the awsvpc network mode (for example, those with the Fargate launch type) only support Application Load Balancers and Network Load Balancers; Classic Load Balancers are not supported. Also, when you create any target groups for these services, you must choose ip as the target type, not instance, because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

" }, "serviceRegistries":{ "shape":"ServiceRegistries", @@ -2542,6 +2560,10 @@ "healthCheckGracePeriodSeconds":{ "shape":"BoxedInteger", "documentation":"

The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing target health checks after a task has first started.

" + }, + "schedulingStrategy":{ + "shape":"SchedulingStrategy", + "documentation":"

The scheduling strategy to use for the service. For more information, see Services.

There are two service scheduler strategies available:

" } }, "documentation":"

Details on a service within a cluster

" @@ -2591,11 +2613,19 @@ "members":{ "registryArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the Service Registry. The currently supported service registry is Amazon Route 53 Auto Naming Service. For more information, see Service.

" + "documentation":"

The Amazon Resource Name (ARN) of the service registry. The currently supported service registry is Amazon Route 53 Auto Naming. For more information, see Service.

" }, "port":{ "shape":"BoxedInteger", - "documentation":"

The port value used if your Service Discovery service specified an SRV record.

" + "documentation":"

The port value used if your service discovery service specified an SRV record. This field is required if both the awsvpc network mode and SRV records are used.

" + }, + "containerName":{ + "shape":"String", + "documentation":"

The container name value, already specified in the task definition, to be used for your service discovery service. If the task definition that your service task specifies uses the bridge or host network mode, you must specify a containerName and containerPort combination from the task definition. If the task definition that your service task specifies uses the awsvpc network mode and a type SRV DNS record is used, you must specify either a containerName and containerPort combination or a port value, but not both.

" + }, + "containerPort":{ + "shape":"BoxedInteger", + "documentation":"

The port value, already specified in the task definition, to be used for your service discovery service. If the task definition your service task specifies uses the bridge or host network mode, you must specify a containerName and containerPort combination from the task definition. If the task definition your service task specifies uses the awsvpc network mode and a type SRV DNS record is used, you must specify either a containerName and containerPort combination or a port value, but not both.

" } }, "documentation":"

Details of the service registry.

" @@ -3053,7 +3083,7 @@ }, "size":{ "shape":"Integer", - "documentation":"

The size of the tmpfs volume.

" + "documentation":"

The size (in MiB) of the tmpfs volume.

" }, "mountOptions":{ "shape":"StringList", diff --git a/botocore/data/efs/2015-02-01/service-2.json b/botocore/data/efs/2015-02-01/service-2.json index 23ea7204..b20c7d0e 100644 --- a/botocore/data/efs/2015-02-01/service-2.json +++ b/botocore/data/efs/2015-02-01/service-2.json @@ -6,6 +6,7 @@ "protocol":"rest-json", "serviceAbbreviation":"EFS", "serviceFullName":"Amazon Elastic File System", + "serviceId":"EFS", "signatureVersion":"v4", "uid":"elasticfilesystem-2015-02-01" }, diff --git a/botocore/data/eks/2017-11-01/paginators-1.json b/botocore/data/eks/2017-11-01/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/eks/2017-11-01/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/eks/2017-11-01/service-2.json b/botocore/data/eks/2017-11-01/service-2.json new file mode 100644 index 00000000..1f3813db --- /dev/null +++ b/botocore/data/eks/2017-11-01/service-2.json @@ -0,0 +1,422 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2017-11-01", + "endpointPrefix":"eks", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"Amazon EKS", + "serviceFullName":"Amazon Elastic Container Service for Kubernetes", + "serviceId":"EKS", + "signatureVersion":"v4", + "signingName":"eks", + "uid":"eks-2017-11-01" + }, + "operations":{ + "CreateCluster":{ + "name":"CreateCluster", + "http":{ + "method":"POST", + "requestUri":"/clusters" + }, + "input":{"shape":"CreateClusterRequest"}, + "output":{"shape":"CreateClusterResponse"}, + "errors":[ + {"shape":"ResourceInUseException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"UnsupportedAvailabilityZoneException"} + ], + "documentation":"

Creates an Amazon EKS control plane.

The Amazon EKS control plane consists of control plane instances that run the Kubernetes software, like etcd and the API server. The control plane runs in an account managed by AWS, and the Kubernetes API is exposed via the Amazon EKS API server endpoint.

Amazon EKS worker nodes run in your AWS account and connect to your cluster's control plane via the Kubernetes API server endpoint and a certificate file that is created for your cluster.

The cluster control plane is provisioned across multiple Availability Zones and fronted by an Elastic Load Balancing Network Load Balancer. Amazon EKS also provisions elastic network interfaces in your VPC subnets to provide connectivity from the control plane instances to the worker nodes (for example, to support kubectl exec, logs, and proxy data flows).

After you create an Amazon EKS cluster, you must configure your Kubernetes tooling to communicate with the API server and launch worker nodes into your cluster. For more information, see Managing Cluster Authentication and Launching Amazon EKS Worker Nodes in the Amazon EKS User Guide.

" + }, + "DeleteCluster":{ + "name":"DeleteCluster", + "http":{ + "method":"DELETE", + "requestUri":"/clusters/{name}" + }, + "input":{"shape":"DeleteClusterRequest"}, + "output":{"shape":"DeleteClusterResponse"}, + "errors":[ + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ClientException"}, + {"shape":"ServerException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Deletes the Amazon EKS cluster control plane.

If you have active services in your cluster that are associated with a load balancer, you must delete those services before deleting the cluster so that the load balancers are deleted properly. Otherwise, you can have orphaned resources in your VPC that prevent you from being able to delete the VPC. For more information, see Deleting a Cluster in the Amazon EKS User Guide.

" + }, + "DescribeCluster":{ + "name":"DescribeCluster", + "http":{ + "method":"GET", + "requestUri":"/clusters/{name}" + }, + "input":{"shape":"DescribeClusterRequest"}, + "output":{"shape":"DescribeClusterResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ClientException"}, + {"shape":"ServerException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Returns descriptive information about an Amazon EKS cluster.

The API server endpoint and certificate authority data returned by this operation are required for kubelet and kubectl to communicate with your Kubernetes API server. For more information, see Create a kubeconfig for Amazon EKS.

The API server endpoint and certificate authority data are not available until the cluster reaches the ACTIVE state.

" + }, + "ListClusters":{ + "name":"ListClusters", + "http":{ + "method":"GET", + "requestUri":"/clusters" + }, + "input":{"shape":"ListClustersRequest"}, + "output":{"shape":"ListClustersResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServerException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Lists the Amazon EKS clusters in your AWS account in the specified region.

" + } + }, + "shapes":{ + "Certificate":{ + "type":"structure", + "members":{ + "data":{ + "shape":"String", + "documentation":"

The base64 encoded certificate data required to communicate with your cluster. Add this to the certificate-authority-data section of the kubeconfig file for your cluster.

" + } + }, + "documentation":"

An object representing the certificate-authority-data for your cluster.

" + }, + "ClientException":{ + "type":"structure", + "members":{ + "clusterName":{ + "shape":"String", + "documentation":"

The Amazon EKS cluster associated with the exception.

" + }, + "message":{"shape":"String"} + }, + "documentation":"

These errors are usually caused by a client action, such as using an action or resource on behalf of a user that doesn't have permissions to use the action or resource, or specifying an identifier that is not valid.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "Cluster":{ + "type":"structure", + "members":{ + "name":{ + "shape":"String", + "documentation":"

The name of the cluster.

" + }, + "arn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the cluster.

" + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The Unix epoch time stamp in seconds for when the cluster was created.

" + }, + "version":{ + "shape":"String", + "documentation":"

The Kubernetes server version for the cluster.

" + }, + "endpoint":{ + "shape":"String", + "documentation":"

The endpoint for your Kubernetes API server.

" + }, + "roleArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that provides permissions for the Kubernetes control plane to make calls to AWS API operations on your behalf.

" + }, + "resourcesVpcConfig":{ + "shape":"VpcConfigResponse", + "documentation":"

The VPC subnets and security groups used by the cluster control plane. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see Cluster VPC Considerations and Cluster Security Group Considerations in the Amazon EKS User Guide.

" + }, + "status":{ + "shape":"ClusterStatus", + "documentation":"

The current status of the cluster.

" + }, + "certificateAuthority":{ + "shape":"Certificate", + "documentation":"

The certificate-authority-data for your cluster.

" + }, + "clientRequestToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request.

" + } + }, + "documentation":"

An object representing an Amazon EKS cluster.

" + }, + "ClusterName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[A-Za-z0-9\\-_]*" + }, + "ClusterStatus":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "DELETING", + "FAILED" + ] + }, + "CreateClusterRequest":{ + "type":"structure", + "required":[ + "name", + "roleArn", + "resourcesVpcConfig" + ], + "members":{ + "name":{ + "shape":"ClusterName", + "documentation":"

The unique name to give to your cluster.

" + }, + "version":{ + "shape":"String", + "documentation":"

The desired Kubernetes version for your cluster. If you do not specify a value here, the latest version available in Amazon EKS is used.

" + }, + "roleArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that provides permissions for Amazon EKS to make calls to other AWS API operations on your behalf. For more information, see Amazon EKS Service IAM Role in the Amazon EKS User Guide.

" + }, + "resourcesVpcConfig":{ + "shape":"VpcConfigRequest", + "documentation":"

The VPC subnets and security groups used by the cluster control plane. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see Cluster VPC Considerations and Cluster Security Group Considerations in the Amazon EKS User Guide.

" + }, + "clientRequestToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request.

", + "idempotencyToken":true + } + } + }, + "CreateClusterResponse":{ + "type":"structure", + "members":{ + "cluster":{ + "shape":"Cluster", + "documentation":"

The full description of your new cluster.

" + } + } + }, + "DeleteClusterRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"String", + "documentation":"

The name of the cluster to delete.

", + "location":"uri", + "locationName":"name" + } + } + }, + "DeleteClusterResponse":{ + "type":"structure", + "members":{ + "cluster":{ + "shape":"Cluster", + "documentation":"

The full description of the cluster to delete.

" + } + } + }, + "DescribeClusterRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"String", + "documentation":"

The name of the cluster to describe.

", + "location":"uri", + "locationName":"name" + } + } + }, + "DescribeClusterResponse":{ + "type":"structure", + "members":{ + "cluster":{ + "shape":"Cluster", + "documentation":"

The full description of your specified cluster.

" + } + } + }, + "InvalidParameterException":{ + "type":"structure", + "members":{ + "clusterName":{ + "shape":"String", + "documentation":"

The Amazon EKS cluster associated with the exception.

" + }, + "message":{"shape":"String"} + }, + "documentation":"

The specified parameter is invalid. Review the available parameters for the API request.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ListClustersRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListClustersRequestMaxResults", + "documentation":"

The maximum number of cluster results returned by ListClusters in paginated output. When this parameter is used, ListClusters only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListClusters request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListClusters returns up to 100 results and a nextToken value if applicable.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"String", + "documentation":"

The nextToken value returned from a previous paginated ListClusters request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListClustersRequestMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListClustersResponse":{ + "type":"structure", + "members":{ + "clusters":{ + "shape":"StringList", + "documentation":"

A list of all of the clusters for your account in the specified region.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

The nextToken value to include in a future ListClusters request. When the results of a ListClusters request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

" + } + } + }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + "clusterName":{ + "shape":"String", + "documentation":"

The Amazon EKS cluster associated with the exception.

" + }, + "message":{"shape":"String"} + }, + "documentation":"

The specified resource is in use.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "ResourceLimitExceededException":{ + "type":"structure", + "members":{ + "clusterName":{ + "shape":"String", + "documentation":"

The Amazon EKS cluster associated with the exception.

" + }, + "message":{"shape":"String"} + }, + "documentation":"

You have encountered a service limit on the specified resource.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "clusterName":{ + "shape":"String", + "documentation":"

The Amazon EKS cluster associated with the exception.

" + }, + "message":{"shape":"String"} + }, + "documentation":"

The specified resource could not be found. You can view your available clusters with ListClusters. Amazon EKS clusters are region-specific.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ServerException":{ + "type":"structure", + "members":{ + "clusterName":{ + "shape":"String", + "documentation":"

The Amazon EKS cluster associated with the exception.

" + }, + "message":{"shape":"String"} + }, + "documentation":"

These errors are usually caused by a server-side issue.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The service is unavailable. Back off and retry the operation.

", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "Timestamp":{"type":"timestamp"}, + "UnsupportedAvailabilityZoneException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"}, + "clusterName":{ + "shape":"String", + "documentation":"

The Amazon EKS cluster associated with the exception.

" + }, + "validZones":{ + "shape":"StringList", + "documentation":"

The supported Availability Zones for your account. Choose subnets in these Availability Zones for your cluster.

" + } + }, + "documentation":"

At least one of your specified cluster subnets is in an Availability Zone that does not support Amazon EKS. The exception output will specify the supported Availability Zones for your account, from which you can choose subnets for your cluster.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "VpcConfigRequest":{ + "type":"structure", + "required":["subnetIds"], + "members":{ + "subnetIds":{ + "shape":"StringList", + "documentation":"

Specify subnets for your Amazon EKS worker nodes. Amazon EKS creates cross-account elastic network interfaces in these subnets to allow communication between your worker nodes and the Kubernetes control plane.

" + }, + "securityGroupIds":{ + "shape":"StringList", + "documentation":"

Specify one or more security groups for the cross-account elastic network interfaces that Amazon EKS creates to use to allow communication between your worker nodes and the Kubernetes control plane.

" + } + }, + "documentation":"

An object representing an Amazon EKS cluster VPC configuration request.

" + }, + "VpcConfigResponse":{ + "type":"structure", + "members":{ + "subnetIds":{ + "shape":"StringList", + "documentation":"

The subnets associated with your cluster.

" + }, + "securityGroupIds":{ + "shape":"StringList", + "documentation":"

The security groups associated with the cross-account elastic network interfaces that are used to allow communication between your worker nodes and the Kubernetes control plane.

" + }, + "vpcId":{ + "shape":"String", + "documentation":"

The VPC associated with your cluster.

" + } + }, + "documentation":"

An object representing an Amazon EKS cluster VPC configuration response.

" + } + }, + "documentation":"

Amazon Elastic Container Service for Kubernetes (Amazon EKS) is a managed service that makes it easy for you to run Kubernetes on AWS without needing to stand up or maintain your own Kubernetes control plane. Kubernetes is an open-source system for automating the deployment, scaling, and management of containerized applications.

Amazon EKS runs three Kubernetes control plane instances across three Availability Zones to ensure high availability. Amazon EKS automatically detects and replaces unhealthy control plane instances, and it provides automated version upgrades and patching for them.

Amazon EKS is also integrated with many AWS services to provide scalability and security for your applications, including the following:

Amazon EKS runs up-to-date versions of the open-source Kubernetes software, so you can use all the existing plugins and tooling from the Kubernetes community. Applications running on Amazon EKS are fully compatible with applications running on any standard Kubernetes environment, whether running in on-premises data centers or public clouds. This means that you can easily migrate any standard Kubernetes application to Amazon EKS without any code modification required.

" +} diff --git a/botocore/data/elasticache/2015-02-02/service-2.json b/botocore/data/elasticache/2015-02-02/service-2.json index 92c48b28..95f4b937 100644 --- a/botocore/data/elasticache/2015-02-02/service-2.json +++ b/botocore/data/elasticache/2015-02-02/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"elasticache", "protocol":"query", "serviceFullName":"Amazon ElastiCache", + "serviceId":"ElastiCache", "signatureVersion":"v4", "uid":"elasticache-2015-02-02", "xmlNamespace":"http://elasticache.amazonaws.com/doc/2015-02-02/" diff --git a/botocore/data/elasticbeanstalk/2010-12-01/service-2.json b/botocore/data/elasticbeanstalk/2010-12-01/service-2.json index 782c5cee..c542f0e3 100644 --- a/botocore/data/elasticbeanstalk/2010-12-01/service-2.json +++ b/botocore/data/elasticbeanstalk/2010-12-01/service-2.json @@ -423,7 +423,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ElasticBeanstalkServiceException"} ], - "documentation":"

Retrives detailed information about the health of instances in your AWS Elastic Beanstalk. This operation requires enhanced health reporting.

" + "documentation":"

Retrieves detailed information about the health of instances in your AWS Elastic Beanstalk. This operation requires enhanced health reporting.

" }, "DescribePlatformVersion":{ "name":"DescribePlatformVersion", @@ -808,7 +808,7 @@ "members":{ "ServiceRole":{ "shape":"String", - "documentation":"

The ARN of an IAM service role that Elastic Beanstalk has permission to assume.

" + "documentation":"

The ARN of an IAM service role that Elastic Beanstalk has permission to assume.

The ServiceRole property is required the first time that you provide a VersionLifecycleConfig for the application in one of the supporting calls (CreateApplication or UpdateApplicationResourceLifecycle). After you provide it once, in either one of the calls, Elastic Beanstalk persists the Service Role with the application, and you don't need to specify it again in subsequent UpdateApplicationResourceLifecycle calls. You can, however, specify it in subsequent calls to change the Service Role to another value.

" }, "VersionLifecycleConfig":{ "shape":"ApplicationVersionLifecycleConfig", @@ -872,7 +872,7 @@ }, "Status":{ "shape":"ApplicationVersionStatus", - "documentation":"

The processing status of the application version.

" + "documentation":"

The processing status of the application version. Reflects the state of the application version during its creation. Many of the values are only applicable if you specified True for the Process parameter of the CreateApplicationVersion action. The following list describes the possible values.

" } }, "documentation":"

Describes the properties of an application version.

" @@ -1400,7 +1400,7 @@ }, "Process":{ "shape":"ApplicationVersionProccess", - "documentation":"

Preprocesses and validates the environment manifest (env.yaml) and configuration files (*.config files in the .ebextensions folder) in the source bundle. Validating configuration files can identify issues prior to deploying the application version to an environment.

The Process option validates Elastic Beanstalk configuration files. It doesn't validate your application's configuration files, like proxy server or Docker configuration.

" + "documentation":"

Pre-processes and validates the environment manifest (env.yaml) and configuration files (*.config files in the .ebextensions folder) in the source bundle. Validating configuration files can identify issues prior to deploying the application version to an environment.

You must turn processing on for application versions that you create using AWS CodeBuild or AWS CodeCommit. For application versions built from a source bundle in Amazon S3, processing is optional.

The Process option validates Elastic Beanstalk configuration files. It doesn't validate your application's configuration files, like proxy server or Docker configuration.

" } }, "documentation":"

" @@ -1489,7 +1489,7 @@ }, "SolutionStackName":{ "shape":"SolutionStackName", - "documentation":"

This is an alternative to specifying a template name. If specified, AWS Elastic Beanstalk sets the configuration values to the default values associated with the specified solution stack.

" + "documentation":"

This is an alternative to specifying a template name. If specified, AWS Elastic Beanstalk sets the configuration values to the default values associated with the specified solution stack.

For a list of current solution stacks, see Elastic Beanstalk Supported Platforms.

" }, "PlatformArn":{ "shape":"PlatformArn", @@ -2236,7 +2236,8 @@ "Info", "Warning", "Degraded", - "Severe" + "Severe", + "Suspended" ] }, "EnvironmentId":{"type":"string"}, @@ -2381,7 +2382,7 @@ }, "Version":{ "shape":"String", - "documentation":"

The version of this environment tier.

" + "documentation":"

The version of this environment tier. When you don't set a value to it, Elastic Beanstalk uses the latest compatible worker tier version.

This member is deprecated. Any specific version that you set may become out of date. We recommend leaving it unspecified.

" } }, "documentation":"

Describes the properties of an environment tier.

" diff --git a/botocore/data/elastictranscoder/2012-09-25/service-2.json b/botocore/data/elastictranscoder/2012-09-25/service-2.json index 19c50d3e..a213f9aa 100644 --- a/botocore/data/elastictranscoder/2012-09-25/service-2.json +++ b/botocore/data/elastictranscoder/2012-09-25/service-2.json @@ -6,6 +6,7 @@ "endpointPrefix":"elastictranscoder", "protocol":"rest-json", "serviceFullName":"Amazon Elastic Transcoder", + "serviceId":"Elastic Transcoder", "signatureVersion":"v4" }, "operations":{ diff --git a/botocore/data/elb/2012-06-01/service-2.json b/botocore/data/elb/2012-06-01/service-2.json index f5fa7766..5ff2b5eb 100644 --- a/botocore/data/elb/2012-06-01/service-2.json +++ b/botocore/data/elb/2012-06-01/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"elasticloadbalancing", "protocol":"query", "serviceFullName":"Elastic Load Balancing", + "serviceId":"Elastic Load Balancing", "signatureVersion":"v4", "uid":"elasticloadbalancing-2012-06-01", "xmlNamespace":"http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/" diff --git a/botocore/data/elbv2/2015-12-01/service-2.json b/botocore/data/elbv2/2015-12-01/service-2.json index ff18086e..0d5afbbe 100644 --- a/botocore/data/elbv2/2015-12-01/service-2.json +++ b/botocore/data/elbv2/2015-12-01/service-2.json @@ -73,7 +73,9 @@ {"shape":"CertificateNotFoundException"}, {"shape":"UnsupportedProtocolException"}, {"shape":"TooManyRegistrationsForTargetIdException"}, - {"shape":"TooManyTargetsException"} + {"shape":"TooManyTargetsException"}, + {"shape":"TooManyActionsException"}, + {"shape":"InvalidLoadBalancerActionException"} ], "documentation":"

Creates a listener for the specified Application Load Balancer or Network Load Balancer.

To update a listener, use ModifyListener. When you are finished with a listener, you can delete it using DeleteListener. If you are finished with both the listener and the load balancer, you can delete them both using DeleteLoadBalancer.

This operation is idempotent, which means that it completes at most one time. If you attempt to create multiple listeners with the same settings, each call succeeds.

For more information, see Listeners for Your Application Load Balancers in the Application Load Balancers Guide and Listeners for Your Network Load Balancers in the Network Load Balancers Guide.

" }, @@ -126,9 +128,12 @@ {"shape":"TargetGroupNotFoundException"}, {"shape":"InvalidConfigurationRequestException"}, {"shape":"TooManyRegistrationsForTargetIdException"}, - {"shape":"TooManyTargetsException"} + {"shape":"TooManyTargetsException"}, + {"shape":"UnsupportedProtocolException"}, + {"shape":"TooManyActionsException"}, + {"shape":"InvalidLoadBalancerActionException"} ], - "documentation":"

Creates a rule for the specified listener. The listener must be associated with an Application Load Balancer.

Rules are evaluated in priority order, from the lowest value to the highest value. When the condition for a rule is met, the specified action is taken. If no conditions are met, the action for the default rule is taken. For more information, see Listener Rules in the Application Load Balancers Guide.

To view your current rules, use DescribeRules. To update a rule, use ModifyRule. To set the priorities of your rules, use SetRulePriorities. To delete a rule, use DeleteRule.

" + "documentation":"

Creates a rule for the specified listener. The listener must be associated with an Application Load Balancer.

Rules are evaluated in priority order, from the lowest value to the highest value. When the conditions for a rule are met, its actions are performed. If the conditions for no rules are met, the actions for the default rule are performed. For more information, see Listener Rules in the Application Load Balancers Guide.

To view your current rules, use DescribeRules. To update a rule, use ModifyRule. To set the priorities of your rules, use SetRulePriorities. To delete a rule, use DeleteRule.

" }, "CreateTargetGroup":{ "name":"CreateTargetGroup", @@ -274,7 +279,8 @@ }, "errors":[ {"shape":"ListenerNotFoundException"}, - {"shape":"LoadBalancerNotFoundException"} + {"shape":"LoadBalancerNotFoundException"}, + {"shape":"UnsupportedProtocolException"} ], "documentation":"

Describes the specified listeners or the listeners for the specified Application Load Balancer or Network Load Balancer. You must specify either a load balancer or one or more listeners.

" }, @@ -292,7 +298,7 @@ "errors":[ {"shape":"LoadBalancerNotFoundException"} ], - "documentation":"

Describes the attributes for the specified Application Load Balancer or Network Load Balancer.

" + "documentation":"

Describes the attributes for the specified Application Load Balancer or Network Load Balancer.

For more information, see Load Balancer Attributes in the Application Load Balancers Guide or Load Balancer Attributes in the Network Load Balancers Guide.

" }, "DescribeLoadBalancers":{ "name":"DescribeLoadBalancers", @@ -323,7 +329,8 @@ }, "errors":[ {"shape":"ListenerNotFoundException"}, - {"shape":"RuleNotFoundException"} + {"shape":"RuleNotFoundException"}, + {"shape":"UnsupportedProtocolException"} ], "documentation":"

Describes the specified rules or the rules for the specified listener. You must specify either a listener or one or more rules.

" }, @@ -376,7 +383,7 @@ "errors":[ {"shape":"TargetGroupNotFoundException"} ], - "documentation":"

Describes the attributes for the specified target group.

" + "documentation":"

Describes the attributes for the specified target group.

For more information, see Target Group Attributes in the Application Load Balancers Guide or Target Group Attributes in the Network Load Balancers Guide.

" }, "DescribeTargetGroups":{ "name":"DescribeTargetGroups", @@ -437,7 +444,9 @@ {"shape":"InvalidConfigurationRequestException"}, {"shape":"UnsupportedProtocolException"}, {"shape":"TooManyRegistrationsForTargetIdException"}, - {"shape":"TooManyTargetsException"} + {"shape":"TooManyTargetsException"}, + {"shape":"TooManyActionsException"}, + {"shape":"InvalidLoadBalancerActionException"} ], "documentation":"

Modifies the specified properties of the specified listener.

Any properties that you do not specify retain their current values. However, changing the protocol from HTTPS to HTTP removes the security policy and SSL certificate properties. If you change the protocol from HTTP to HTTPS, you must add the security policy and server certificate.

" }, @@ -476,9 +485,12 @@ {"shape":"OperationNotPermittedException"}, {"shape":"TooManyRegistrationsForTargetIdException"}, {"shape":"TooManyTargetsException"}, - {"shape":"TargetGroupNotFoundException"} + {"shape":"TargetGroupNotFoundException"}, + {"shape":"UnsupportedProtocolException"}, + {"shape":"TooManyActionsException"}, + {"shape":"InvalidLoadBalancerActionException"} ], - "documentation":"

Modifies the specified rule.

Any existing properties that you do not modify retain their current values.

To modify the default action, use ModifyListener.

" + "documentation":"

Modifies the specified rule.

Any existing properties that you do not modify retain their current values.

To modify the actions for the default rule, use ModifyListener.

" }, "ModifyTargetGroup":{ "name":"ModifyTargetGroup", @@ -649,25 +661,43 @@ "shapes":{ "Action":{ "type":"structure", - "required":[ - "Type", - "TargetGroupArn" - ], + "required":["Type"], "members":{ "Type":{ "shape":"ActionTypeEnum", - "documentation":"

The type of action.

" + "documentation":"

The type of action. Each rule must include one forward action.

" }, "TargetGroupArn":{ "shape":"TargetGroupArn", - "documentation":"

The Amazon Resource Name (ARN) of the target group.

" + "documentation":"

The Amazon Resource Name (ARN) of the target group. Specify only when Type is forward.

For a default rule, the protocol of the target group must be HTTP or HTTPS for an Application Load Balancer or TCP for a Network Load Balancer.

" + }, + "AuthenticateOidcConfig":{ + "shape":"AuthenticateOidcActionConfig", + "documentation":"

[HTTPS listener] Information about an identity provider that is compliant with OpenID Connect (OIDC). Specify only when Type is authenticate-oidc.

" + }, + "AuthenticateCognitoConfig":{ + "shape":"AuthenticateCognitoActionConfig", + "documentation":"

[HTTPS listener] Information for using Amazon Cognito to authenticate users. Specify only when Type is authenticate-cognito.

" + }, + "Order":{ + "shape":"ActionOrder", + "documentation":"

The order for the action. This value is required for rules with multiple actions. The action with the lowest value for order is performed first. The forward action must be performed last.

" } }, "documentation":"

Information about an action.

" }, + "ActionOrder":{ + "type":"integer", + "max":50000, + "min":1 + }, "ActionTypeEnum":{ "type":"string", - "enum":["forward"] + "enum":[ + "forward", + "authenticate-oidc", + "authenticate-cognito" + ] }, "Actions":{ "type":"list", @@ -734,6 +764,152 @@ }, "exception":true }, + "AuthenticateCognitoActionAuthenticationRequestExtraParams":{ + "type":"map", + "key":{"shape":"AuthenticateCognitoActionAuthenticationRequestParamName"}, + "value":{"shape":"AuthenticateCognitoActionAuthenticationRequestParamValue"} + }, + "AuthenticateCognitoActionAuthenticationRequestParamName":{"type":"string"}, + "AuthenticateCognitoActionAuthenticationRequestParamValue":{"type":"string"}, + "AuthenticateCognitoActionConditionalBehaviorEnum":{ + "type":"string", + "enum":[ + "deny", + "allow", + "authenticate" + ] + }, + "AuthenticateCognitoActionConfig":{ + "type":"structure", + "required":[ + "UserPoolArn", + "UserPoolClientId", + "UserPoolDomain" + ], + "members":{ + "UserPoolArn":{ + "shape":"AuthenticateCognitoActionUserPoolArn", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Cognito user pool.

" + }, + "UserPoolClientId":{ + "shape":"AuthenticateCognitoActionUserPoolClientId", + "documentation":"

The ID of the Amazon Cognito user pool client.

" + }, + "UserPoolDomain":{ + "shape":"AuthenticateCognitoActionUserPoolDomain", + "documentation":"

The domain prefix or fully-qualified domain name of the Amazon Cognito user pool.

" + }, + "SessionCookieName":{ + "shape":"AuthenticateCognitoActionSessionCookieName", + "documentation":"

The name of the cookie used to maintain session information. The default is AWSELBAuthSessionCookie.

" + }, + "Scope":{ + "shape":"AuthenticateCognitoActionScope", + "documentation":"

The set of user claims to be requested from the IdP. The default is openid.

To verify which scope values your IdP supports and how to separate multiple values, see the documentation for your IdP.

" + }, + "SessionTimeout":{ + "shape":"AuthenticateCognitoActionSessionTimeout", + "documentation":"

The maximum duration of the authentication session, in seconds. The default is 604800 seconds (7 days).

" + }, + "AuthenticationRequestExtraParams":{ + "shape":"AuthenticateCognitoActionAuthenticationRequestExtraParams", + "documentation":"

The query parameters (up to 10) to include in the redirect request to the authorization endpoint.

" + }, + "OnUnauthenticatedRequest":{ + "shape":"AuthenticateCognitoActionConditionalBehaviorEnum", + "documentation":"

The behavior if the user is not authenticated. The following are possible values:

" + } + }, + "documentation":"

Request parameters to use when integrating with Amazon Cognito to authenticate users.

" + }, + "AuthenticateCognitoActionScope":{"type":"string"}, + "AuthenticateCognitoActionSessionCookieName":{"type":"string"}, + "AuthenticateCognitoActionSessionTimeout":{"type":"long"}, + "AuthenticateCognitoActionUserPoolArn":{"type":"string"}, + "AuthenticateCognitoActionUserPoolClientId":{"type":"string"}, + "AuthenticateCognitoActionUserPoolDomain":{"type":"string"}, + "AuthenticateOidcActionAuthenticationRequestExtraParams":{ + "type":"map", + "key":{"shape":"AuthenticateOidcActionAuthenticationRequestParamName"}, + "value":{"shape":"AuthenticateOidcActionAuthenticationRequestParamValue"} + }, + "AuthenticateOidcActionAuthenticationRequestParamName":{"type":"string"}, + "AuthenticateOidcActionAuthenticationRequestParamValue":{"type":"string"}, + "AuthenticateOidcActionAuthorizationEndpoint":{"type":"string"}, + "AuthenticateOidcActionClientId":{"type":"string"}, + "AuthenticateOidcActionClientSecret":{"type":"string"}, + "AuthenticateOidcActionConditionalBehaviorEnum":{ + "type":"string", + "enum":[ + "deny", + "allow", + "authenticate" + ] + }, + "AuthenticateOidcActionConfig":{ + "type":"structure", + "required":[ + "Issuer", + "AuthorizationEndpoint", + "TokenEndpoint", + "UserInfoEndpoint", + "ClientId", + "ClientSecret" + ], + "members":{ + "Issuer":{ + "shape":"AuthenticateOidcActionIssuer", + "documentation":"

The OIDC issuer identifier of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path.

" + }, + "AuthorizationEndpoint":{ + "shape":"AuthenticateOidcActionAuthorizationEndpoint", + "documentation":"

The authorization endpoint of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path.

" + }, + "TokenEndpoint":{ + "shape":"AuthenticateOidcActionTokenEndpoint", + "documentation":"

The token endpoint of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path.

" + }, + "UserInfoEndpoint":{ + "shape":"AuthenticateOidcActionUserInfoEndpoint", + "documentation":"

The user info endpoint of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path.

" + }, + "ClientId":{ + "shape":"AuthenticateOidcActionClientId", + "documentation":"

The OAuth 2.0 client identifier.

" + }, + "ClientSecret":{ + "shape":"AuthenticateOidcActionClientSecret", + "documentation":"

The OAuth 2.0 client secret.

" + }, + "SessionCookieName":{ + "shape":"AuthenticateOidcActionSessionCookieName", + "documentation":"

The name of the cookie used to maintain session information. The default is AWSELBAuthSessionCookie.

" + }, + "Scope":{ + "shape":"AuthenticateOidcActionScope", + "documentation":"

The set of user claims to be requested from the IdP. The default is openid.

To verify which scope values your IdP supports and how to separate multiple values, see the documentation for your IdP.

" + }, + "SessionTimeout":{ + "shape":"AuthenticateOidcActionSessionTimeout", + "documentation":"

The maximum duration of the authentication session, in seconds. The default is 604800 seconds (7 days).

" + }, + "AuthenticationRequestExtraParams":{ + "shape":"AuthenticateOidcActionAuthenticationRequestExtraParams", + "documentation":"

The query parameters (up to 10) to include in the redirect request to the authorization endpoint.

" + }, + "OnUnauthenticatedRequest":{ + "shape":"AuthenticateOidcActionConditionalBehaviorEnum", + "documentation":"

The behavior if the user is not authenticated. The following are possible values:

  • deny - Return an HTTP 401 Unauthorized error.

  • allow - Allow the request to be forwarded to the target.

  • authenticate - Redirect the request to the IdP authorization endpoint. This is the default value.

" + } + }, + "documentation":"

Request parameters when using an identity provider (IdP) that is compliant with OpenID Connect (OIDC) to authenticate users.

" + }, + "AuthenticateOidcActionIssuer":{"type":"string"}, + "AuthenticateOidcActionScope":{"type":"string"}, + "AuthenticateOidcActionSessionCookieName":{"type":"string"}, + "AuthenticateOidcActionSessionTimeout":{"type":"long"}, + "AuthenticateOidcActionTokenEndpoint":{"type":"string"}, + "AuthenticateOidcActionUserInfoEndpoint":{"type":"string"}, "AvailabilityZone":{ "type":"structure", "members":{ @@ -851,11 +1027,11 @@ }, "Certificates":{ "shape":"CertificateList", - "documentation":"

[HTTPS listeners] The SSL server certificate. You must provide exactly one certificate.

" + "documentation":"

[HTTPS listeners] The default SSL server certificate. You must provide exactly one certificate. To create a certificate list, use AddListenerCertificates.

" }, "DefaultActions":{ "shape":"Actions", - "documentation":"

The default action for the listener. For Application Load Balancers, the protocol of the specified target group must be HTTP or HTTPS. For Network Load Balancers, the protocol of the specified target group must be TCP.

" + "documentation":"

The actions for the default rule. The rule must include one forward action.

If the action type is forward, you can specify a single target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer or TCP for a Network Load Balancer.

If the action type is authenticate-oidc, you can use an identity provider that is OpenID Connect (OIDC) compliant to authenticate users as they access your application.

If the action type is authenticate-cognito, you can use Amazon Cognito to authenticate users as they access your application.

" } } }, @@ -874,7 +1050,7 @@ "members":{ "Name":{ "shape":"LoadBalancerName", - "documentation":"

The name of the load balancer.

This name must be unique per region per account, can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens, and must not begin or end with a hyphen.

" + "documentation":"

The name of the load balancer.

This name must be unique per region per account, can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens, must not begin or end with a hyphen, and must not begin with \"internal-\".

" }, "Subnets":{ "shape":"Subnets", @@ -934,11 +1110,11 @@ }, "Priority":{ "shape":"RulePriority", - "documentation":"

The priority for the rule. A listener can't have multiple rules with the same priority.

" + "documentation":"

The rule priority. A listener can't have multiple rules with the same priority.

" }, "Actions":{ "shape":"Actions", - "documentation":"

An action. Each action has the type forward and specifies a target group.

" + "documentation":"

The actions. Each rule must include one forward action.

If the action type is forward, you can specify a single target group.

If the action type is authenticate-oidc, you can use an identity provider that is OpenID Connect (OIDC) compliant to authenticate users as they access your application.

If the action type is authenticate-cognito, you can use Amazon Cognito to authenticate users as they access your application.

" } } }, @@ -1516,6 +1692,18 @@ }, "exception":true }, + "InvalidLoadBalancerActionException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The requested action is not valid.

", + "error":{ + "code":"InvalidLoadBalancerAction", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "InvalidSchemeException":{ "type":"structure", "members":{ @@ -1732,7 +1920,7 @@ "members":{ "Key":{ "shape":"LoadBalancerAttributeKey", - "documentation":"

The name of the attribute.

  • access_logs.s3.enabled - [Application Load Balancers] Indicates whether access logs stored in Amazon S3 are enabled. The value is true or false.

  • access_logs.s3.bucket - [Application Load Balancers] The name of the S3 bucket for the access logs. This attribute is required if access logs in Amazon S3 are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permission to write to the bucket.

  • access_logs.s3.prefix - [Application Load Balancers] The prefix for the location in the S3 bucket. If you don't specify a prefix, the access logs are stored in the root of the bucket.

  • deletion_protection.enabled - Indicates whether deletion protection is enabled. The value is true or false.

  • idle_timeout.timeout_seconds - [Application Load Balancers] The idle timeout value, in seconds. The valid range is 1-4000. The default is 60 seconds.

  • load_balancing.cross_zone.enabled - [Network Load Balancers] Indicates whether cross-zone load balancing is enabled. The value is true or false. The default is false.

  • routing.http2.enabled - [Application Load Balancers] Indicates whether HTTP/2 is enabled. The value is true or false. The default is true.

" + "documentation":"

The name of the attribute.

The following attributes are supported by both Application Load Balancers and Network Load Balancers:

  • deletion_protection.enabled - Indicates whether deletion protection is enabled. The value is true or false. The default is false.

The following attributes are supported by only Application Load Balancers:

  • access_logs.s3.enabled - Indicates whether access logs are enabled. The value is true or false. The default is false.

  • access_logs.s3.bucket - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permission to write to the bucket.

  • access_logs.s3.prefix - The prefix for the location in the S3 bucket for the access logs.

  • idle_timeout.timeout_seconds - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.

  • routing.http2.enabled - Indicates whether HTTP/2 is enabled. The value is true or false. The default is true.

The following attributes are supported by only Network Load Balancers:

  • load_balancing.cross_zone.enabled - Indicates whether cross-zone load balancing is enabled. The value is true or false. The default is false.

" }, "Value":{ "shape":"LoadBalancerAttributeValue", @@ -1844,15 +2032,15 @@ }, "SslPolicy":{ "shape":"SslPolicyName", - "documentation":"

The security policy that defines which protocols and ciphers are supported. For more information, see Security Policies in the Application Load Balancers Guide.

" + "documentation":"

[HTTPS listeners] The security policy that defines which protocols and ciphers are supported. For more information, see Security Policies in the Application Load Balancers Guide.

" }, "Certificates":{ "shape":"CertificateList", - "documentation":"

The default SSL server certificate.

" + "documentation":"

[HTTPS listeners] The default SSL server certificate. You must provide exactly one certificate. To create a certificate list, use AddListenerCertificates.

" }, "DefaultActions":{ "shape":"Actions", - "documentation":"

The default action. For Application Load Balancers, the protocol of the specified target group must be HTTP or HTTPS. For Network Load Balancers, the protocol of the specified target group must be TCP.

" + "documentation":"

The actions for the default rule. The rule must include one forward action.

If the action type is forward, you can specify a single target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer or TCP for a Network Load Balancer.

If the action type is authenticate-oidc, you can use an identity provider that is OpenID Connect (OIDC) compliant to authenticate users as they access your application.

If the action type is authenticate-cognito, you can use Amazon Cognito to authenticate users as they access your application.

" } } }, @@ -1861,7 +2049,7 @@ "members":{ "Listeners":{ "shape":"Listeners", - "documentation":"

Information about the modified listeners.

" + "documentation":"

Information about the modified listener.

" } } }, @@ -1901,11 +2089,11 @@ }, "Conditions":{ "shape":"RuleConditionList", - "documentation":"

The conditions.

" + "documentation":"

The conditions. Each condition specifies a field name and a single value.

If the field name is host-header, you can specify a single host name (for example, my.example.com). A host name is case insensitive, can be up to 128 characters in length, and can contain any of the following characters. Note that you can include up to three wildcard characters.

  • A-Z, a-z, 0-9

  • - .

  • * (matches 0 or more characters)

  • ? (matches exactly 1 character)

If the field name is path-pattern, you can specify a single path pattern. A path pattern is case sensitive, can be up to 128 characters in length, and can contain any of the following characters. Note that you can include up to three wildcard characters.

  • A-Z, a-z, 0-9

  • _ - . $ / ~ \" ' @ : +

  • & (using &amp;)

  • * (matches 0 or more characters)

  • ? (matches exactly 1 character)

" }, "Actions":{ "shape":"Actions", - "documentation":"

The actions. The target group must use the HTTP or HTTPS protocol.

" + "documentation":"

The actions.

If the action type is forward, you can specify a single target group.

If the action type is authenticate-oidc, you can use an identity provider that is OpenID Connect (OIDC) compliant to authenticate users as they access your application.

If the action type is authenticate-cognito, you can use Amazon Cognito to authenticate users as they access your application.

" } } }, @@ -1914,7 +2102,7 @@ "members":{ "Rules":{ "shape":"Rules", - "documentation":"

Information about the rule.

" + "documentation":"

Information about the modified rule.

" } } }, @@ -1991,7 +2179,7 @@ "members":{ "TargetGroups":{ "shape":"TargetGroups", - "documentation":"

Information about the target group.

" + "documentation":"

Information about the modified target group.

" } } }, @@ -2304,10 +2492,7 @@ }, "SetSubnetsInput":{ "type":"structure", - "required":[ - "LoadBalancerArn", - "Subnets" - ], + "required":["LoadBalancerArn"], "members":{ "LoadBalancerArn":{ "shape":"LoadBalancerArn", @@ -2567,7 +2752,7 @@ "members":{ "Key":{ "shape":"TargetGroupAttributeKey", - "documentation":"

The name of the attribute.

  • deregistration_delay.timeout_seconds - The amount time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. The range is 0-3600 seconds. The default value is 300 seconds.

  • proxy_protocol_v2.enabled - [Network Load Balancers] Indicates whether Proxy Protocol version 2 is enabled.

  • stickiness.enabled - [Application Load Balancers] Indicates whether sticky sessions are enabled. The value is true or false.

  • stickiness.type - [Application Load Balancers] The type of sticky sessions. The possible value is lb_cookie.

  • stickiness.lb_cookie.duration_seconds - [Application Load Balancers] The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).

" + "documentation":"

The name of the attribute.

The following attributes are supported by both Application Load Balancers and Network Load Balancers:

  • deregistration_delay.timeout_seconds - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. The range is 0-3600 seconds. The default value is 300 seconds.

The following attributes are supported by only Application Load Balancers:

  • slow_start.duration_seconds - The time period, in seconds, during which a newly registered target receives a linearly increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). Slow start mode is disabled by default.

  • stickiness.enabled - Indicates whether sticky sessions are enabled. The value is true or false. The default is false.

  • stickiness.type - The type of sticky sessions. The possible value is lb_cookie.

  • stickiness.lb_cookie.duration_seconds - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).

The following attributes are supported by only Network Load Balancers:

  • proxy_protocol_v2.enabled - Indicates whether Proxy Protocol version 2 is enabled. The value is true or false. The default is false.

" }, "Value":{ "shape":"TargetGroupAttributeValue", @@ -2682,6 +2867,18 @@ "ip" ] }, + "TooManyActionsException":{ + "type":"structure", + "members":{ + }, + "documentation":"

You've reached the limit on the number of actions per rule.

", + "error":{ + "code":"TooManyActions", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "TooManyCertificatesException":{ "type":"structure", "members":{ diff --git a/botocore/data/emr/2009-03-31/service-2.json b/botocore/data/emr/2009-03-31/service-2.json index e59cfabf..ae2541e7 100644 --- a/botocore/data/emr/2009-03-31/service-2.json +++ b/botocore/data/emr/2009-03-31/service-2.json @@ -7,6 +7,7 @@ "protocol":"json", "serviceAbbreviation":"Amazon EMR", "serviceFullName":"Amazon Elastic MapReduce", + "serviceId":"EMR", "signatureVersion":"v4", "targetPrefix":"ElasticMapReduce", "timestampFormat":"unixTimestamp", diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 148654f1..33325348 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -97,6 +97,15 @@ "us-west-2" : { } } }, + "api.mediatailor" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-west-1" : { }, + "us-east-1" : { } + } + }, "api.pricing" : { "defaults" : { "credentialScope" : { @@ -221,6 +230,7 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, @@ -338,6 +348,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -583,11 +594,14 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, + "us-east-2" : { }, "us-west-2" : { } } }, @@ -809,6 +823,7 @@ }, "elasticfilesystem" : { "endpoints" : { + "ap-northeast-2" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, "eu-west-1" : { }, @@ -937,8 +952,10 @@ "firehose" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, "ap-southeast-1" : { }, 
"ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, @@ -952,6 +969,7 @@ "protocols" : [ "https" ] }, "endpoints" : { + "eu-west-1" : { }, "us-east-1" : { }, "us-west-2" : { } } @@ -998,11 +1016,13 @@ "glue" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-2" : { } @@ -1098,6 +1118,7 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, @@ -1249,9 +1270,12 @@ "medialive" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-west-2" : { } } @@ -1270,6 +1294,16 @@ "us-west-2" : { } } }, + "mediastore" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, "metering.marketplace" : { "defaults" : { "credentialScope" : { @@ -1347,6 +1381,34 @@ }, "isRegionalized" : false }, + "neptune" : { + "endpoints" : { + "eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "hostname" : "rds.eu-west-1.amazonaws.com" + }, + "us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "rds.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "rds.us-east-2.amazonaws.com" + }, + "us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "rds.us-west-2.amazonaws.com" + } + } + }, "opsworks" : { "endpoints" : { "ap-northeast-1" : { }, @@ -1519,6 +1581,7 @@ }, "runtime.sagemaker" : { "endpoints" : { + "ap-northeast-1" : { }, "eu-west-1" : { }, 
"us-east-1" : { }, "us-east-2" : { }, @@ -1583,6 +1646,8 @@ }, "sagemaker" : { "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, "eu-west-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1698,6 +1763,7 @@ "eu-west-1" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -1786,6 +1852,30 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "sqs-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "sqs-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "sqs-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "sqs-fips.us-west-2.amazonaws.com" + }, "sa-east-1" : { }, "us-east-1" : { "sslCommonName" : "queue.{dnsSuffix}" @@ -1817,6 +1907,7 @@ "states" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, @@ -1825,6 +1916,7 @@ "eu-west-2" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -2088,7 +2180,8 @@ "services" : { "apigateway" : { "endpoints" : { - "cn-north-1" : { } + "cn-north-1" : { }, + "cn-northwest-1" : { } } }, "application-autoscaling" : { @@ -2142,12 +2235,29 @@ "cn-northwest-1" : { } } }, + "data.iot" : { + "defaults" : { + "credentialScope" : { + "service" : "iotdata" + }, + "protocols" : [ "https" ] + }, + "endpoints" : { + "cn-north-1" : { } + } + }, "directconnect" : { "endpoints" : { "cn-north-1" : { }, "cn-northwest-1" : { } } }, + "ds" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "dynamodb" : { "defaults" : { "protocols" : [ "http", "https" ] @@ -2168,12 +2278,14 @@ }, "ecr" : { "endpoints" : { - "cn-north-1" : { 
} + "cn-north-1" : { }, + "cn-northwest-1" : { } } }, "ecs" : { "endpoints" : { - "cn-north-1" : { } + "cn-north-1" : { }, + "cn-northwest-1" : { } } }, "elasticache" : { @@ -2256,7 +2368,8 @@ }, "lambda" : { "endpoints" : { - "cn-north-1" : { } + "cn-north-1" : { }, + "cn-northwest-1" : { } } }, "logs" : { @@ -2525,6 +2638,11 @@ "isRegionalized" : false, "partitionEndpoint" : "aws-us-gov-global" }, + "inspector" : { + "endpoints" : { + "us-gov-west-1" : { } + } + }, "kinesis" : { "endpoints" : { "us-gov-west-1" : { } @@ -2662,6 +2780,14 @@ "endpoints" : { "us-gov-west-1" : { } } + }, + "translate" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "us-gov-west-1" : { } + } } } } ], diff --git a/botocore/data/firehose/2015-08-04/service-2.json b/botocore/data/firehose/2015-08-04/service-2.json index 93de30a4..1e1fef61 100644 --- a/botocore/data/firehose/2015-08-04/service-2.json +++ b/botocore/data/firehose/2015-08-04/service-2.json @@ -26,7 +26,7 @@ {"shape":"LimitExceededException"}, {"shape":"ResourceInUseException"} ], - "documentation":"

Creates a delivery stream.

By default, you can create up to 50 delivery streams per AWS Region.

This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream.

A Kinesis Data Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis data stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis data stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter.

A delivery stream is configured with a single destination: Amazon S3, Amazon ES, Amazon Redshift, or Splunk. Specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration, or SplunkDestinationConfiguration.

When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries are such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.

A few notes about Amazon Redshift as a destination:

  • An Amazon Redshift destination requires an S3 bucket as intermediate location. This is because Kinesis Data Firehose first delivers data to Amazon S3 and then uses COPY syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration parameter.

  • The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats.

  • We strongly recommend that you use the user name and password that you provide exclusively with Kinesis Data Firehose. In addition, the permissions for the account should be restricted for Amazon Redshift INSERT permissions.

Kinesis Data Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Data Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Kinesis Firehose Access to an Amazon S3 Destination in the Amazon Kinesis Data Firehose Developer Guide.

" + "documentation":"

Creates a Kinesis Data Firehose delivery stream.

By default, you can create up to 50 delivery streams per AWS Region.

This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream.

A Kinesis Data Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter.

A delivery stream is configured with a single destination: Amazon S3, Amazon ES, Amazon Redshift, or Splunk. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration, or SplunkDestinationConfiguration.

When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.

A few notes about Amazon Redshift as a destination:

  • An Amazon Redshift destination requires an S3 bucket as an intermediate location. Kinesis Data Firehose first delivers data to Amazon S3 and then uses COPY syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration parameter.

  • The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats.

  • We strongly recommend that you use the user name and password you provide exclusively with Kinesis Data Firehose, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions.

Kinesis Data Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Data Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination in the Amazon Kinesis Data Firehose Developer Guide.

" }, "DeleteDeliveryStream":{ "name":"DeleteDeliveryStream", @@ -40,7 +40,7 @@ {"shape":"ResourceInUseException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Deletes a delivery stream and its data.

You can delete a delivery stream only if it is in ACTIVE or DELETING state, and not in the CREATING state. While the deletion request is in process, the delivery stream is in the DELETING state.

To check the state of a delivery stream, use DescribeDeliveryStream.

While the delivery stream is DELETING state, the service may continue to accept the records, but the service doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, you should first stop any applications that are sending records before deleting a delivery stream.

" + "documentation":"

Deletes a delivery stream and its data.

You can delete a delivery stream only if it is in ACTIVE or DELETING state, and not in the CREATING state. While the deletion request is in process, the delivery stream is in the DELETING state.

To check the state of a delivery stream, use DescribeDeliveryStream.

While the delivery stream is in the DELETING state, the service might continue to accept the records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, you should first stop any applications that are sending records before deleting a delivery stream.

" }, "DescribeDeliveryStream":{ "name":"DescribeDeliveryStream", @@ -93,7 +93,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers.

By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. Note that if you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits.

You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on.

Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.

If the PutRecord operation throws a ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

" + "documentation":"

Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers.

By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits.

You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on.

Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.

If the PutRecord operation throws a ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

" }, "PutRecordBatch":{ "name":"PutRecordBatch", @@ -108,7 +108,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers.

By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits, see Amazon Kinesis Data Firehose Limits.

Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before 64-bit encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.

You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on.

Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose attempts to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records.

A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailable or InternalFailure. ErrorMessage provides more detailed information about the error.

If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination.

If PutRecordBatch throws ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

" + "documentation":"

Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers.

By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits, see Amazon Kinesis Data Firehose Limits.

Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.

You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on.

Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records.

A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailable or InternalFailure. ErrorMessage provides more detailed information about the error.

If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination.

If PutRecordBatch throws ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

" }, "TagDeliveryStream":{ "name":"TagDeliveryStream", @@ -156,7 +156,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

Updates the specified destination of the specified delivery stream.

Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes.

Switching between Amazon ES and other services is not supported. For an Amazon ES destination, you can only update to another Amazon ES destination.

If the destination type is the same, Kinesis Data Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the destination.

If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not merge any parameters. In this case, all parameters must be specified.

Kinesis Data Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and you can retrieve it using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next call.

" + "documentation":"

Updates the specified destination of the specified delivery stream.

Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes.

Switching between Amazon ES and other services is not supported. For an Amazon ES destination, you can only update to another Amazon ES destination.

If the destination type is the same, Kinesis Data Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the destination.

If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not merge any parameters. In this case, all parameters must be specified.

Kinesis Data Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next call.

" } }, "shapes":{ @@ -166,6 +166,10 @@ "min":1, "pattern":"arn:.*" }, + "BlockSizeBytes":{ + "type":"integer", + "min":67108864 + }, "BooleanObject":{"type":"boolean"}, "BucketARN":{ "type":"string", @@ -185,7 +189,7 @@ "documentation":"

Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.

" } }, - "documentation":"

Describes hints for the buffering to perform before delivering data to the destination. These options are treated as hints, and therefore Kinesis Data Firehose might choose to use different values whenever it is optimal.

" + "documentation":"

Describes hints for the buffering to perform before delivering data to the destination. These options are treated as hints, and therefore Kinesis Data Firehose might choose to use different values when it is optimal.

" }, "CloudWatchLoggingOptions":{ "type":"structure", @@ -210,6 +214,11 @@ "min":1, "pattern":"jdbc:(redshift|postgresql)://((?!-)[A-Za-z0-9-]{1,63}(?The name of the delivery stream. This name must be unique per AWS account in the same Region. If the delivery streams are in different accounts or different Regions, you can have multiple delivery streams with the same name.

" + "documentation":"

The name of the delivery stream. This name must be unique per AWS account in the same AWS Region. If the delivery streams are in different accounts or different Regions, you can have multiple delivery streams with the same name.

" }, "DeliveryStreamType":{ "shape":"DeliveryStreamType", @@ -303,6 +312,28 @@ "max":1024000, "min":0 }, + "DataFormatConversionConfiguration":{ + "type":"structure", + "members":{ + "SchemaConfiguration":{ + "shape":"SchemaConfiguration", + "documentation":"

Specifies the AWS Glue Data Catalog table that contains the column information.

" + }, + "InputFormatConfiguration":{ + "shape":"InputFormatConfiguration", + "documentation":"

Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON.

" + }, + "OutputFormatConfiguration":{ + "shape":"OutputFormatConfiguration", + "documentation":"

Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format.

" + }, + "Enabled":{ + "shape":"BooleanObject", + "documentation":"

Defaults to true. Set it to false if you want to disable format conversion while preserving the configuration details.

" + } + }, + "documentation":"

Specifies that you want Kinesis Data Firehose to convert data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. Kinesis Data Firehose uses the serializer and deserializer that you specify, in addition to the column information from the AWS Glue table, to deserialize your input data from JSON and then serialize it to the Parquet or ORC format. For more information, see Kinesis Data Firehose Record Format Conversion.

" + }, "DataTableColumns":{"type":"string"}, "DataTableName":{ "type":"string", @@ -348,7 +379,7 @@ }, "DeliveryStreamARN":{ "shape":"DeliveryStreamARN", - "documentation":"

The Amazon Resource Name (ARN) of the delivery stream.

" + "documentation":"

The Amazon Resource Name (ARN) of the delivery stream. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" }, "DeliveryStreamStatus":{ "shape":"DeliveryStreamStatus", @@ -426,11 +457,11 @@ }, "Limit":{ "shape":"DescribeDeliveryStreamInputLimit", - "documentation":"

The limit on the number of destinations to return. Currently, you can have one destination per delivery stream.

" + "documentation":"

The limit on the number of destinations to return. You can have one destination per delivery stream.

" }, "ExclusiveStartDestinationId":{ "shape":"DestinationId", - "documentation":"

The ID of the destination to start returning the destination information. Currently, Kinesis Data Firehose supports one destination per delivery stream.

" + "documentation":"

The ID of the destination to start returning the destination information. Kinesis Data Firehose supports one destination per delivery stream.

" } } }, @@ -449,6 +480,20 @@ } } }, + "Deserializer":{ + "type":"structure", + "members":{ + "OpenXJsonSerDe":{ + "shape":"OpenXJsonSerDe", + "documentation":"

The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.

" + }, + "HiveJsonSerDe":{ + "shape":"HiveJsonSerDe", + "documentation":"

The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.

" + } + }, + "documentation":"

The deserializer you want Kinesis Data Firehose to use for converting the input data from JSON. Kinesis Data Firehose then serializes the data to its final format using the Serializer. Kinesis Data Firehose supports two types of deserializers: the Apache Hive JSON SerDe and the OpenX JSON SerDe.

" + }, "DestinationDescription":{ "type":"structure", "required":["DestinationId"], @@ -525,11 +570,11 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon Destination.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and AWS Service Namespaces.

" }, "DomainARN":{ "shape":"ElasticsearchDomainARN", - "documentation":"

The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig after assuming the role specified in RoleARN.

" + "documentation":"

The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig after assuming the role specified in RoleARN. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" }, "IndexName":{ "shape":"ElasticsearchIndexName", @@ -537,11 +582,11 @@ }, "TypeName":{ "shape":"ElasticsearchTypeName", - "documentation":"

The Elasticsearch type name.

" + "documentation":"

The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during run time.

" }, "IndexRotationPeriod":{ "shape":"ElasticsearchIndexRotationPeriod", - "documentation":"

The Elasticsearch index rotation period. Index rotation appends a time stamp to the IndexName to facilitate the expiration of old data. For more information, see Index Rotation for the Amazon ES Destination. The default value is OneDay.

" + "documentation":"

The Elasticsearch index rotation period. Index rotation appends a time stamp to the IndexName to facilitate the expiration of old data. For more information, see Index Rotation for the Amazon ES Destination. The default value is OneDay.

" }, "BufferingHints":{ "shape":"ElasticsearchBufferingHints", @@ -553,7 +598,7 @@ }, "S3BackupMode":{ "shape":"ElasticsearchS3BackupMode", - "documentation":"

Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with elasticsearch-failed/ appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with elasticsearch-failed/ appended to the prefix. For more information, see Data Delivery Failure Handling. Default value is FailedDocumentsOnly.

" + "documentation":"

Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with elasticsearch-failed/ appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with elasticsearch-failed/ appended to the prefix. For more information, see Amazon S3 Backup for the Amazon ES Destination. Default value is FailedDocumentsOnly.

" }, "S3Configuration":{ "shape":"S3DestinationConfiguration", @@ -565,7 +610,7 @@ }, "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", - "documentation":"

The CloudWatch logging options for your delivery stream.

" + "documentation":"

The Amazon CloudWatch logging options for your delivery stream.

" } }, "documentation":"

Describes the configuration of a destination in Amazon ES.

" @@ -575,11 +620,11 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" }, "DomainARN":{ "shape":"ElasticsearchDomainARN", - "documentation":"

The ARN of the Amazon ES domain.

" + "documentation":"

The ARN of the Amazon ES domain. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" }, "IndexName":{ "shape":"ElasticsearchIndexName", @@ -615,7 +660,7 @@ }, "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", - "documentation":"

The CloudWatch logging options.

" + "documentation":"

The Amazon CloudWatch logging options.

" } }, "documentation":"

The destination description in Amazon ES.

" @@ -625,11 +670,11 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and AWS Service Namespaces.

" }, "DomainARN":{ "shape":"ElasticsearchDomainARN", - "documentation":"

The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig after assuming the IAM role specified in RoleARN.

" + "documentation":"

The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeElasticsearchDomain, DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig after assuming the IAM role specified in RoleARN. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" }, "IndexName":{ "shape":"ElasticsearchIndexName", @@ -637,15 +682,15 @@ }, "TypeName":{ "shape":"ElasticsearchTypeName", - "documentation":"

The Elasticsearch type name.

" + "documentation":"

The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during run time.

" }, "IndexRotationPeriod":{ "shape":"ElasticsearchIndexRotationPeriod", - "documentation":"

The Elasticsearch index rotation period. Index rotation appends a time stamp to IndexName to facilitate the expiration of old data. For more information, see Index Rotation for the Amazon ES Destination. Default value is OneDay.

" + "documentation":"

The Elasticsearch index rotation period. Index rotation appends a time stamp to IndexName to facilitate the expiration of old data. For more information, see Index Rotation for the Amazon ES Destination. Default value is OneDay.

" }, "BufferingHints":{ "shape":"ElasticsearchBufferingHints", - "documentation":"

The buffering options. If no value is specified, ElasticsearchBufferingHints object default values are used.

" + "documentation":"

The buffering options. If no value is specified, ElasticsearchBufferingHints object default values are used.

" }, "RetryOptions":{ "shape":"ElasticsearchRetryOptions", @@ -697,7 +742,7 @@ "members":{ "DurationInSeconds":{ "shape":"ElasticsearchRetryDurationInSeconds", - "documentation":"

After an initial failure to deliver to Amazon ES, the total amount of time during which Kinesis Data Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.

" + "documentation":"

After an initial failure to deliver to Amazon ES, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.

" } }, "documentation":"

Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES.

" @@ -739,11 +784,11 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" }, "BucketARN":{ "shape":"BucketARN", - "documentation":"

The ARN of the S3 bucket.

" + "documentation":"

The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" }, "Prefix":{ "shape":"Prefix", @@ -763,7 +808,7 @@ }, "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", - "documentation":"

The CloudWatch logging options for your delivery stream.

" + "documentation":"

The Amazon CloudWatch logging options for your delivery stream.

" }, "ProcessingConfiguration":{ "shape":"ProcessingConfiguration", @@ -776,6 +821,10 @@ "S3BackupConfiguration":{ "shape":"S3DestinationConfiguration", "documentation":"

The configuration for backup in Amazon S3.

" + }, + "DataFormatConversionConfiguration":{ + "shape":"DataFormatConversionConfiguration", + "documentation":"

The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.

" } }, "documentation":"

Describes the configuration of a destination in Amazon S3.

" @@ -792,15 +841,15 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" }, "BucketARN":{ "shape":"BucketARN", - "documentation":"

The ARN of the S3 bucket.

" + "documentation":"

The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" }, "Prefix":{ "shape":"Prefix", - "documentation":"

The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide.

" + "documentation":"

The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered Amazon S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Data Firehose Developer Guide.

" }, "BufferingHints":{ "shape":"BufferingHints", @@ -816,7 +865,7 @@ }, "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", - "documentation":"

The CloudWatch logging options for your delivery stream.

" + "documentation":"

The Amazon CloudWatch logging options for your delivery stream.

" }, "ProcessingConfiguration":{ "shape":"ProcessingConfiguration", @@ -829,6 +878,10 @@ "S3BackupDescription":{ "shape":"S3DestinationDescription", "documentation":"

The configuration for backup in Amazon S3.

" + }, + "DataFormatConversionConfiguration":{ + "shape":"DataFormatConversionConfiguration", + "documentation":"

The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.

" } }, "documentation":"

Describes a destination in Amazon S3.

" @@ -838,11 +891,11 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" }, "BucketARN":{ "shape":"BucketARN", - "documentation":"

The ARN of the S3 bucket.

" + "documentation":"

The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" }, "Prefix":{ "shape":"Prefix", @@ -862,7 +915,7 @@ }, "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", - "documentation":"

The CloudWatch logging options for your delivery stream.

" + "documentation":"

The Amazon CloudWatch logging options for your delivery stream.

" }, "ProcessingConfiguration":{ "shape":"ProcessingConfiguration", @@ -875,6 +928,10 @@ "S3BackupUpdate":{ "shape":"S3DestinationUpdate", "documentation":"

The Amazon S3 destination for backup.

" + }, + "DataFormatConversionConfiguration":{ + "shape":"DataFormatConversionConfiguration", + "documentation":"

The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.

" } }, "documentation":"

Describes an update for a destination in Amazon S3.

" @@ -893,6 +950,26 @@ ] }, "HECToken":{"type":"string"}, + "HiveJsonSerDe":{ + "type":"structure", + "members":{ + "TimestampFormats":{ + "shape":"ListOfNonEmptyStrings", + "documentation":"

Indicates how you want Kinesis Data Firehose to parse the date and time stamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis to parse time stamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.

" + } + }, + "documentation":"

The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.

" + }, + "InputFormatConfiguration":{ + "type":"structure", + "members":{ + "Deserializer":{ + "shape":"Deserializer", + "documentation":"

Specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. If both are non-null, the server rejects the request.

" + } + }, + "documentation":"

Specifies the deserializer you want to use to convert the format of the input data.

" + }, "IntervalInSeconds":{ "type":"integer", "max":900, @@ -915,7 +992,7 @@ "members":{ "AWSKMSKeyARN":{ "shape":"AWSKMSKeyARN", - "documentation":"

The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket.

" + "documentation":"

The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" } }, "documentation":"

Describes an encryption key for a destination in Amazon S3.

" @@ -935,11 +1012,11 @@ "members":{ "KinesisStreamARN":{ "shape":"KinesisStreamARN", - "documentation":"

The ARN of the source Kinesis data stream.

" + "documentation":"

The ARN of the source Kinesis data stream. For more information, see Amazon Kinesis Data Streams ARN Format.

" }, "RoleARN":{ "shape":"RoleARN", - "documentation":"

The ARN of the role that provides access to the source Kinesis data stream.

" + "documentation":"

The ARN of the role that provides access to the source Kinesis data stream. For more information, see AWS Identity and Access Management (IAM) ARN Format.

" } }, "documentation":"

The stream and role Amazon Resource Names (ARNs) for a Kinesis data stream used as the source for a delivery stream.

" @@ -949,11 +1026,11 @@ "members":{ "KinesisStreamARN":{ "shape":"KinesisStreamARN", - "documentation":"

The Amazon Resource Name (ARN) of the source Kinesis data stream.

" + "documentation":"

The Amazon Resource Name (ARN) of the source Kinesis data stream. For more information, see Amazon Kinesis Data Streams ARN Format.

" }, "RoleARN":{ "shape":"RoleARN", - "documentation":"

The ARN of the role used by the source Kinesis data stream.

" + "documentation":"

The ARN of the role used by the source Kinesis data stream. For more information, see AWS Identity and Access Management (IAM) ARN Format.

" }, "DeliveryStartTimestamp":{ "shape":"DeliveryStartTimestamp", @@ -1012,6 +1089,14 @@ } } }, + "ListOfNonEmptyStrings":{ + "type":"list", + "member":{"shape":"NonEmptyString"} + }, + "ListOfNonEmptyStringsWithoutWhitespace":{ + "type":"list", + "member":{"shape":"NonEmptyStringWithoutWhitespace"} + }, "ListTagsForDeliveryStreamInput":{ "type":"structure", "required":["DeliveryStreamName"], @@ -1064,10 +1149,164 @@ "type":"string", "enum":["NoEncryption"] }, + "NonEmptyString":{ + "type":"string", + "pattern":"^(?!\\s*$).+" + }, + "NonEmptyStringWithoutWhitespace":{ + "type":"string", + "pattern":"^\\S+$" + }, "NonNegativeIntegerObject":{ "type":"integer", "min":0 }, + "OpenXJsonSerDe":{ + "type":"structure", + "members":{ + "ConvertDotsInJsonKeysToUnderscores":{ + "shape":"BooleanObject", + "documentation":"

When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is \"a.b\", you can define the column name to be \"a_b\" when using this option.

The default is false.

" + }, + "CaseInsensitive":{ + "shape":"BooleanObject", + "documentation":"

When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.

" + }, + "ColumnToJsonKeyMappings":{ + "shape":"ColumnToJsonKeyMappings", + "documentation":"

Maps column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to {\"ts\": \"timestamp\"} to map this key to a column named ts.

" + } + }, + "documentation":"

The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.

" + }, + "OrcCompression":{ + "type":"string", + "enum":[ + "NONE", + "ZLIB", + "SNAPPY" + ] + }, + "OrcFormatVersion":{ + "type":"string", + "enum":[ + "V0_11", + "V0_12" + ] + }, + "OrcRowIndexStride":{ + "type":"integer", + "min":1000 + }, + "OrcSerDe":{ + "type":"structure", + "members":{ + "StripeSizeBytes":{ + "shape":"OrcStripeSizeBytes", + "documentation":"

The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.

" + }, + "BlockSizeBytes":{ + "shape":"BlockSizeBytes", + "documentation":"

The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

" + }, + "RowIndexStride":{ + "shape":"OrcRowIndexStride", + "documentation":"

The number of rows between index entries. The default is 10,000 and the minimum is 1,000.

" + }, + "EnablePadding":{ + "shape":"BooleanObject", + "documentation":"

Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false.

" + }, + "PaddingTolerance":{ + "shape":"Proportion", + "documentation":"

A number between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size.

For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task.

Kinesis Data Firehose ignores this parameter when OrcSerDe$EnablePadding is false.

" + }, + "Compression":{ + "shape":"OrcCompression", + "documentation":"

The compression code to use over data blocks. The default is SNAPPY.

" + }, + "BloomFilterColumns":{ + "shape":"ListOfNonEmptyStringsWithoutWhitespace", + "documentation":"

The column names for which you want Kinesis Data Firehose to create bloom filters. The default is null.

" + }, + "BloomFilterFalsePositiveProbability":{ + "shape":"Proportion", + "documentation":"

The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.

" + }, + "DictionaryKeyThreshold":{ + "shape":"Proportion", + "documentation":"

Represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.

" + }, + "FormatVersion":{ + "shape":"OrcFormatVersion", + "documentation":"

The version of the file to write. The possible values are V0_11 and V0_12. The default is V0_12.

" + } + }, + "documentation":"

A serializer to use for converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC.

" + }, + "OrcStripeSizeBytes":{ + "type":"integer", + "min":8388608 + }, + "OutputFormatConfiguration":{ + "type":"structure", + "members":{ + "Serializer":{ + "shape":"Serializer", + "documentation":"

Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. If both are non-null, the server rejects the request.

" + } + }, + "documentation":"

Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data before it writes it to Amazon S3.

" + }, + "ParquetCompression":{ + "type":"string", + "enum":[ + "UNCOMPRESSED", + "GZIP", + "SNAPPY" + ] + }, + "ParquetPageSizeBytes":{ + "type":"integer", + "min":65536 + }, + "ParquetSerDe":{ + "type":"structure", + "members":{ + "BlockSizeBytes":{ + "shape":"BlockSizeBytes", + "documentation":"

The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

" + }, + "PageSizeBytes":{ + "shape":"ParquetPageSizeBytes", + "documentation":"

The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.

" + }, + "Compression":{ + "shape":"ParquetCompression", + "documentation":"

The compression code to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.

" + }, + "EnableDictionaryCompression":{ + "shape":"BooleanObject", + "documentation":"

Indicates whether to enable dictionary compression.

" + }, + "MaxPaddingBytes":{ + "shape":"NonNegativeIntegerObject", + "documentation":"

The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.

" + }, + "WriterVersion":{ + "shape":"ParquetWriterVersion", + "documentation":"

Indicates the version of row format to output. The possible values are V1 and V2. The default is V1.

" + } + }, + "documentation":"

A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet.

" + }, + "ParquetWriterVersion":{ + "type":"string", + "enum":[ + "V1", + "V2" + ] + }, "Password":{ "type":"string", "min":6, @@ -1148,6 +1387,11 @@ "type":"string", "enum":["Lambda"] }, + "Proportion":{ + "type":"double", + "max":1, + "min":0 + }, "PutRecordBatchInput":{ "type":"structure", "required":[ @@ -1267,7 +1511,7 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" }, "ClusterJDBCURL":{ "shape":"ClusterJDBCURL", @@ -1324,7 +1568,7 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" }, "ClusterJDBCURL":{ "shape":"ClusterJDBCURL", @@ -1360,7 +1604,7 @@ }, "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", - "documentation":"

The CloudWatch logging options for your delivery stream.

" + "documentation":"

The Amazon CloudWatch logging options for your delivery stream.

" } }, "documentation":"

Describes a destination in Amazon Redshift.

" @@ -1370,7 +1614,7 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" }, "ClusterJDBCURL":{ "shape":"ClusterJDBCURL", @@ -1410,7 +1654,7 @@ }, "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", - "documentation":"

The CloudWatch logging options for your delivery stream.

" + "documentation":"

The Amazon CloudWatch logging options for your delivery stream.

" } }, "documentation":"

Describes an update for a destination in Amazon Redshift.

" @@ -1481,11 +1725,11 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" }, "BucketARN":{ "shape":"BucketARN", - "documentation":"

The ARN of the S3 bucket.

" + "documentation":"

The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" }, "Prefix":{ "shape":"Prefix", @@ -1522,11 +1766,11 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" }, "BucketARN":{ "shape":"BucketARN", - "documentation":"

The ARN of the S3 bucket.

" + "documentation":"

The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" }, "Prefix":{ "shape":"Prefix", @@ -1546,7 +1790,7 @@ }, "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", - "documentation":"

The CloudWatch logging options for your delivery stream.

" + "documentation":"

The Amazon CloudWatch logging options for your delivery stream.

" } }, "documentation":"

Describes a destination in Amazon S3.

" @@ -1556,11 +1800,11 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" }, "BucketARN":{ "shape":"BucketARN", - "documentation":"

The ARN of the S3 bucket.

" + "documentation":"

The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" }, "Prefix":{ "shape":"Prefix", @@ -1585,6 +1829,50 @@ }, "documentation":"

Describes an update for a destination in Amazon S3.

" }, + "SchemaConfiguration":{ + "type":"structure", + "members":{ + "RoleARN":{ + "shape":"NonEmptyStringWithoutWhitespace", + "documentation":"

The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.

" + }, + "CatalogId":{ + "shape":"NonEmptyStringWithoutWhitespace", + "documentation":"

The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NonEmptyStringWithoutWhitespace", + "documentation":"

Specifies the name of the AWS Glue database that contains the schema for the output data.

" + }, + "TableName":{ + "shape":"NonEmptyStringWithoutWhitespace", + "documentation":"

Specifies the AWS Glue table that contains the column information that constitutes your data schema.

" + }, + "Region":{ + "shape":"NonEmptyStringWithoutWhitespace", + "documentation":"

If you don't specify an AWS Region, the default is the current Region.

" + }, + "VersionId":{ + "shape":"NonEmptyStringWithoutWhitespace", + "documentation":"

Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to LATEST, Kinesis Data Firehose uses the most recent version. This means that any updates to the table are automatically picked up.

" + } + }, + "documentation":"

Specifies the schema to which you want Kinesis Data Firehose to configure your data before it writes it to Amazon S3.

" + }, + "Serializer":{ + "type":"structure", + "members":{ + "ParquetSerDe":{ + "shape":"ParquetSerDe", + "documentation":"

A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet.

" + }, + "OrcSerDe":{ + "shape":"OrcSerDe", + "documentation":"

A serializer to use for converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC.

" + } + }, + "documentation":"

The serializer that you want Kinesis Data Firehose to use to convert data to the target format before writing it to Amazon S3. Kinesis Data Firehose supports two types of serializers: the ORC SerDe and the Parquet SerDe.

" + }, "ServiceUnavailableException":{ "type":"structure", "members":{ @@ -1655,7 +1943,7 @@ }, "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", - "documentation":"

The CloudWatch logging options for your delivery stream.

" + "documentation":"

The Amazon CloudWatch logging options for your delivery stream.

" } }, "documentation":"

Describes the configuration of a destination in Splunk.

" @@ -1673,7 +1961,7 @@ }, "HECToken":{ "shape":"HECToken", - "documentation":"

This is a GUID you obtain from your Splunk cluster when you create a new HEC endpoint.

" + "documentation":"

A GUID you obtain from your Splunk cluster when you create a new HEC endpoint.

" }, "HECAcknowledgmentTimeoutInSeconds":{ "shape":"HECAcknowledgmentTimeoutInSeconds", @@ -1697,7 +1985,7 @@ }, "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", - "documentation":"

The CloudWatch logging options for your delivery stream.

" + "documentation":"

The Amazon CloudWatch logging options for your delivery stream.

" } }, "documentation":"

Describes a destination in Splunk.

" @@ -1739,7 +2027,7 @@ }, "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", - "documentation":"

The CloudWatch logging options for your delivery stream.

" + "documentation":"

The Amazon CloudWatch logging options for your delivery stream.

" } }, "documentation":"

Describes an update for a destination in Splunk.

" @@ -1862,7 +2150,7 @@ }, "CurrentDeliveryStreamVersionId":{ "shape":"DeliveryStreamVersionId", - "documentation":"

Obtain this value from the VersionId result of DeliveryStreamDescription. This value is required, and it helps the service perform conditional operations. For example, if there is an interleaving update and this value is null, then the update destination fails. After the update is successful, the VersionId value is updated. The service then performs a merge of the old configuration with the new configuration.

" + "documentation":"

Obtain this value from the VersionId result of DeliveryStreamDescription. This value is required, and helps the service perform conditional operations. For example, if there is an interleaving update and this value is null, then the update destination fails. After the update is successful, the VersionId value is updated. The service then performs a merge of the old configuration with the new configuration.

" }, "DestinationId":{ "shape":"DestinationId", diff --git a/botocore/data/gamelift/2015-10-01/service-2.json b/botocore/data/gamelift/2015-10-01/service-2.json index 8d085da3..4b399380 100644 --- a/botocore/data/gamelift/2015-10-01/service-2.json +++ b/botocore/data/gamelift/2015-10-01/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceFullName":"Amazon GameLift", + "serviceId":"GameLift", "signatureVersion":"v4", "targetPrefix":"GameLift", "uid":"gamelift-2015-10-01" @@ -76,7 +77,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Creates a new fleet to run your game servers. A fleet is a set of Amazon Elastic Compute Cloud (Amazon EC2) instances, each of which can run multiple server processes to host game sessions. You set up a fleet to use instances with certain hardware specifications (see Amazon EC2 Instance Types for more information), and deploy your game build to run on each instance.

To create a new fleet, you must specify the following: (1) a fleet name, (2) the build ID of a successfully uploaded game build, (3) an EC2 instance type, and (4) a run-time configuration, which describes the server processes to run on each instance in the fleet. If you don't specify a fleet type (on-demand or spot), the new fleet uses on-demand instances by default.

You can also configure the new fleet with the following settings:

  • Fleet description

  • Access permissions for inbound traffic

  • Fleet-wide game session protection

  • Resource usage limits

If you use Amazon CloudWatch for metrics, you can add the new fleet to a metric group. By adding multiple fleets to a metric group, you can view aggregated metrics for all the fleets in the group.

If the CreateFleet call is successful, Amazon GameLift performs the following tasks. You can track the process of a fleet by checking the fleet status or by monitoring fleet creation events:

  • Creates a fleet record. Status: NEW.

  • Begins writing events to the fleet event log, which can be accessed in the Amazon GameLift console.

    Sets the fleet's target capacity to 1 (desired instances), which triggers Amazon GameLift to start one new EC2 instance.

  • Downloads the game build to the new instance and installs it. Statuses: DOWNLOADING, VALIDATING, BUILDING.

  • Starts launching server processes on the instance. If the fleet is configured to run multiple server processes per instance, Amazon GameLift staggers each launch by a few seconds. Status: ACTIVATING.

  • Sets the fleet's status to ACTIVE as soon as one server process is ready to host a game session.

Fleet-related operations include:

" + "documentation":"

Creates a new fleet to run your game servers. A fleet is a set of Amazon Elastic Compute Cloud (Amazon EC2) instances, each of which can run multiple server processes to host game sessions. You set up a fleet to use instances with certain hardware specifications (see Amazon EC2 Instance Types for more information), and deploy your game build to run on each instance.

To create a new fleet, you must specify the following: (1) a fleet name, (2) the build ID of a successfully uploaded game build, (3) an EC2 instance type, and (4) a run-time configuration, which describes the server processes to run on each instance in the fleet. If you don't specify a fleet type (on-demand or spot), the new fleet uses on-demand instances by default.

You can also configure the new fleet with the following settings:

  • Fleet description

  • Access permissions for inbound traffic

  • Fleet-wide game session protection

  • Resource usage limits

If you use Amazon CloudWatch for metrics, you can add the new fleet to a metric group. By adding multiple fleets to a metric group, you can view aggregated metrics for all the fleets in the group.

If the CreateFleet call is successful, Amazon GameLift performs the following tasks. You can track the process of a fleet by checking the fleet status or by monitoring fleet creation events:

  • Creates a fleet record. Status: NEW.

  • Begins writing events to the fleet event log, which can be accessed in the Amazon GameLift console.

    Sets the fleet's target capacity to 1 (desired instances), which triggers Amazon GameLift to start one new EC2 instance.

  • Downloads the game build to the new instance and installs it. Statuses: DOWNLOADING, VALIDATING, BUILDING.

  • Starts launching server processes on the instance. If the fleet is configured to run multiple server processes per instance, Amazon GameLift staggers each launch by a few seconds. Status: ACTIVATING.

  • Sets the fleet's status to ACTIVE as soon as one server process is ready to host a game session.

Fleet-related operations include:

" }, "CreateGameSession":{ "name":"CreateGameSession", @@ -262,7 +263,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Deletes everything related to a fleet. Before deleting a fleet, you must set the fleet's desired capacity to zero. See UpdateFleetCapacity.

This action removes the fleet's resources and the fleet record. Once a fleet is deleted, you can no longer use that fleet.

Fleet-related operations include:

" + "documentation":"

Deletes everything related to a fleet. Before deleting a fleet, you must set the fleet's desired capacity to zero. See UpdateFleetCapacity.

This action removes the fleet's resources and the fleet record. Once a fleet is deleted, you can no longer use that fleet.

Fleet-related operations include:

" }, "DeleteGameSessionQueue":{ "name":"DeleteGameSessionQueue", @@ -309,7 +310,7 @@ {"shape":"UnauthorizedException"}, {"shape":"NotFoundException"} ], - "documentation":"

Deletes a fleet scaling policy. This action means that the policy is no longer in force and removes all record of it. To delete a scaling policy, specify both the scaling policy name and the fleet ID it is associated with.

Fleet-related operations include:

" + "documentation":"

Deletes a fleet scaling policy. This action means that the policy is no longer in force and removes all record of it. To delete a scaling policy, specify both the scaling policy name and the fleet ID it is associated with.

To temporarily suspend scaling policies, call StopFleetActions. This operation suspends all policies for the fleet.

Operations related to fleet capacity scaling include:

" }, "DeleteVpcPeeringAuthorization":{ "name":"DeleteVpcPeeringAuthorization", @@ -388,7 +389,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves the following information for the specified EC2 instance type:

  • maximum number of instances allowed per AWS account (service limit)

  • current usage level for the AWS account

Service limits vary depending on region. Available regions for Amazon GameLift can be found in the AWS Management Console for Amazon GameLift (see the drop-down list in the upper right corner).

Fleet-related operations include:

" + "documentation":"

Retrieves the following information for the specified EC2 instance type:

  • maximum number of instances allowed per AWS account (service limit)

  • current usage level for the AWS account

Service limits vary depending on region. Available regions for Amazon GameLift can be found in the AWS Management Console for Amazon GameLift (see the drop-down list in the upper right corner).

Fleet-related operations include:

" }, "DescribeFleetAttributes":{ "name":"DescribeFleetAttributes", @@ -404,7 +405,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves fleet properties, including metadata, status, and configuration, for one or more fleets. You can request attributes for all fleets, or specify a list of one or more fleet IDs. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetAttributes object is returned for each requested fleet ID. When specifying a list of fleet IDs, attribute objects are returned only for fleets that currently exist.

Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

Fleet-related operations include:

" + "documentation":"

Retrieves fleet properties, including metadata, status, and configuration, for one or more fleets. You can request attributes for all fleets, or specify a list of one or more fleet IDs. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetAttributes object is returned for each requested fleet ID. When specifying a list of fleet IDs, attribute objects are returned only for fleets that currently exist.

Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

Fleet-related operations include:

" }, "DescribeFleetCapacity":{ "name":"DescribeFleetCapacity", @@ -420,7 +421,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves the current status of fleet capacity for one or more fleets. This information includes the number of instances that have been requested for the fleet and the number currently active. You can request capacity for all fleets, or specify a list of one or more fleet IDs. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetCapacity object is returned for each requested fleet ID. When specifying a list of fleet IDs, attribute objects are returned only for fleets that currently exist.

Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

Fleet-related operations include:

" + "documentation":"

Retrieves the current status of fleet capacity for one or more fleets. This information includes the number of instances that have been requested for the fleet and the number currently active. You can request capacity for all fleets, or specify a list of one or more fleet IDs. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetCapacity object is returned for each requested fleet ID. When specifying a list of fleet IDs, attribute objects are returned only for fleets that currently exist.

Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

Fleet-related operations include:

" }, "DescribeFleetEvents":{ "name":"DescribeFleetEvents", @@ -436,7 +437,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Retrieves entries from the specified fleet's event log. You can specify a time range to limit the result set. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a collection of event log entries matching the request are returned.

Fleet-related operations include:

" + "documentation":"

Retrieves entries from the specified fleet's event log. You can specify a time range to limit the result set. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a collection of event log entries matching the request are returned.

Fleet-related operations include:

" }, "DescribeFleetPortSettings":{ "name":"DescribeFleetPortSettings", @@ -452,7 +453,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves the inbound connection permissions for a fleet. Connection permissions include a range of IP addresses and port settings that incoming traffic can use to access server processes in the fleet. To get a fleet's inbound connection permissions, specify a fleet ID. If successful, a collection of IpPermission objects is returned for the requested fleet ID. If the requested fleet has been deleted, the result set is empty.

Fleet-related operations include:

" + "documentation":"

Retrieves the inbound connection permissions for a fleet. Connection permissions include a range of IP addresses and port settings that incoming traffic can use to access server processes in the fleet. To get a fleet's inbound connection permissions, specify a fleet ID. If successful, a collection of IpPermission objects is returned for the requested fleet ID. If the requested fleet has been deleted, the result set is empty.

Fleet-related operations include:

" }, "DescribeFleetUtilization":{ "name":"DescribeFleetUtilization", @@ -468,7 +469,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves utilization statistics for one or more fleets. You can request utilization data for all fleets, or specify a list of one or more fleet IDs. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetUtilization object is returned for each requested fleet ID. When specifying a list of fleet IDs, utilization objects are returned only for fleets that currently exist.

Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

Fleet-related operations include:

" + "documentation":"

Retrieves utilization statistics for one or more fleets. You can request utilization data for all fleets, or specify a list of one or more fleet IDs. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetUtilization object is returned for each requested fleet ID. When specifying a list of fleet IDs, utilization objects are returned only for fleets that currently exist.

Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

Fleet-related operations include:

" }, "DescribeGameSessionDetails":{ "name":"DescribeGameSessionDetails", @@ -628,7 +629,7 @@ {"shape":"InternalServiceException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Retrieves the current run-time configuration for the specified fleet. The run-time configuration tells Amazon GameLift how to launch server processes on instances in the fleet.

Fleet-related operations include:

" + "documentation":"

Retrieves the current run-time configuration for the specified fleet. The run-time configuration tells Amazon GameLift how to launch server processes on instances in the fleet.

Fleet-related operations include:

" }, "DescribeScalingPolicies":{ "name":"DescribeScalingPolicies", @@ -644,7 +645,7 @@ {"shape":"UnauthorizedException"}, {"shape":"NotFoundException"} ], - "documentation":"

Retrieves all scaling policies applied to a fleet.

To get a fleet's scaling policies, specify the fleet ID. You can filter this request by policy status, such as to retrieve only active scaling policies. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, set of ScalingPolicy objects is returned for the fleet.

Fleet-related operations include:

" + "documentation":"

Retrieves all scaling policies applied to a fleet.

To get a fleet's scaling policies, specify the fleet ID. You can filter this request by policy status, such as to retrieve only active scaling policies. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a set of ScalingPolicy objects is returned for the fleet.

A fleet may have all of its scaling policies suspended (StopFleetActions). This action does not affect the status of the scaling policies, which remains ACTIVE. To see whether a fleet's scaling policies are in force or suspended, call DescribeFleetAttributes and check the stopped actions.

Operations related to fleet capacity scaling include:

" }, "DescribeVpcPeeringAuthorizations":{ "name":"DescribeVpcPeeringAuthorizations", @@ -753,7 +754,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves a collection of fleet records for this AWS account. You can filter the result set by build ID. Use the pagination parameters to retrieve results in sequential pages.

Fleet records are not listed in any particular order.

Fleet-related operations include:

" + "documentation":"

Retrieves a collection of fleet records for this AWS account. You can filter the result set by build ID. Use the pagination parameters to retrieve results in sequential pages.

Fleet records are not listed in any particular order.

Fleet-related operations include:

" }, "PutScalingPolicy":{ "name":"PutScalingPolicy", @@ -769,7 +770,7 @@ {"shape":"UnauthorizedException"}, {"shape":"NotFoundException"} ], - "documentation":"

Creates or updates a scaling policy for a fleet. An active scaling policy prompts Amazon GameLift to track a certain metric for a fleet and automatically change the fleet's capacity in specific circumstances. Each scaling policy contains one rule statement. Fleets can have multiple scaling policies in force simultaneously.

A scaling policy rule statement has the following structure:

If [MetricName] is [ComparisonOperator] [Threshold] for [EvaluationPeriods] minutes, then [ScalingAdjustmentType] to/by [ScalingAdjustment].

For example, this policy: \"If the number of idle instances exceeds 20 for more than 15 minutes, then reduce the fleet capacity by 10 instances\" could be implemented as the following rule statement:

If [IdleInstances] is [GreaterThanOrEqualToThreshold] [20] for [15] minutes, then [ChangeInCapacity] by [-10].

To create or update a scaling policy, specify a unique combination of name and fleet ID, and set the rule values. All parameters for this action are required. If successful, the policy name is returned. Scaling policies cannot be suspended or made inactive. To stop enforcing a scaling policy, call DeleteScalingPolicy.

Fleet-related operations include:

" + "documentation":"

Creates or updates a scaling policy for a fleet. Scaling policies are used to automatically scale a fleet's hosting capacity to meet player demand. An active scaling policy instructs Amazon GameLift to track a fleet metric and automatically change the fleet's capacity when a certain threshold is reached. There are two types of scaling policies: target-based and rule-based. Use a target-based policy to quickly and efficiently manage fleet scaling; this option is the most commonly used. Use rule-based policies when you need to exert fine-grained control over auto-scaling.

Fleets can have multiple scaling policies of each type in force at the same time; you can have one target-based policy, one or multiple rule-based scaling policies, or both. We recommend caution, however, because multiple auto-scaling policies can have unintended consequences.

You can temporarily suspend all scaling policies for a fleet by calling StopFleetActions with the fleet action AUTO_SCALING. To resume scaling policies, call StartFleetActions with the same fleet action. To stop just one scaling policy--or to permanently remove it, you must delete the policy with DeleteScalingPolicy.

Learn more about how to work with auto-scaling in Set Up Fleet Automatic Scaling.

Target-based policy

A target-based policy tracks a single metric: PercentAvailableGameSessions. This metric tells us how much of a fleet's hosting capacity is ready to host game sessions but is not currently in use. This is the fleet's buffer; it measures the additional player demand that the fleet could handle at current capacity. With a target-based policy, you set your ideal buffer size and leave it to Amazon GameLift to take whatever action is needed to maintain that target.

For example, you might choose to maintain a 10% buffer for a fleet that has the capacity to host 100 simultaneous game sessions. This policy tells Amazon GameLift to take action whenever the fleet's available capacity falls below or rises above 10 game sessions. Amazon GameLift will start new instances or stop unused instances in order to return to the 10% buffer.

To create or update a target-based policy, specify a fleet ID and name, and set the policy type to \"TargetBased\". Specify the metric to track (PercentAvailableGameSessions) and reference a TargetConfiguration object with your desired buffer value. Exclude all other parameters. On a successful request, the policy name is returned. The scaling policy is automatically in force as soon as it's successfully created. If the fleet's auto-scaling actions are temporarily suspended, the new policy will be in force once the fleet actions are restarted.

Rule-based policy

A rule-based policy tracks specified fleet metric, sets a threshold value, and specifies the type of action to initiate when triggered. With a rule-based policy, you can select from several available fleet metrics. Each policy specifies whether to scale up or scale down (and by how much), so you need one policy for each type of action.

For example, a policy may make the following statement: \"If the percentage of idle instances is greater than 20% for more than 15 minutes, then reduce the fleet capacity by 10%.\"

A policy's rule statement has the following structure:

If [MetricName] is [ComparisonOperator] [Threshold] for [EvaluationPeriods] minutes, then [ScalingAdjustmentType] to/by [ScalingAdjustment].

To implement the example, the rule statement would look like this:

If [PercentIdleInstances] is [GreaterThanThreshold] [20] for [15] minutes, then [PercentChangeInCapacity] to/by [10].

To create or update a scaling policy, specify a unique combination of name and fleet ID, and set the policy type to \"RuleBased\". Specify the parameter values for a policy rule statement. On a successful request, the policy name is returned. Scaling policies are automatically in force as soon as they're successfully created. If the fleet's auto-scaling actions are temporarily suspended, the new policy will be in force once the fleet actions are restarted.

Operations related to fleet capacity scaling include:

" }, "RequestUploadCredentials":{ "name":"RequestUploadCredentials", @@ -819,7 +820,23 @@ {"shape":"UnauthorizedException"}, {"shape":"TerminalRoutingStrategyException"} ], - "documentation":"

Retrieves all active game sessions that match a set of search criteria and sorts them in a specified order. You can search or sort by the following game session attributes:

  • gameSessionId -- Unique identifier for the game session. You can use either a GameSessionId or GameSessionArn value.

  • gameSessionName -- Name assigned to a game session. This value is set when requesting a new game session with CreateGameSession or updating with UpdateGameSession. Game session names do not need to be unique to a game session.

  • gameSessionProperties -- Custom data defined in a game session's GameProperty parameter. GameProperty values are stored as key:value pairs; the filter expression must indicate the key and a string to search the data values for. For example, to search for game sessions with custom data containing the key:value pair \"gameMode:brawl\", specify the following: gameSessionProperties.gameMode = \"brawl\". All custom data values are searched as strings.

  • maximumSessions -- Maximum number of player sessions allowed for a game session. This value is set when requesting a new game session with CreateGameSession or updating with UpdateGameSession.

  • creationTimeMillis -- Value indicating when a game session was created. It is expressed in Unix time as milliseconds.

  • playerSessionCount -- Number of players currently connected to a game session. This value changes rapidly as players join the session or drop out.

  • hasAvailablePlayerSessions -- Boolean value indicating whether a game session has reached its maximum number of players. It is highly recommended that all search requests include this filter attribute to optimize search performance and return only sessions that players can join.

Returned values for playerSessionCount and hasAvailablePlayerSessions change quickly as players join sessions and others drop out. Results should be considered a snapshot in time. Be sure to refresh search results often, and handle sessions that fill up before a player can join.

To search or sort, specify either a fleet ID or an alias ID, and provide a search filter expression, a sort expression, or both. If successful, a collection of GameSession objects matching the request is returned. Use the pagination parameters to retrieve results as a set of sequential pages.

You can search for game sessions one fleet at a time only. To find game sessions across multiple fleets, you must search each fleet separately and combine the results. This search feature finds only game sessions that are in ACTIVE status. To locate games in statuses other than active, use DescribeGameSessionDetails.

Game-session-related operations include:

" + "documentation":"

Retrieves all active game sessions that match a set of search criteria and sorts them in a specified order. You can search or sort by the following game session attributes:

  • gameSessionId -- Unique identifier for the game session. You can use either a GameSessionId or GameSessionArn value.

  • gameSessionName -- Name assigned to a game session. This value is set when requesting a new game session with CreateGameSession or updating with UpdateGameSession. Game session names do not need to be unique to a game session.

  • gameSessionProperties -- Custom data defined in a game session's GameProperty parameter. GameProperty values are stored as key:value pairs; the filter expression must indicate the key and a string to search the data values for. For example, to search for game sessions with custom data containing the key:value pair \"gameMode:brawl\", specify the following: gameSessionProperties.gameMode = \"brawl\". All custom data values are searched as strings.

  • maximumSessions -- Maximum number of player sessions allowed for a game session. This value is set when requesting a new game session with CreateGameSession or updating with UpdateGameSession.

  • creationTimeMillis -- Value indicating when a game session was created. It is expressed in Unix time as milliseconds.

  • playerSessionCount -- Number of players currently connected to a game session. This value changes rapidly as players join the session or drop out.

  • hasAvailablePlayerSessions -- Boolean value indicating whether a game session has reached its maximum number of players. It is highly recommended that all search requests include this filter attribute to optimize search performance and return only sessions that players can join.

Returned values for playerSessionCount and hasAvailablePlayerSessions change quickly as players join sessions and others drop out. Results should be considered a snapshot in time. Be sure to refresh search results often, and handle sessions that fill up before a player can join.

To search or sort, specify either a fleet ID or an alias ID, and provide a search filter expression, a sort expression, or both. If successful, a collection of GameSession objects matching the request is returned. Use the pagination parameters to retrieve results as a set of sequential pages.

You can search for game sessions one fleet at a time only. To find game sessions across multiple fleets, you must search each fleet separately and combine the results. This search feature finds only game sessions that are in ACTIVE status. To locate games in statuses other than active, use DescribeGameSessionDetails.

Game-session-related operations include:

" + }, + "StartFleetActions":{ + "name":"StartFleetActions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartFleetActionsInput"}, + "output":{"shape":"StartFleetActionsOutput"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

Resumes activity on a fleet that was suspended with StopFleetActions. Currently, this operation is used to restart a fleet's auto-scaling activity.

To start fleet actions, specify the fleet ID and the type of actions to restart. When auto-scaling fleet actions are restarted, Amazon GameLift once again initiates scaling events as triggered by the fleet's scaling policies. If actions on the fleet were never stopped, this operation will have no effect. You can view a fleet's stopped actions using DescribeFleetAttributes.

Operations related to fleet capacity scaling include:

" }, "StartGameSessionPlacement":{ "name":"StartGameSessionPlacement", @@ -869,6 +886,22 @@ ], "documentation":"

Uses FlexMatch to create a game match for a group of players based on custom matchmaking rules, and starts a new game for the matched players. Each matchmaking request specifies the type of match to build (team configuration, rules for an acceptable match, etc.). The request also specifies the players to find a match for and where to host the new game session for optimal performance. A matchmaking request might start with a single player or a group of players who want to play together. FlexMatch finds additional players as needed to fill the match. Match type, rules, and the queue used to place a new game session are defined in a MatchmakingConfiguration. For complete information on setting up and using FlexMatch, see the topic Adding FlexMatch to Your Game.

To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, and include the players to be matched. You must also include a set of player attributes relevant for the matchmaking configuration. If successful, a matchmaking ticket is returned with status set to QUEUED. Track the status of the ticket to respond as needed and acquire game session connection information for successfully completed matches.

Tracking ticket status -- A couple of options are available for tracking the status of matchmaking requests:

  • Polling -- Call DescribeMatchmaking. This operation returns the full ticket object, including current status and (for completed tickets) game session connection info. We recommend polling no more than once every 10 seconds.

  • Notifications -- Get event notifications for changes in ticket status using Amazon Simple Notification Service (SNS). Notifications are easy to set up (see CreateMatchmakingConfiguration) and typically deliver match status changes faster and more efficiently than polling. We recommend that you use polling to back up to notifications (since delivery is not guaranteed) and call DescribeMatchmaking only when notifications are not received within 30 seconds.

Processing a matchmaking request -- FlexMatch handles a matchmaking request as follows:

  1. Your client code submits a StartMatchmaking request for one or more players and tracks the status of the request ticket.

  2. FlexMatch uses this ticket and others in process to build an acceptable match. When a potential match is identified, all tickets in the proposed match are advanced to the next status.

  3. If the match requires player acceptance (set in the matchmaking configuration), the tickets move into status REQUIRES_ACCEPTANCE. This status triggers your client code to solicit acceptance from all players in every ticket involved in the match, and then call AcceptMatch for each player. If any player rejects or fails to accept the match before a specified timeout, the proposed match is dropped (see AcceptMatch for more details).

  4. Once a match is proposed and accepted, the matchmaking tickets move into status PLACING. FlexMatch locates resources for a new game session using the game session queue (set in the matchmaking configuration) and creates the game session based on the match data.

  5. When the match is successfully placed, the matchmaking tickets move into COMPLETED status. Connection information (including game session endpoint and player session) is added to the matchmaking tickets. Matched players can use the connection information to join the game.

Matchmaking-related operations include:

" }, + "StopFleetActions":{ + "name":"StopFleetActions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopFleetActionsInput"}, + "output":{"shape":"StopFleetActionsOutput"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

Suspends activity on a fleet. Currently, this operation is used to stop a fleet's auto-scaling activity. It is used to temporarily stop scaling events triggered by the fleet's scaling policies. The policies can be retained and auto-scaling activity can be restarted using StartFleetActions. You can view a fleet's stopped actions using DescribeFleetAttributes.

To stop fleet actions, specify the fleet ID and the type of actions to suspend. When auto-scaling fleet actions are stopped, Amazon GameLift no longer initiates scaling events except to maintain the fleet's desired instances setting (FleetCapacity). Changes to the fleet's capacity must be done manually using UpdateFleetCapacity.

" + }, "StopGameSessionPlacement":{ "name":"StopGameSessionPlacement", "http":{ @@ -950,7 +983,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Updates fleet properties, including name and description, for a fleet. To update metadata, specify the fleet ID and the property values that you want to change. If successful, the fleet ID for the updated fleet is returned.

Fleet-related operations include:

" + "documentation":"

Updates fleet properties, including name and description, for a fleet. To update metadata, specify the fleet ID and the property values that you want to change. If successful, the fleet ID for the updated fleet is returned.

Fleet-related operations include:

" }, "UpdateFleetCapacity":{ "name":"UpdateFleetCapacity", @@ -969,7 +1002,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Updates capacity settings for a fleet. Use this action to specify the number of EC2 instances (hosts) that you want this fleet to contain. Before calling this action, you may want to call DescribeEC2InstanceLimits to get the maximum capacity based on the fleet's EC2 instance type.

If you're using autoscaling (see PutScalingPolicy), you may want to specify a minimum and/or maximum capacity. If you don't provide these, autoscaling can set capacity anywhere between zero and the service limits.

To update fleet capacity, specify the fleet ID and the number of instances you want the fleet to host. If successful, Amazon GameLift starts or terminates instances so that the fleet's active instance count matches the desired instance count. You can view a fleet's current capacity information by calling DescribeFleetCapacity. If the desired instance count is higher than the instance type's limit, the \"Limit Exceeded\" exception occurs.

Fleet-related operations include:

" + "documentation":"

Updates capacity settings for a fleet. Use this action to specify the number of EC2 instances (hosts) that you want this fleet to contain. Before calling this action, you may want to call DescribeEC2InstanceLimits to get the maximum capacity based on the fleet's EC2 instance type.

Specify minimum and maximum number of instances. Amazon GameLift will not change fleet capacity to values that fall outside of this range. This is particularly important when using auto-scaling (see PutScalingPolicy) to allow capacity to adjust based on player demand while imposing limits on automatic adjustments.

To update fleet capacity, specify the fleet ID and the number of instances you want the fleet to host. If successful, Amazon GameLift starts or terminates instances so that the fleet's active instance count matches the desired instance count. You can view a fleet's current capacity information by calling DescribeFleetCapacity. If the desired instance count is higher than the instance type's limit, the \"Limit Exceeded\" exception occurs.

Fleet-related operations include:

" }, "UpdateFleetPortSettings":{ "name":"UpdateFleetPortSettings", @@ -988,7 +1021,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Updates port settings for a fleet. To update settings, specify the fleet ID to be updated and list the permissions you want to update. List the permissions you want to add in InboundPermissionAuthorizations, and permissions you want to remove in InboundPermissionRevocations. Permissions to be removed must match existing fleet permissions. If successful, the fleet ID for the updated fleet is returned.

Fleet-related operations include:

" + "documentation":"

Updates port settings for a fleet. To update settings, specify the fleet ID to be updated and list the permissions you want to update. List the permissions you want to add in InboundPermissionAuthorizations, and permissions you want to remove in InboundPermissionRevocations. Permissions to be removed must match existing fleet permissions. If successful, the fleet ID for the updated fleet is returned.

Fleet-related operations include:

" }, "UpdateGameSession":{ "name":"UpdateGameSession", @@ -1055,7 +1088,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InvalidFleetStatusException"} ], - "documentation":"

Updates the current run-time configuration for the specified fleet, which tells Amazon GameLift how to launch server processes on instances in the fleet. You can update a fleet's run-time configuration at any time after the fleet is created; it does not need to be in an ACTIVE status.

To update run-time configuration, specify the fleet ID and provide a RuntimeConfiguration object with the updated collection of server process configurations.

Each instance in a Amazon GameLift fleet checks regularly for an updated run-time configuration and changes how it launches server processes to comply with the latest version. Existing server processes are not affected by the update; they continue to run until they end, while Amazon GameLift simply adds new server processes to fit the current run-time configuration. As a result, the run-time configuration changes are applied gradually as existing processes shut down and new processes are launched in Amazon GameLift's normal process recycling activity.

Fleet-related operations include:

" + "documentation":"

Updates the current run-time configuration for the specified fleet, which tells Amazon GameLift how to launch server processes on instances in the fleet. You can update a fleet's run-time configuration at any time after the fleet is created; it does not need to be in an ACTIVE status.

To update run-time configuration, specify the fleet ID and provide a RuntimeConfiguration object with the updated collection of server process configurations.

Each instance in an Amazon GameLift fleet checks regularly for an updated run-time configuration and changes how it launches server processes to comply with the latest version. Existing server processes are not affected by the update; they continue to run until they end, while Amazon GameLift simply adds new server processes to fit the current run-time configuration. As a result, the run-time configuration changes are applied gradually as existing processes shut down and new processes are launched in Amazon GameLift's normal process recycling activity.

Fleet-related operations include:

" }, "ValidateMatchmakingRuleSet":{ "name":"ValidateMatchmakingRuleSet", @@ -1380,7 +1413,7 @@ }, "NewGameSessionProtectionPolicy":{ "shape":"ProtectionPolicy", - "documentation":"

Game session protection policy to apply to all instances in this fleet. If this parameter is not set, instances in this fleet default to no protection. You can change a fleet's protection policy using UpdateFleetAttributes, but this change will only affect sessions created after the policy change. You can also set protection for individual instances using UpdateGameSession.

  • NoProtection -- The game session can be terminated during a scale-down event.

  • FullProtection -- If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event.

" + "documentation":"

Game session protection policy to apply to all instances in this fleet. If this parameter is not set, instances in this fleet default to no protection. You can change a fleet's protection policy using UpdateFleetAttributes, but this change will only affect sessions created after the policy change. You can also set protection for individual instances using UpdateGameSession.

  • NoProtection -- The game session can be terminated during a scale-down event.

  • FullProtection -- If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event.

" }, "RuntimeConfiguration":{ "shape":"RuntimeConfiguration", @@ -2535,7 +2568,7 @@ "documentation":"

Number of instances in the fleet that are no longer active but haven't yet been terminated.

" } }, - "documentation":"

Current status of fleet capacity. The number of active instances should match or be in the process of matching the number of desired instances. Pending and terminating counts are non-zero only if fleet capacity is adjusting to an UpdateFleetCapacity request, or if access to resources is temporarily affected.

Fleet-related operations include:

" + "documentation":"

Current status of fleet capacity. The number of active instances should match or be in the process of matching the number of desired instances. Pending and terminating counts are non-zero only if fleet capacity is adjusting to an UpdateFleetCapacity request, or if access to resources is temporarily affected.

Fleet-related operations include:

" }, "EC2InstanceLimit":{ "type":"structure", @@ -2670,6 +2703,16 @@ "type":"list", "member":{"shape":"Event"} }, + "FleetAction":{ + "type":"string", + "enum":["AUTO_SCALING"] + }, + "FleetActionList":{ + "type":"list", + "member":{"shape":"FleetAction"}, + "max":1, + "min":1 + }, "FleetAttributes":{ "type":"structure", "members":{ @@ -2740,9 +2783,13 @@ "MetricGroups":{ "shape":"MetricGroupList", "documentation":"

Names of metric groups that this fleet is included in. In Amazon CloudWatch, you can view metrics for an individual fleet or aggregated metrics for fleets that are in a fleet metric group. A fleet can be included in only one metric group at a time.

" + }, + "StoppedActions":{ + "shape":"FleetActionList", + "documentation":"

List of fleet actions that have been suspended using StopFleetActions. This includes auto-scaling.

" } }, - "documentation":"

General properties describing a fleet.

Fleet-related operations include:

" + "documentation":"

General properties describing a fleet.

Fleet-related operations include:

" }, "FleetAttributesList":{ "type":"list", @@ -2764,7 +2811,7 @@ "documentation":"

Current status of fleet capacity.

" } }, - "documentation":"

Information about the fleet's capacity. Fleet capacity is measured in EC2 instances. By default, new fleets have a capacity of one instance, but can be updated as needed. The maximum number of instances for a fleet is determined by the fleet's instance type.

Fleet-related operations include:

" + "documentation":"

Information about the fleet's capacity. Fleet capacity is measured in EC2 instances. By default, new fleets have a capacity of one instance, but can be updated as needed. The maximum number of instances for a fleet is determined by the fleet's instance type.

Fleet-related operations include:

" }, "FleetCapacityExceededException":{ "type":"structure", @@ -2832,7 +2879,7 @@ "documentation":"

Maximum players allowed across all game sessions currently being hosted on all instances in the fleet.

" } }, - "documentation":"

Current status of fleet utilization, including the number of game and player sessions being hosted.

Fleet-related operations include:

" + "documentation":"

Current status of fleet utilization, including the number of game and player sessions being hosted.

Fleet-related operations include:

" }, "FleetUtilizationList":{ "type":"list", @@ -2936,7 +2983,7 @@ }, "MatchmakerData":{ "shape":"MatchmakerData", - "documentation":"

Information about the matchmaking process that was used to create the game session. It is in JSON syntax, formated as a string. In addition the matchmaking configuration used, it contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data. Matchmaker data is useful when requesting match backfills, and is updated whenever new players are added during a successful backfill (see StartMatchBackfill).

" + "documentation":"

Information about the matchmaking process that was used to create the game session. It is in JSON syntax, formatted as a string. In addition to the matchmaking configuration used, it contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data. Matchmaker data is useful when requesting match backfills, and is updated whenever new players are added during a successful backfill (see StartMatchBackfill).

" } }, "documentation":"

Properties describing a game session.

A game session in ACTIVE status can host players. When a game session ends, its status is set to TERMINATED.

Once the session ends, the game session object is retained for 30 days. This means you can reuse idempotency token values after this time. Game session logs are retained for 14 days.

Game-session-related operations include:

" @@ -3072,7 +3119,7 @@ }, "MatchmakerData":{ "shape":"MatchmakerData", - "documentation":"

Information on the matchmaking process for this game. Data is in JSON syntax, formated as a string. It identifies the matchmaking configuration used to create the match, and contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data.

" + "documentation":"

Information on the matchmaking process for this game. Data is in JSON syntax, formatted as a string. It identifies the matchmaking configuration used to create the match, and contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data.

" } }, "documentation":"

Object that describes a StartGameSessionPlacement request. This object includes the full details of the original request plus the current status and start/end time stamps.

Game session placement-related operations include:

" @@ -3933,6 +3980,13 @@ "TIMEDOUT" ] }, + "PolicyType":{ + "type":"string", + "enum":[ + "RuleBased", + "TargetBased" + ] + }, "PortNumber":{ "type":"integer", "max":60000, @@ -3958,11 +4012,6 @@ "required":[ "Name", "FleetId", - "ScalingAdjustment", - "ScalingAdjustmentType", - "Threshold", - "ComparisonOperator", - "EvaluationPeriods", "MetricName" ], "members":{ @@ -3972,7 +4021,7 @@ }, "FleetId":{ "shape":"FleetId", - "documentation":"

Unique identifier for a fleet to apply this policy to.

" + "documentation":"

Unique identifier for a fleet to apply this policy to. The fleet cannot be in any of the following statuses: ERROR or DELETING.

" }, "ScalingAdjustment":{ "shape":"Integer", @@ -3996,7 +4045,15 @@ }, "MetricName":{ "shape":"MetricName", - "documentation":"

Name of the Amazon GameLift-defined metric that is used to trigger an adjustment.

  • ActivatingGameSessions -- number of game sessions in the process of being created (game session status = ACTIVATING).

  • ActiveGameSessions -- number of game sessions currently running (game session status = ACTIVE).

  • CurrentPlayerSessions -- number of active or reserved player sessions (player session status = ACTIVE or RESERVED).

  • AvailablePlayerSessions -- number of player session slots currently available in active game sessions across the fleet, calculated by subtracting a game session's current player session count from its maximum player session count. This number includes game sessions that are not currently accepting players (game session PlayerSessionCreationPolicy = DENY_ALL).

  • ActiveInstances -- number of instances currently running a game session.

  • IdleInstances -- number of instances not currently running a game session.

" + "documentation":"

Name of the Amazon GameLift-defined metric that is used to trigger a scaling adjustment. For detailed descriptions of fleet metrics, see Monitor Amazon GameLift with Amazon CloudWatch.

  • ActivatingGameSessions -- Game sessions in the process of being created.

  • ActiveGameSessions -- Game sessions that are currently running.

  • ActiveInstances -- Fleet instances that are currently running at least one game session.

  • AvailableGameSessions -- Additional game sessions that fleet could host simultaneously, given current capacity.

  • AvailablePlayerSessions -- Empty player slots in currently active game sessions. This includes game sessions that are not currently accepting players. Reserved player slots are not included.

  • CurrentPlayerSessions -- Player slots in active game sessions that are being used by a player or are reserved for a player.

  • IdleInstances -- Active instances that are currently hosting zero game sessions.

  • PercentAvailableGameSessions -- Unused percentage of the total number of game sessions that a fleet could host simultaneously, given current capacity. Use this metric for a target-based scaling policy.

  • PercentIdleInstances -- Percentage of the total number of active instances that are hosting zero game sessions.

  • QueueDepth -- Pending game session placement requests, in any queue, where the current fleet is the top-priority destination.

  • WaitTime -- Current wait time for pending game session placement requests, in any queue, where the current fleet is the top-priority destination.

" + }, + "PolicyType":{ + "shape":"PolicyType", + "documentation":"

Type of scaling policy to create. For a target-based policy, set the parameter MetricName to 'PercentAvailableGameSessions' and specify a TargetConfiguration. For a rule-based policy set the following parameters: MetricName, ComparisonOperator, Threshold, EvaluationPeriods, ScalingAdjustmentType, and ScalingAdjustment.

" + }, + "TargetConfiguration":{ + "shape":"TargetConfiguration", + "documentation":"

Object that contains settings for a target-based scaling policy.

" } }, "documentation":"

Represents the input for a request action.

" @@ -4091,7 +4148,7 @@ "documentation":"

Message text to be used with a terminal routing strategy.

" } }, - "documentation":"

Routing configuration for a fleet alias.

Fleet-related operations include:

" + "documentation":"

Routing configuration for a fleet alias.

Fleet-related operations include:

" }, "RoutingStrategyType":{ "type":"string", @@ -4126,7 +4183,7 @@ "documentation":"

Maximum amount of time (in seconds) that a game session can remain in status ACTIVATING. If the game session is not active before the timeout, activation is terminated and the game session status is changed to TERMINATED.

" } }, - "documentation":"

A collection of server process configurations that describe what processes to run on each instance in a fleet. All fleets must have a run-time configuration. Each instance in the fleet launches the server processes specified in the run-time configuration and launches new ones as existing processes end. Each instance regularly checks for an updated run-time configuration and follows the new instructions.

The run-time configuration enables the instances in a fleet to run multiple processes simultaneously. Potential scenarios are as follows: (1) Run multiple processes of a single game server executable to maximize usage of your hosting resources. (2) Run one or more processes of different build executables, such as your game server executable and a related program, or two or more different versions of a game server. (3) Run multiple processes of a single game server but with different launch parameters, for example to run one process on each instance in debug mode.

A Amazon GameLift instance is limited to 50 processes running simultaneously. A run-time configuration must specify fewer than this limit. To calculate the total number of processes specified in a run-time configuration, add the values of the ConcurrentExecutions parameter for each ServerProcess object in the run-time configuration.

Fleet-related operations include:

" + "documentation":"

A collection of server process configurations that describe what processes to run on each instance in a fleet. All fleets must have a run-time configuration. Each instance in the fleet launches the server processes specified in the run-time configuration and launches new ones as existing processes end. Each instance regularly checks for an updated run-time configuration and follows the new instructions.

The run-time configuration enables the instances in a fleet to run multiple processes simultaneously. Potential scenarios are as follows: (1) Run multiple processes of a single game server executable to maximize usage of your hosting resources. (2) Run one or more processes of different build executables, such as your game server executable and a related program, or two or more different versions of a game server. (3) Run multiple processes of a single game server but with different launch parameters, for example to run one process on each instance in debug mode.

An Amazon GameLift instance is limited to 50 processes running simultaneously. A run-time configuration must specify fewer than this limit. To calculate the total number of processes specified in a run-time configuration, add the values of the ConcurrentExecutions parameter for each ServerProcess object in the run-time configuration.

Fleet-related operations include:

" }, "S3Location":{ "type":"structure", @@ -4167,7 +4224,7 @@ }, "Status":{ "shape":"ScalingStatusType", - "documentation":"

Current status of the scaling policy. The scaling policy is only in force when in an ACTIVE status.

  • ACTIVE -- The scaling policy is currently in force.

  • UPDATE_REQUESTED -- A request to update the scaling policy has been received.

  • UPDATING -- A change is being made to the scaling policy.

  • DELETE_REQUESTED -- A request to delete the scaling policy has been received.

  • DELETING -- The scaling policy is being deleted.

  • DELETED -- The scaling policy has been deleted.

  • ERROR -- An error occurred in creating the policy. It should be removed and recreated.

" + "documentation":"

Current status of the scaling policy. The scaling policy can be in force only when in an ACTIVE status. Scaling policies can be suspended for individual fleets (see StopFleetActions); if suspended for a fleet, the policy status does not change. View a fleet's stopped actions by calling DescribeFleetCapacity.

  • ACTIVE -- The scaling policy can be used for auto-scaling a fleet.

  • UPDATE_REQUESTED -- A request to update the scaling policy has been received.

  • UPDATING -- A change is being made to the scaling policy.

  • DELETE_REQUESTED -- A request to delete the scaling policy has been received.

  • DELETING -- The scaling policy is being deleted.

  • DELETED -- The scaling policy has been deleted.

  • ERROR -- An error occurred in creating the policy. It should be removed and recreated.

" }, "ScalingAdjustment":{ "shape":"Integer", @@ -4191,10 +4248,18 @@ }, "MetricName":{ "shape":"MetricName", - "documentation":"

Name of the Amazon GameLift-defined metric that is used to trigger an adjustment.

  • ActivatingGameSessions -- number of game sessions in the process of being created (game session status = ACTIVATING).

  • ActiveGameSessions -- number of game sessions currently running (game session status = ACTIVE).

  • CurrentPlayerSessions -- number of active or reserved player sessions (player session status = ACTIVE or RESERVED).

  • AvailablePlayerSessions -- number of player session slots currently available in active game sessions across the fleet, calculated by subtracting a game session's current player session count from its maximum player session count. This number does include game sessions that are not currently accepting players (game session PlayerSessionCreationPolicy = DENY_ALL).

  • ActiveInstances -- number of instances currently running a game session.

  • IdleInstances -- number of instances not currently running a game session.

" + "documentation":"

Name of the Amazon GameLift-defined metric that is used to trigger a scaling adjustment. For detailed descriptions of fleet metrics, see Monitor Amazon GameLift with Amazon CloudWatch.

  • ActivatingGameSessions -- Game sessions in the process of being created.

  • ActiveGameSessions -- Game sessions that are currently running.

  • ActiveInstances -- Fleet instances that are currently running at least one game session.

  • AvailableGameSessions -- Additional game sessions that fleet could host simultaneously, given current capacity.

  • AvailablePlayerSessions -- Empty player slots in currently active game sessions. This includes game sessions that are not currently accepting players. Reserved player slots are not included.

  • CurrentPlayerSessions -- Player slots in active game sessions that are being used by a player or are reserved for a player.

  • IdleInstances -- Active instances that are currently hosting zero game sessions.

  • PercentAvailableGameSessions -- Unused percentage of the total number of game sessions that a fleet could host simultaneously, given current capacity. Use this metric for a target-based scaling policy.

  • PercentIdleInstances -- Percentage of the total number of active instances that are hosting zero game sessions.

  • QueueDepth -- Pending game session placement requests, in any queue, where the current fleet is the top-priority destination.

  • WaitTime -- Current wait time for pending game session placement requests, in any queue, where the current fleet is the top-priority destination.

" + }, + "PolicyType":{ + "shape":"PolicyType", + "documentation":"

Type of scaling policy to create. For a target-based policy, set the parameter MetricName to 'PercentAvailableGameSessions' and specify a TargetConfiguration. For a rule-based policy set the following parameters: MetricName, ComparisonOperator, Threshold, EvaluationPeriods, ScalingAdjustmentType, and ScalingAdjustment.

" + }, + "TargetConfiguration":{ + "shape":"TargetConfiguration", + "documentation":"

Object that contains settings for a target-based scaling policy.

" } }, - "documentation":"

Rule that controls how a fleet is scaled. Scaling policies are uniquely identified by the combination of name and fleet ID.

Fleet-related operations include:

" + "documentation":"

Rule that controls how a fleet is scaled. Scaling policies are uniquely identified by the combination of name and fleet ID.

Operations related to fleet capacity scaling include:

" }, "ScalingPolicyList":{ "type":"list", @@ -4290,6 +4355,28 @@ "min":0, "pattern":"[a-zA-Z0-9:_/-]*" }, + "StartFleetActionsInput":{ + "type":"structure", + "required":[ + "FleetId", + "Actions" + ], + "members":{ + "FleetId":{ + "shape":"FleetId", + "documentation":"

Unique identifier for a fleet

" + }, + "Actions":{ + "shape":"FleetActionList", + "documentation":"

List of actions to restart on the fleet.

" + } + } + }, + "StartFleetActionsOutput":{ + "type":"structure", + "members":{ + } + }, "StartGameSessionPlacementInput":{ "type":"structure", "required":[ @@ -4412,6 +4499,28 @@ }, "documentation":"

Represents the returned data in response to a request action.

" }, + "StopFleetActionsInput":{ + "type":"structure", + "required":[ + "FleetId", + "Actions" + ], + "members":{ + "FleetId":{ + "shape":"FleetId", + "documentation":"

Unique identifier for a fleet

" + }, + "Actions":{ + "shape":"FleetActionList", + "documentation":"

List of actions to suspend on the fleet.

" + } + } + }, + "StopFleetActionsOutput":{ + "type":"structure", + "members":{ + } + }, "StopGameSessionPlacementInput":{ "type":"structure", "required":["PlacementId"], @@ -4459,6 +4568,17 @@ "member":{"shape":"NonZeroAndMaxString"} }, "StringModel":{"type":"string"}, + "TargetConfiguration":{ + "type":"structure", + "required":["TargetValue"], + "members":{ + "TargetValue":{ + "shape":"Double", + "documentation":"

Desired value to use with a target-based scaling policy. The value must be relevant for whatever metric the scaling policy is using. For example, in a policy using the metric PercentAvailableGameSessions, the target value should be the preferred size of the fleet's buffer (the percent of capacity that should be idle and ready for new game sessions).

" + } + }, + "documentation":"

Settings for a target-based scaling policy (see ScalingPolicy). A target-based policy tracks a particular fleet metric and specifies a target value for the metric. As player usage changes, the policy triggers Amazon GameLift to adjust capacity so that the metric returns to the target value. The target configuration specifies settings as needed for the target-based policy, including the target value.

Operations related to fleet capacity scaling include:

" + }, "TerminalRoutingStrategyException":{ "type":"structure", "members":{ @@ -4916,5 +5036,5 @@ "min":0 } }, - "documentation":"Amazon GameLift Service

Amazon GameLift is a managed service for developers who need a scalable, dedicated server solution for their multiplayer games. Use Amazon GameLift for these tasks: (1) set up computing resources and deploy your game servers, (2) run game sessions and get players into games, (3) automatically scale your resources to meet player demand and manage costs, and (4) track in-depth metrics on game server performance and player usage.

The Amazon GameLift service API includes two important function sets:

  • Manage game sessions and player access -- Retrieve information on available game sessions; create new game sessions; send player requests to join a game session.

  • Configure and manage game server resources -- Manage builds, fleets, queues, and aliases; set autoscaling policies; retrieve logs and metrics.

This reference guide describes the low-level service API for Amazon GameLift. You can use the API functionality with these tools:

  • The Amazon Web Services software development kit (AWS SDK) is available in multiple languages including C++ and C#. Use the SDK to access the API programmatically from an application, such as a game client.

  • The AWS command-line interface (CLI) tool is primarily useful for handling administrative actions, such as setting up and managing Amazon GameLift settings and resources. You can use the AWS CLI to manage all of your AWS services.

  • The AWS Management Console for Amazon GameLift provides a web interface to manage your Amazon GameLift settings and resources. The console includes a dashboard for tracking key resources, including builds and fleets, and displays usage and performance metrics for your games as customizable graphs.

  • Amazon GameLift Local is a tool for testing your game's integration with Amazon GameLift before deploying it on the service. This tools supports a subset of key API actions, which can be called from either the AWS CLI or programmatically. See Testing an Integration.

Learn more

API SUMMARY

This list offers a functional overview of the Amazon GameLift service API.

Managing Games and Players

Use these actions to start new game sessions, find existing game sessions, track game session status and other information, and enable player access to game sessions.

  • Discover existing game sessions

    • SearchGameSessions -- Retrieve all available game sessions or search for game sessions that match a set of criteria.

  • Start new game sessions

    • Start new games with Queues to find the best available hosting resources across multiple regions, minimize player latency, and balance game session activity for efficiency and cost effectiveness.

    • CreateGameSession -- Start a new game session on a specific fleet. Available in Amazon GameLift Local.

  • Match players to game sessions with FlexMatch matchmaking

    • StartMatchmaking -- Request matchmaking for one players or a group who want to play together.

    • StartMatchBackfill - Request additional player matches to fill empty slots in an existing game session.

    • DescribeMatchmaking -- Get details on a matchmaking request, including status.

    • AcceptMatch -- Register that a player accepts a proposed match, for matches that require player acceptance.

    • StopMatchmaking -- Cancel a matchmaking request.

  • Manage game session data

    • DescribeGameSessions -- Retrieve metadata for one or more game sessions, including length of time active and current player count. Available in Amazon GameLift Local.

    • DescribeGameSessionDetails -- Retrieve metadata and the game session protection setting for one or more game sessions.

    • UpdateGameSession -- Change game session settings, such as maximum player count and join policy.

    • GetGameSessionLogUrl -- Get the location of saved logs for a game session.

  • Manage player sessions

    • CreatePlayerSession -- Send a request for a player to join a game session. Available in Amazon GameLift Local.

    • CreatePlayerSessions -- Send a request for multiple players to join a game session. Available in Amazon GameLift Local.

    • DescribePlayerSessions -- Get details on player activity, including status, playing time, and player data. Available in Amazon GameLift Local.

Setting Up and Managing Game Servers

When setting up Amazon GameLift resources for your game, you first create a game build and upload it to Amazon GameLift. You can then use these actions to configure and manage a fleet of resources to run your game servers, scale capacity to meet player demand, access performance and utilization metrics, and more.

" + "documentation":"Amazon GameLift Service

Amazon GameLift is a managed service for developers who need a scalable, dedicated server solution for their multiplayer games. Use Amazon GameLift for these tasks: (1) set up computing resources and deploy your game servers, (2) run game sessions and get players into games, (3) automatically scale your resources to meet player demand and manage costs, and (4) track in-depth metrics on game server performance and player usage.

The Amazon GameLift service API includes two important function sets:

  • Manage game sessions and player access -- Retrieve information on available game sessions; create new game sessions; send player requests to join a game session.

  • Configure and manage game server resources -- Manage builds, fleets, queues, and aliases; set auto-scaling policies; retrieve logs and metrics.

This reference guide describes the low-level service API for Amazon GameLift. You can use the API functionality with these tools:

  • The Amazon Web Services software development kit (AWS SDK) is available in multiple languages including C++ and C#. Use the SDK to access the API programmatically from an application, such as a game client.

  • The AWS command-line interface (CLI) tool is primarily useful for handling administrative actions, such as setting up and managing Amazon GameLift settings and resources. You can use the AWS CLI to manage all of your AWS services.

  • The AWS Management Console for Amazon GameLift provides a web interface to manage your Amazon GameLift settings and resources. The console includes a dashboard for tracking key resources, including builds and fleets, and displays usage and performance metrics for your games as customizable graphs.

  • Amazon GameLift Local is a tool for testing your game's integration with Amazon GameLift before deploying it on the service. This tool supports a subset of key API actions, which can be called from either the AWS CLI or programmatically. See Testing an Integration.

Learn more

API SUMMARY

This list offers a functional overview of the Amazon GameLift service API.

Managing Games and Players

Use these actions to start new game sessions, find existing game sessions, track game session status and other information, and enable player access to game sessions.

  • Discover existing game sessions

    • SearchGameSessions -- Retrieve all available game sessions or search for game sessions that match a set of criteria.

  • Start new game sessions

    • Start new games with Queues to find the best available hosting resources across multiple regions, minimize player latency, and balance game session activity for efficiency and cost effectiveness.

    • CreateGameSession -- Start a new game session on a specific fleet. Available in Amazon GameLift Local.

  • Match players to game sessions with FlexMatch matchmaking

    • StartMatchmaking -- Request matchmaking for one player or a group who want to play together.

    • StartMatchBackfill -- Request additional player matches to fill empty slots in an existing game session.

    • DescribeMatchmaking -- Get details on a matchmaking request, including status.

    • AcceptMatch -- Register that a player accepts a proposed match, for matches that require player acceptance.

    • StopMatchmaking -- Cancel a matchmaking request.

  • Manage game session data

    • DescribeGameSessions -- Retrieve metadata for one or more game sessions, including length of time active and current player count. Available in Amazon GameLift Local.

    • DescribeGameSessionDetails -- Retrieve metadata and the game session protection setting for one or more game sessions.

    • UpdateGameSession -- Change game session settings, such as maximum player count and join policy.

    • GetGameSessionLogUrl -- Get the location of saved logs for a game session.

  • Manage player sessions

    • CreatePlayerSession -- Send a request for a player to join a game session. Available in Amazon GameLift Local.

    • CreatePlayerSessions -- Send a request for multiple players to join a game session. Available in Amazon GameLift Local.

    • DescribePlayerSessions -- Get details on player activity, including status, playing time, and player data. Available in Amazon GameLift Local.

Setting Up and Managing Game Servers

When setting up Amazon GameLift resources for your game, you first create a game build and upload it to Amazon GameLift. You can then use these actions to configure and manage a fleet of resources to run your game servers, scale capacity to meet player demand, access performance and utilization metrics, and more.

" } diff --git a/botocore/data/glacier/2012-06-01/service-2.json b/botocore/data/glacier/2012-06-01/service-2.json index 357a3154..57a8d8e2 100644 --- a/botocore/data/glacier/2012-06-01/service-2.json +++ b/botocore/data/glacier/2012-06-01/service-2.json @@ -6,6 +6,7 @@ "endpointPrefix":"glacier", "protocol":"rest-json", "serviceFullName":"Amazon Glacier", + "serviceId":"Glacier", "signatureVersion":"v4", "uid":"glacier-2012-06-01" }, diff --git a/botocore/data/glue/2017-03-31/service-2.json b/botocore/data/glue/2017-03-31/service-2.json index b50f20fb..a8e5c661 100644 --- a/botocore/data/glue/2017-03-31/service-2.json +++ b/botocore/data/glue/2017-03-31/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceFullName":"AWS Glue", + "serviceId":"Glue", "signatureVersion":"v4", "targetPrefix":"AWSGlue", "uid":"glue-2017-03-31" @@ -167,7 +168,7 @@ {"shape":"OperationTimeoutException"}, {"shape":"ResourceNumberLimitExceededException"} ], - "documentation":"

Creates a new crawler with specified targets, role, configuration, and optional schedule. At least one crawl target must be specified, in either the s3Targets or the jdbcTargets field.

" + "documentation":"

Creates a new crawler with specified targets, role, configuration, and optional schedule. At least one crawl target must be specified, in the s3Targets field, the jdbcTargets field, or the DynamoDBTargets field.

" }, "CreateDatabase":{ "name":"CreateDatabase", @@ -969,7 +970,7 @@ {"shape":"CrawlerRunningException"}, {"shape":"OperationTimeoutException"} ], - "documentation":"

Starts a crawl using the specified crawler, regardless of what is scheduled. If the crawler is already running, does nothing.

" + "documentation":"

Starts a crawl using the specified crawler, regardless of what is scheduled. If the crawler is already running, returns a CrawlerRunningException.

" }, "StartCrawlerSchedule":{ "name":"StartCrawlerSchedule", @@ -1282,6 +1283,10 @@ "Timeout":{ "shape":"Timeout", "documentation":"

The job run timeout in minutes. It overrides the timeout value of the job.

" + }, + "NotificationProperty":{ + "shape":"NotificationProperty", + "documentation":"

Specifies configuration properties of a job run notification.

" } }, "documentation":"

Defines an action to be initiated by a trigger.

" @@ -1675,7 +1680,7 @@ "documentation":"

A JsonClassifier object.

" } }, - "documentation":"

Classifiers are written in Python and triggered during a crawl task. You can write your own classifiers to best categorize your data sources and specify the appropriate schemas to use for them. A classifier checks whether a given file is in a format it can handle, and if it is, the classifier creates a schema in the form of a StructType object that matches that data format.

A classifier can be a grok classifier, an XML classifier, or a JSON classifier, asspecified in one of the fields in the Classifier object.

" + "documentation":"

Classifiers are triggered during a crawl task. A classifier checks whether a given file is in a format it can handle, and if it is, the classifier creates a schema in the form of a StructType object that matches that data format.

You can use the standard classifiers that AWS Glue supplies, or you can write your own classifiers to best categorize your data sources and specify the appropriate schemas to use for them. A classifier can be a grok classifier, an XML classifier, or a JSON classifier, as specified in one of the fields in the Classifier object.

" }, "ClassifierList":{ "type":"list", @@ -2042,7 +2047,7 @@ }, "Configuration":{ "shape":"CrawlerConfiguration", - "documentation":"

Crawler configuration information. This versioned JSON string allows users to specify aspects of a Crawler's behavior.

You can use this field to force partitions to inherit metadata such as classification, input format, output format, serde information, and schema from their parent table, rather than detect this information separately for each partition. Use the following JSON string to specify that behavior:

Example: '{ \"Version\": 1.0, \"CrawlerOutput\": { \"Partitions\": { \"AddOrUpdateBehavior\": \"InheritFromTable\" } } }'

" + "documentation":"

Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see Configuring a Crawler.

" } }, "documentation":"

Specifies a crawler program that examines a data source and uses classifiers to try to determine its schema. If successful, the crawler records metadata concerning the data source in the AWS Glue Data Catalog.

" @@ -2151,6 +2156,10 @@ "JdbcTargets":{ "shape":"JdbcTargetList", "documentation":"

Specifies JDBC targets.

" + }, + "DynamoDBTargets":{ + "shape":"DynamoDBTargetList", + "documentation":"

Specifies DynamoDB targets.

" } }, "documentation":"

Specifies data stores to crawl.

" @@ -2231,7 +2240,7 @@ }, "Classifiers":{ "shape":"ClassifierNameList", - "documentation":"

A list of custom classifiers that the user has registered. By default, all AWS classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification.

" + "documentation":"

A list of custom classifiers that the user has registered. By default, all built-in classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification.

" }, "TablePrefix":{ "shape":"TablePrefix", @@ -2243,7 +2252,7 @@ }, "Configuration":{ "shape":"CrawlerConfiguration", - "documentation":"

Crawler configuration information. This versioned JSON string allows users to specify aspects of a Crawler's behavior.

You can use this field to force partitions to inherit metadata such as classification, input format, output format, serde information, and schema from their parent table, rather than detect this information separately for each partition. Use the following JSON string to specify that behavior:

Example: '{ \"Version\": 1.0, \"CrawlerOutput\": { \"Partitions\": { \"AddOrUpdateBehavior\": \"InheritFromTable\" } } }'

" + "documentation":"

Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see Configuring a Crawler.

" } } }, @@ -2451,6 +2460,10 @@ "Timeout":{ "shape":"Timeout", "documentation":"

The job timeout in minutes. The default is 2880 minutes (48 hours).

" + }, + "NotificationProperty":{ + "shape":"NotificationProperty", + "documentation":"

Specifies configuration properties of a job notification.

" } } }, @@ -3094,6 +3107,20 @@ "type":"list", "member":{"shape":"DevEndpoint"} }, + "DynamoDBTarget":{ + "type":"structure", + "members":{ + "Path":{ + "shape":"Path", + "documentation":"

The name of the DynamoDB table to crawl.

" + } + }, + "documentation":"

Specifies a DynamoDB table to crawl.

" + }, + "DynamoDBTargetList":{ + "type":"list", + "member":{"shape":"DynamoDBTarget"} + }, "EntityNotFoundException":{ "type":"structure", "members":{ @@ -3135,6 +3162,7 @@ }, "documentation":"

An execution property of a job.

" }, + "ExecutionTime":{"type":"integer"}, "FieldType":{"type":"string"}, "FilterString":{ "type":"string", @@ -4196,6 +4224,10 @@ "Timeout":{ "shape":"Timeout", "documentation":"

The job timeout in minutes.

" + }, + "NotificationProperty":{ + "shape":"NotificationProperty", + "documentation":"

Specifies configuration properties of a job notification.

" } }, "documentation":"

Specifies a job definition.

" @@ -4301,12 +4333,16 @@ "documentation":"

The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

" }, "ExecutionTime":{ - "shape":"IntegerValue", + "shape":"ExecutionTime", "documentation":"

The amount of time (in seconds) that the job run consumed resources.

" }, "Timeout":{ "shape":"Timeout", "documentation":"

The job run timeout in minutes.

" + }, + "NotificationProperty":{ + "shape":"NotificationProperty", + "documentation":"

Specifies configuration properties of a job run notification.

" } }, "documentation":"

Contains information about a job run.

" @@ -4369,6 +4405,10 @@ "Timeout":{ "shape":"Timeout", "documentation":"

The job timeout in minutes. The default is 2880 minutes (48 hours).

" + }, + "NotificationProperty":{ + "shape":"NotificationProperty", + "documentation":"

Specifies configuration properties of a job notification.

" } }, "documentation":"

Specifies information used to update an existing job definition. Note that the previous job definition will be completely overwritten by this information.

" @@ -4466,6 +4506,10 @@ "S3":{ "shape":"CodeGenNodeArgs", "documentation":"

An Amazon S3 location.

" + }, + "DynamoDB":{ + "shape":"CodeGenNodeArgs", + "documentation":"

A DynamoDB Table location.

" } }, "documentation":"

The location of resources.

" @@ -4582,6 +4626,21 @@ "type":"integer", "min":0 }, + "NotificationProperty":{ + "type":"structure", + "members":{ + "NotifyDelayAfter":{ + "shape":"NotifyDelayAfter", + "documentation":"

After a job run starts, the number of minutes to wait before sending a job run delay notification.

" + } + }, + "documentation":"

Specifies configuration properties of a notification.

" + }, + "NotifyDelayAfter":{ + "type":"integer", + "box":true, + "min":1 + }, "OperationTimeoutException":{ "type":"structure", "members":{ @@ -5071,6 +5130,10 @@ "Timeout":{ "shape":"Timeout", "documentation":"

The job run timeout in minutes. It overrides the timeout value of the job.

" + }, + "NotificationProperty":{ + "shape":"NotificationProperty", + "documentation":"

Specifies configuration properties of a job run notification.

" } } }, @@ -5596,7 +5659,7 @@ }, "Classifiers":{ "shape":"ClassifierNameList", - "documentation":"

A list of custom classifiers that the user has registered. By default, all classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification.

" + "documentation":"

A list of custom classifiers that the user has registered. By default, all built-in classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification.

" }, "TablePrefix":{ "shape":"TablePrefix", @@ -5608,7 +5671,7 @@ }, "Configuration":{ "shape":"CrawlerConfiguration", - "documentation":"

Crawler configuration information. This versioned JSON string allows users to specify aspects of a Crawler's behavior.

You can use this field to force partitions to inherit metadata such as classification, input format, output format, serde information, and schema from their parent table, rather than detect this information separately for each partition. Use the following JSON string to specify that behavior:

Example: '{ \"Version\": 1.0, \"CrawlerOutput\": { \"Partitions\": { \"AddOrUpdateBehavior\": \"InheritFromTable\" } } }'

" + "documentation":"

Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see Configuring a Crawler.

" } } }, diff --git a/botocore/data/greengrass/2017-06-07/service-2.json b/botocore/data/greengrass/2017-06-07/service-2.json index 094c90d4..7e6e1697 100644 --- a/botocore/data/greengrass/2017-06-07/service-2.json +++ b/botocore/data/greengrass/2017-06-07/service-2.json @@ -4,6 +4,7 @@ "endpointPrefix" : "greengrass", "signingName" : "greengrass", "serviceFullName" : "AWS Greengrass", + "serviceId" : "Greengrass", "protocol" : "rest-json", "jsonVersion" : "1.1", "uid" : "greengrass-2017-06-07", diff --git a/botocore/data/health/2016-08-04/service-2.json b/botocore/data/health/2016-08-04/service-2.json index bacf55c2..89eb5029 100644 --- a/botocore/data/health/2016-08-04/service-2.json +++ b/botocore/data/health/2016-08-04/service-2.json @@ -7,6 +7,7 @@ "protocol":"json", "serviceAbbreviation":"AWSHealth", "serviceFullName":"AWS Health APIs and Notifications", + "serviceId":"Health", "signatureVersion":"v4", "targetPrefix":"AWSHealth_20160804", "uid":"health-2016-08-04" diff --git a/botocore/data/importexport/2010-06-01/service-2.json b/botocore/data/importexport/2010-06-01/service-2.json index be353107..64d0ad11 100644 --- a/botocore/data/importexport/2010-06-01/service-2.json +++ b/botocore/data/importexport/2010-06-01/service-2.json @@ -6,6 +6,7 @@ "endpointPrefix":"importexport", "globalEndpoint":"importexport.amazonaws.com", "serviceFullName":"AWS Import/Export", + "serviceId":"ImportExport", "signatureVersion":"v2", "xmlNamespace":"http://importexport.amazonaws.com/doc/2010-06-01/", "protocol":"query" diff --git a/botocore/data/inspector/2016-02-16/service-2.json b/botocore/data/inspector/2016-02-16/service-2.json index f897dd6e..a560d14c 100644 --- a/botocore/data/inspector/2016-02-16/service-2.json +++ b/botocore/data/inspector/2016-02-16/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceFullName":"Amazon Inspector", + "serviceId":"Inspector", "signatureVersion":"v4", "targetPrefix":"InspectorService", 
"uid":"inspector-2016-02-16" @@ -40,7 +41,8 @@ {"shape":"InvalidInputException"}, {"shape":"LimitExceededException"}, {"shape":"AccessDeniedException"}, - {"shape":"NoSuchEntityException"} + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidCrossAccountRoleException"} ], "documentation":"

Creates a new assessment target using the ARN of the resource group that is generated by CreateResourceGroup. If the service-linked role isn’t already registered, also creates and registers a service-linked role to grant Amazon Inspector access to AWS Services needed to perform security assessments. You can create up to 50 assessment targets per AWS account. You can run up to 500 concurrent agents per AWS account. For more information, see Amazon Inspector Assessment Targets.

" }, @@ -61,6 +63,23 @@ ], "documentation":"

Creates an assessment template for the assessment target that is specified by the ARN of the assessment target. If the service-linked role isn’t already registered, also creates and registers a service-linked role to grant Amazon Inspector access to AWS Services needed to perform security assessments.

" }, + "CreateExclusionsPreview":{ + "name":"CreateExclusionsPreview", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateExclusionsPreviewRequest"}, + "output":{"shape":"CreateExclusionsPreviewResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"PreviewGenerationInProgressException"}, + {"shape":"InternalException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ], + "documentation":"

Starts the generation of an exclusions preview for the specified assessment template. The exclusions preview lists the potential exclusions (ExclusionPreview) that Inspector can detect before it runs the assessment.

" + }, "CreateResourceGroup":{ "name":"CreateResourceGroup", "http":{ @@ -179,6 +198,20 @@ ], "documentation":"

Describes the IAM role that enables Amazon Inspector to access your AWS account.

" }, + "DescribeExclusions":{ + "name":"DescribeExclusions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeExclusionsRequest"}, + "output":{"shape":"DescribeExclusionsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Describes the exclusions that are specified by the exclusions' ARNs.

" + }, "DescribeFindings":{ "name":"DescribeFindings", "http":{ @@ -239,6 +272,22 @@ ], "documentation":"

Produces an assessment report that includes detailed and comprehensive results of a specified assessment run.

" }, + "GetExclusionsPreview":{ + "name":"GetExclusionsPreview", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetExclusionsPreviewRequest"}, + "output":{"shape":"GetExclusionsPreviewResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InternalException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ], + "documentation":"

Retrieves the exclusions preview (a list of ExclusionPreview objects) specified by the preview token. You can obtain the preview token by running the CreateExclusionsPreview API.

" + }, "GetTelemetryMetadata":{ "name":"GetTelemetryMetadata", "http":{ @@ -334,6 +383,22 @@ ], "documentation":"

Lists all the event subscriptions for the assessment template that is specified by the ARN of the assessment template. For more information, see SubscribeToEvent and UnsubscribeFromEvent.

" }, + "ListExclusions":{ + "name":"ListExclusions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListExclusionsRequest"}, + "output":{"shape":"ListExclusionsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchEntityException"} + ], + "documentation":"

List exclusions that are generated by the assessment run.

" + }, "ListFindings":{ "name":"ListFindings", "http":{ @@ -1097,7 +1162,6 @@ "required":[ "arn", "name", - "resourceGroupArn", "createdAt", "updatedAt" ], @@ -1173,7 +1237,7 @@ }, "durationInSeconds":{ "shape":"AssessmentRunDuration", - "documentation":"

The duration in seconds specified for this assessment tempate. The default value is 3600 seconds (one hour). The maximum value is 86400 seconds (one day).

" + "documentation":"

The duration in seconds specified for this assessment template. The default value is 3600 seconds (one hour). The maximum value is 86400 seconds (one day).

" }, "rulesPackageArns":{ "shape":"AssessmentTemplateRulesPackageArnList", @@ -1185,7 +1249,7 @@ }, "lastAssessmentRunArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the most recent assessment run associated with this assessment template. This value exists only when the value of assessmentRunCount is greater than zero.

" + "documentation":"

The Amazon Resource Name (ARN) of the most recent assessment run associated with this assessment template. This value exists only when the value of assessmentRunCount is greater than zero.

" }, "assessmentRunCount":{ "shape":"ArnCount", @@ -1316,13 +1380,16 @@ "max":10, "min":1 }, + "BatchDescribeExclusionsArnList":{ + "type":"list", + "member":{"shape":"Arn"}, + "max":100, + "min":1 + }, "Bool":{"type":"boolean"}, "CreateAssessmentTargetRequest":{ "type":"structure", - "required":[ - "assessmentTargetName", - "resourceGroupArn" - ], + "required":["assessmentTargetName"], "members":{ "assessmentTargetName":{ "shape":"AssessmentTargetName", @@ -1385,6 +1452,26 @@ } } }, + "CreateExclusionsPreviewRequest":{ + "type":"structure", + "required":["assessmentTemplateArn"], + "members":{ + "assessmentTemplateArn":{ + "shape":"Arn", + "documentation":"

The ARN that specifies the assessment template for which you want to create an exclusions preview.

" + } + } + }, + "CreateExclusionsPreviewResponse":{ + "type":"structure", + "required":["previewToken"], + "members":{ + "previewToken":{ + "shape":"UUID", + "documentation":"

Specifies the unique identifier of the requested exclusions preview. You can use the unique identifier to retrieve the exclusions preview when running the GetExclusionsPreview API.

" + } + } + }, "CreateResourceGroupRequest":{ "type":"structure", "required":["resourceGroupTags"], @@ -1535,6 +1622,37 @@ } } }, + "DescribeExclusionsRequest":{ + "type":"structure", + "required":["exclusionArns"], + "members":{ + "exclusionArns":{ + "shape":"BatchDescribeExclusionsArnList", + "documentation":"

The list of ARNs that specify the exclusions that you want to describe.

" + }, + "locale":{ + "shape":"Locale", + "documentation":"

The locale into which you want to translate the exclusion's title, description, and recommendation.

" + } + } + }, + "DescribeExclusionsResponse":{ + "type":"structure", + "required":[ + "exclusions", + "failedItems" + ], + "members":{ + "exclusions":{ + "shape":"ExclusionMap", + "documentation":"

Information about the exclusions.

" + }, + "failedItems":{ + "shape":"FailedItems", + "documentation":"

Exclusion details that cannot be described. An error code is provided for each failed item.

" + } + } + }, "DescribeFindingsRequest":{ "type":"structure", "required":["findingArns"], @@ -1667,6 +1785,88 @@ "max":50, "min":1 }, + "Exclusion":{ + "type":"structure", + "required":[ + "arn", + "title", + "description", + "recommendation", + "scopes" + ], + "members":{ + "arn":{ + "shape":"Arn", + "documentation":"

The ARN that specifies the exclusion.

" + }, + "title":{ + "shape":"Text", + "documentation":"

The name of the exclusion.

" + }, + "description":{ + "shape":"Text", + "documentation":"

The description of the exclusion.

" + }, + "recommendation":{ + "shape":"Text", + "documentation":"

The recommendation for the exclusion.

" + }, + "scopes":{ + "shape":"ScopeList", + "documentation":"

The AWS resources for which the exclusion pertains.

" + }, + "attributes":{ + "shape":"AttributeList", + "documentation":"

The system-defined attributes for the exclusion.

" + } + }, + "documentation":"

Contains information about what was excluded from an assessment run.

" + }, + "ExclusionMap":{ + "type":"map", + "key":{"shape":"Arn"}, + "value":{"shape":"Exclusion"}, + "max":100, + "min":1 + }, + "ExclusionPreview":{ + "type":"structure", + "required":[ + "title", + "description", + "recommendation", + "scopes" + ], + "members":{ + "title":{ + "shape":"Text", + "documentation":"

The name of the exclusion preview.

" + }, + "description":{ + "shape":"Text", + "documentation":"

The description of the exclusion preview.

" + }, + "recommendation":{ + "shape":"Text", + "documentation":"

The recommendation for the exclusion preview.

" + }, + "scopes":{ + "shape":"ScopeList", + "documentation":"

The AWS resources for which the exclusion preview pertains.

" + }, + "attributes":{ + "shape":"AttributeList", + "documentation":"

The system-defined attributes for the exclusion preview.

" + } + }, + "documentation":"

Contains information about what is excluded from an assessment run given the current state of the assessment template.

" + }, + "ExclusionPreviewList":{ + "type":"list", + "member":{"shape":"ExclusionPreview"}, + "max":100, + "min":0 + }, "FailedItemDetails":{ "type":"structure", "required":[ @@ -1878,6 +2078,53 @@ } } }, + "GetExclusionsPreviewRequest":{ + "type":"structure", + "required":[ + "assessmentTemplateArn", + "previewToken" + ], + "members":{ + "assessmentTemplateArn":{ + "shape":"Arn", + "documentation":"

The ARN that specifies the assessment template for which the exclusions preview was requested.

" + }, + "previewToken":{ + "shape":"UUID", + "documentation":"

The unique identifier associated with the exclusions preview.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the GetExclusionsPreviewRequest action. Subsequent calls to the action fill nextToken in the request with the value of nextToken from the previous response to continue listing data.

" + }, + "maxResults":{ + "shape":"ListMaxResults", + "documentation":"

You can use this parameter to indicate the maximum number of items you want in the response. The default value is 100. The maximum value is 500.

" + }, + "locale":{ + "shape":"Locale", + "documentation":"

The locale into which you want to translate the exclusion's title, description, and recommendation.

" + } + } + }, + "GetExclusionsPreviewResponse":{ + "type":"structure", + "required":["previewStatus"], + "members":{ + "previewStatus":{ + "shape":"PreviewStatus", + "documentation":"

Specifies the status of the request to generate an exclusions preview.

" + }, + "exclusionPreviews":{ + "shape":"ExclusionPreviewList", + "documentation":"

Information about the exclusions included in the preview.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to null.

" + } + } + }, "GetTelemetryMetadataRequest":{ "type":"structure", "required":["assessmentRunArn"], @@ -2290,6 +2537,38 @@ } } }, + "ListExclusionsRequest":{ + "type":"structure", + "required":["assessmentRunArn"], + "members":{ + "assessmentRunArn":{ + "shape":"Arn", + "documentation":"

The ARN of the assessment run that generated the exclusions that you want to list.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the ListExclusionsRequest action. Subsequent calls to the action fill nextToken in the request with the value of nextToken from the previous response to continue listing data.

" + }, + "maxResults":{ + "shape":"ListMaxResults", + "documentation":"

You can use this parameter to indicate the maximum number of items you want in the response. The default value is 100. The maximum value is 500.

" + } + } + }, + "ListExclusionsResponse":{ + "type":"structure", + "required":["exclusionArns"], + "members":{ + "exclusionArns":{ + "shape":"ListReturnedArnList", + "documentation":"

A list of exclusions' ARNs returned by the action.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to null.

" + } + } + }, "ListFindingsRequest":{ "type":"structure", "members":{ @@ -2494,6 +2773,22 @@ } } }, + "PreviewGenerationInProgressException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The request is rejected. The specified assessment template is currently generating an exclusions preview.

", + "exception":true + }, + "PreviewStatus":{ + "type":"string", + "enum":[ + "WORK_IN_PROGRESS", + "COMPLETED" + ] + }, "ProviderName":{ "type":"string", "max":1000, @@ -2661,6 +2956,33 @@ "max":1000, "min":0 }, + "Scope":{ + "type":"structure", + "members":{ + "key":{ + "shape":"ScopeType", + "documentation":"

The type of the scope.

" + }, + "value":{ + "shape":"ScopeValue", + "documentation":"

The resource identifier for the specified scope type.

" + } + }, + "documentation":"

This data type contains key-value pairs that identify various Amazon resources.

" + }, + "ScopeList":{ + "type":"list", + "member":{"shape":"Scope"}, + "min":1 + }, + "ScopeType":{ + "type":"string", + "enum":[ + "INSTANCE_ID", + "RULES_PACKAGE_ARN" + ] + }, + "ScopeValue":{"type":"string"}, "ServiceName":{ "type":"string", "max":128, @@ -2871,6 +3193,10 @@ }, "documentation":"

This data type is used in the AssessmentRunFilter data type.

" }, + "UUID":{ + "type":"string", + "pattern":"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" + }, "UnsubscribeFromEventRequest":{ "type":"structure", "required":[ @@ -2910,8 +3236,7 @@ "type":"structure", "required":[ "assessmentTargetArn", - "assessmentTargetName", - "resourceGroupArn" + "assessmentTargetName" ], "members":{ "assessmentTargetArn":{ diff --git a/botocore/data/iot-data/2015-05-28/service-2.json b/botocore/data/iot-data/2015-05-28/service-2.json index bca28eeb..88fba6cd 100644 --- a/botocore/data/iot-data/2015-05-28/service-2.json +++ b/botocore/data/iot-data/2015-05-28/service-2.json @@ -6,6 +6,7 @@ "endpointPrefix":"data.iot", "protocol":"rest-json", "serviceFullName":"AWS IoT Data Plane", + "serviceId":"IoT Data Plane", "signatureVersion":"v4", "signingName":"iotdata" }, diff --git a/botocore/data/iot-jobs-data/2017-09-29/service-2.json b/botocore/data/iot-jobs-data/2017-09-29/service-2.json index 91dcd3bc..8fb28a2b 100644 --- a/botocore/data/iot-jobs-data/2017-09-29/service-2.json +++ b/botocore/data/iot-jobs-data/2017-09-29/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"data.jobs.iot", "protocol":"rest-json", "serviceFullName":"AWS IoT Jobs Data Plane", + "serviceId":"IoT Jobs Data Plane", "signatureVersion":"v4", "signingName":"iot-jobs-data", "uid":"iot-jobs-data-2017-09-29" diff --git a/botocore/data/iot/2015-05-28/service-2.json b/botocore/data/iot/2015-05-28/service-2.json index ae7ae8d2..1c16d66e 100644 --- a/botocore/data/iot/2015-05-28/service-2.json +++ b/botocore/data/iot/2015-05-28/service-2.json @@ -151,6 +151,23 @@ ], "documentation":"

Cancels a job.

" }, + "CancelJobExecution":{ + "name":"CancelJobExecution", + "http":{ + "method":"PUT", + "requestUri":"/things/{thingName}/jobs/{jobId}/cancel" + }, + "input":{"shape":"CancelJobExecutionRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidStateTransitionException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"VersionConflictException"} + ], + "documentation":"

Cancels the execution of a job for a given thing.

" + }, "ClearDefaultAuthorizer":{ "name":"ClearDefaultAuthorizer", "http":{ @@ -462,6 +479,39 @@ ], "documentation":"

Deletes the specified certificate.

A certificate cannot be deleted if it has a policy attached to it or if its status is set to ACTIVE. To delete a certificate, first use the DetachPrincipalPolicy API to detach all policies. Next, use the UpdateCertificate API to set the certificate to the INACTIVE status.

" }, + "DeleteJob":{ + "name":"DeleteJob", + "http":{ + "method":"DELETE", + "requestUri":"/jobs/{jobId}" + }, + "input":{"shape":"DeleteJobRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidStateTransitionException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Deletes a job and its related job executions.

Deleting a job may take time, depending on the number of job executions created for the job and various other factors. While the job is being deleted, the status of the job will be shown as \"DELETION_IN_PROGRESS\". Attempting to delete or cancel a job whose status is already \"DELETION_IN_PROGRESS\" will result in an error.

Only 10 jobs may have status \"DELETION_IN_PROGRESS\" at the same time, or a LimitExceededException will occur.

" + }, + "DeleteJobExecution":{ + "name":"DeleteJobExecution", + "http":{ + "method":"DELETE", + "requestUri":"/things/{thingName}/jobs/{jobId}/executionNumber/{executionNumber}" + }, + "input":{"shape":"DeleteJobExecutionRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidStateTransitionException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Deletes a job execution.

" + }, "DeleteOTAUpdate":{ "name":"DeleteOTAUpdate", "http":{ @@ -1028,7 +1078,7 @@ {"shape":"InternalFailureException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Gets effective policies.

" + "documentation":"

Gets a list of the policies that have an effect on the authorization behavior of the specified device when it connects to the AWS IoT device gateway.

" }, "GetIndexingConfiguration":{ "name":"GetIndexingConfiguration", @@ -1930,7 +1980,7 @@ {"shape":"InternalFailureException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Test custom authorization.

" + "documentation":"

Tests if a specified principal is authorized to perform an AWS IoT action on a specified resource. Use this to test and debug the authorization behavior of devices that connect to the AWS IoT device gateway.

" }, "TestInvokeAuthorizer":{ "name":"TestInvokeAuthorizer", @@ -1949,7 +1999,7 @@ {"shape":"InternalFailureException"}, {"shape":"InvalidResponseException"} ], - "documentation":"

Invoke the specified custom authorizer for testing purposes.

" + "documentation":"

Tests a custom authorization behavior by invoking a specified custom authorizer. Use this to test and debug the custom authorization behavior of devices that connect to the AWS IoT device gateway.

" }, "TransferCertificate":{ "name":"TransferCertificate", @@ -2668,6 +2718,41 @@ }, "documentation":"

The input for the CancelCertificateTransfer operation.

" }, + "CancelJobExecutionRequest":{ + "type":"structure", + "required":[ + "jobId", + "thingName" + ], + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"

The ID of the job to be canceled.

", + "location":"uri", + "locationName":"jobId" + }, + "thingName":{ + "shape":"ThingName", + "documentation":"

The name of the thing whose execution of the job will be canceled.

", + "location":"uri", + "locationName":"thingName" + }, + "force":{ + "shape":"ForceFlag", + "documentation":"

(Optional) If true the job execution will be canceled if it has status IN_PROGRESS or QUEUED, otherwise the job execution will be canceled only if it has status QUEUED. If you attempt to cancel a job execution that is IN_PROGRESS, and you do not set force to true, then an InvalidStateTransitionException will be thrown. The default is false.

Canceling a job execution which is \"IN_PROGRESS\", will cause the device to be unable to update the job execution status. Use caution and ensure that the device is able to recover to a valid state.

", + "location":"querystring", + "locationName":"force" + }, + "expectedVersion":{ + "shape":"ExpectedVersion", + "documentation":"

(Optional) The expected current version of the job execution. Each time you update the job execution, its version is incremented. If the version of the job execution stored in Jobs does not match, the update is rejected with a VersionMismatch error, and an ErrorResponse that contains the current job execution status data is returned. (This makes it unnecessary to perform a separate DescribeJobExecution request in order to obtain the job execution status data.)

" + }, + "statusDetails":{ + "shape":"DetailsMap", + "documentation":"

A collection of name/value pairs that describe the status of the job execution. If not specified, the statusDetails are unchanged. You can specify at most 10 name/value pairs.

" + } + } + }, "CancelJobRequest":{ "type":"structure", "required":["jobId"], @@ -2681,6 +2766,12 @@ "comment":{ "shape":"Comment", "documentation":"

An optional comment string describing why the job was canceled.

" + }, + "force":{ + "shape":"ForceFlag", + "documentation":"

(Optional) If true job executions with status \"IN_PROGRESS\" and \"QUEUED\" are canceled, otherwise only job executions with status \"QUEUED\" are canceled. The default is false.

Canceling a job which is \"IN_PROGRESS\", will cause a device which is executing the job to be unable to update the job execution status. Use caution and ensure that each device executing a job which is canceled is able to recover to a valid state.

", + "location":"querystring", + "locationName":"force" } } }, @@ -3663,6 +3754,58 @@ "error":{"httpStatusCode":409}, "exception":true }, + "DeleteJobExecutionRequest":{ + "type":"structure", + "required":[ + "jobId", + "thingName", + "executionNumber" + ], + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"

The ID of the job whose execution on a particular device will be deleted.

", + "location":"uri", + "locationName":"jobId" + }, + "thingName":{ + "shape":"ThingName", + "documentation":"

The name of the thing whose job execution will be deleted.

", + "location":"uri", + "locationName":"thingName" + }, + "executionNumber":{ + "shape":"ExecutionNumber", + "documentation":"

The ID of the job execution to be deleted. The executionNumber refers to the execution of a particular job on a particular device.

Note that once a job execution is deleted, the executionNumber may be reused by IoT, so be sure you get and use the correct value here.

", + "location":"uri", + "locationName":"executionNumber" + }, + "force":{ + "shape":"ForceFlag", + "documentation":"

(Optional) When true, you can delete a job execution which is \"IN_PROGRESS\". Otherwise, you can only delete a job execution which is in a terminal state (\"SUCCEEDED\", \"FAILED\", \"REJECTED\", \"REMOVED\" or \"CANCELED\") or an exception will occur. The default is false.

Deleting a job execution which is \"IN_PROGRESS\", will cause the device to be unable to access job information or update the job execution status. Use caution and ensure that the device is able to recover to a valid state.

", + "location":"querystring", + "locationName":"force" + } + } + }, + "DeleteJobRequest":{ + "type":"structure", + "required":["jobId"], + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"

The ID of the job to be deleted.

After a job deletion is completed, you may reuse this jobId when you create a new job. However, this is not recommended, and you must ensure that your devices are not using the jobId to refer to the deleted job.

", + "location":"uri", + "locationName":"jobId" + }, + "force":{ + "shape":"ForceFlag", + "documentation":"

(Optional) When true, you can delete a job which is \"IN_PROGRESS\". Otherwise, you can only delete a job which is in a terminal state (\"COMPLETED\" or \"CANCELED\") or an exception will occur. The default is false.

Deleting a job which is \"IN_PROGRESS\", will cause a device which is executing the job to be unable to access job information or update the job execution status. Use caution and ensure that each device executing a job which is deleted is able to recover to a valid state.

", + "location":"querystring", + "locationName":"force" + } + } + }, "DeleteOTAUpdateRequest":{ "type":"structure", "required":["otaUpdateId"], @@ -4644,6 +4787,7 @@ ] }, "ExecutionNumber":{"type":"long"}, + "ExpectedVersion":{"type":"long"}, "ExpiresInSec":{ "type":"long", "max":3600, @@ -4694,6 +4838,8 @@ }, "Flag":{"type":"boolean"}, "ForceDelete":{"type":"boolean"}, + "ForceFlag":{"type":"boolean"}, + "Forced":{"type":"boolean"}, "FunctionArn":{"type":"string"}, "GEMaxResults":{ "type":"integer", @@ -5090,6 +5236,15 @@ "error":{"httpStatusCode":400}, "exception":true }, + "InvalidStateTransitionException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "documentation":"

An attempt was made to change to an invalid state, for example by deleting a job or a job execution which is \"IN_PROGRESS\" without setting the force parameter.

", + "error":{"httpStatusCode":409}, + "exception":true + }, "IotAnalyticsAction":{ "type":"structure", "members":{ @@ -5103,10 +5258,10 @@ }, "roleArn":{ "shape":"AwsArn", - "documentation":"

The ARN of the role which has a policy that grants IoT permission to send message data via IoT Analytics (iotanalytics:BatchPutMessage).

" + "documentation":"

The ARN of the role which has a policy that grants IoT Analytics permission to send message data via IoT Analytics (iotanalytics:BatchPutMessage).

" } }, - "documentation":"

Sends message data to an AWS IoT Analytics channel.

" + "documentation":"

Sends message data to an AWS IoT Analytics channel.

" }, "IsAuthenticated":{"type":"boolean"}, "IsDefaultVersion":{"type":"boolean"}, @@ -5130,6 +5285,10 @@ "shape":"JobStatus", "documentation":"

The status of the job, one of IN_PROGRESS, CANCELED, or COMPLETED.

" }, + "forceCanceled":{ + "shape":"Forced", + "documentation":"

Will be true if the job was canceled with the optional force parameter set to true.

" + }, "comment":{ "shape":"Comment", "documentation":"

If the job was updated, describes the reason for the update.

" @@ -5205,6 +5364,10 @@ "shape":"JobExecutionStatus", "documentation":"

The status of the job execution (IN_PROGRESS, QUEUED, FAILED, SUCCESS, CANCELED, or REJECTED).

" }, + "forceCanceled":{ + "shape":"Forced", + "documentation":"

Will be true if the job execution was canceled with the optional force parameter set to true.

" + }, "statusDetails":{ "shape":"JobExecutionStatusDetails", "documentation":"

A collection of name/value pairs that describe the status of the job execution.

" @@ -5228,6 +5391,10 @@ "executionNumber":{ "shape":"ExecutionNumber", "documentation":"

A string (consisting of the digits \"0\" through \"9\") which identifies this particular job execution on this particular device. It can be used in commands which return or update job execution information.

" + }, + "versionNumber":{ + "shape":"VersionNumber", + "documentation":"

The version of the job execution. Job execution versions are incremented each time they are updated by a device.

" } }, "documentation":"

The job execution object represents the execution of a job on a particular device.

" @@ -5337,7 +5504,7 @@ "members":{ "processingTargets":{ "shape":"ProcessingTargetNameList", - "documentation":"

The devices on which the job is executing.

" + "documentation":"

The target devices to which the job execution is being rolled out. This value will be null after the job execution has finished rolling out to all the target devices.

" }, "numberOfCanceledThings":{ "shape":"CanceledThings", @@ -5375,7 +5542,8 @@ "enum":[ "IN_PROGRESS", "CANCELED", - "COMPLETED" + "COMPLETED", + "DELETION_IN_PROGRESS" ] }, "JobSummary":{ @@ -5498,7 +5666,7 @@ "documentation":"

The message for the exception.

" } }, - "documentation":"

The number of attached entities exceeds the limit.

", + "documentation":"

A limit has been exceeded.

", "error":{"httpStatusCode":410}, "exception":true }, @@ -7751,7 +7919,7 @@ }, "messageFormat":{ "shape":"MessageFormat", - "documentation":"

The message format of the message to publish. Optional. Accepted values are \"JSON\" and \"RAW\". The default value of the attribute is \"RAW\". SNS uses this setting to determine if the payload should be parsed and relevant platform-specific bits of the payload should be extracted. To read more about SNS message formats, see http://docs.aws.amazon.com/sns/latest/dg/json-formats.html refer to their official documentation.

" + "documentation":"

(Optional) The message format of the message to publish. Accepted values are \"JSON\" and \"RAW\". The default value of the attribute is \"RAW\". SNS uses this setting to determine if the payload should be parsed and relevant platform-specific bits of the payload should be extracted. To read more about SNS message formats, see http://docs.aws.amazon.com/sns/latest/dg/json-formats.html refer to their official documentation.

" } }, "documentation":"

Describes an action to publish to an Amazon SNS topic.

" @@ -8847,10 +9015,11 @@ "documentation":"

The message for the exception.

" } }, - "documentation":"

An exception thrown when the version of a thing passed to a command is different than the version specified with the --version parameter.

", + "documentation":"

An exception thrown when the version of an entity specified with the expectedVersion parameter does not match the latest version in the system.

", "error":{"httpStatusCode":409}, "exception":true }, + "VersionNumber":{"type":"long"}, "VersionsLimitExceededException":{ "type":"structure", "members":{ diff --git a/botocore/data/iot1click-devices/2018-05-14/service-2.json b/botocore/data/iot1click-devices/2018-05-14/service-2.json new file mode 100644 index 00000000..127c6d12 --- /dev/null +++ b/botocore/data/iot1click-devices/2018-05-14/service-2.json @@ -0,0 +1,889 @@ +{ + "metadata" : { + "apiVersion" : "2018-05-14", + "endpointPrefix" : "devices.iot1click", + "signingName" : "iot1click", + "serviceFullName" : "AWS IoT 1-Click Devices Service", + "serviceId" : "IoT 1Click Devices Service", + "protocol" : "rest-json", + "jsonVersion" : "1.1", + "uid" : "devices-2018-05-14", + "signatureVersion" : "v4" + }, + "operations" : { + "ClaimDevicesByClaimCode" : { + "name" : "ClaimDevicesByClaimCode", + "http" : { + "method" : "PUT", + "requestUri" : "/claims/{claimCode}", + "responseCode" : 200 + }, + "input" : { + "shape" : "ClaimDevicesByClaimCodeRequest" + }, + "output" : { + "shape" : "ClaimDevicesByClaimCodeResponse", + "documentation" : "

200 response

" + }, + "errors" : [ { + "shape" : "InvalidRequestException", + "documentation" : "

400 response

" + }, { + "shape" : "InternalFailureException", + "documentation" : "

500 response

" + }, { + "shape" : "ForbiddenException", + "documentation" : "

403 response

" + } ], + "documentation" : "

Adds device(s) to your account (i.e., claim one or more devices) if and only if\n you received a claim code with the device(s).

" + }, + "DescribeDevice" : { + "name" : "DescribeDevice", + "http" : { + "method" : "GET", + "requestUri" : "/devices/{deviceId}", + "responseCode" : 200 + }, + "input" : { + "shape" : "DescribeDeviceRequest" + }, + "output" : { + "shape" : "DescribeDeviceResponse", + "documentation" : "

200 response

" + }, + "errors" : [ { + "shape" : "ResourceNotFoundException", + "documentation" : "

404 response

" + }, { + "shape" : "InvalidRequestException", + "documentation" : "

400 response

" + }, { + "shape" : "InternalFailureException", + "documentation" : "

500 response

" + } ], + "documentation" : "

Given a device ID, returns a DescribeDeviceResponse object describing\n the details of the device.

" + }, + "FinalizeDeviceClaim" : { + "name" : "FinalizeDeviceClaim", + "http" : { + "method" : "PUT", + "requestUri" : "/devices/{deviceId}/finalize-claim", + "responseCode" : 200 + }, + "input" : { + "shape" : "FinalizeDeviceClaimRequest" + }, + "output" : { + "shape" : "FinalizeDeviceClaimResponse", + "documentation" : "

200 response

" + }, + "errors" : [ { + "shape" : "ResourceNotFoundException", + "documentation" : "

404 response

" + }, { + "shape" : "InvalidRequestException", + "documentation" : "

400 response

" + }, { + "shape" : "InternalFailureException", + "documentation" : "

500 response

" + }, { + "shape" : "PreconditionFailedException", + "documentation" : "

412 response

" + }, { + "shape" : "ResourceConflictException", + "documentation" : "

409 response

" + } ], + "documentation" : "

Given a device ID, finalizes the claim request for the associated device.

\n

Claiming a device consists of initiating a claim, then publishing a device\n event, and finalizing the claim. For a device of type button, a\n device event can be published by simply clicking the device.

\n
" + }, + "GetDeviceMethods" : { + "name" : "GetDeviceMethods", + "http" : { + "method" : "GET", + "requestUri" : "/devices/{deviceId}/methods", + "responseCode" : 200 + }, + "input" : { + "shape" : "GetDeviceMethodsRequest" + }, + "output" : { + "shape" : "GetDeviceMethodsResponse", + "documentation" : "

200 response

" + }, + "errors" : [ { + "shape" : "ResourceNotFoundException", + "documentation" : "

404 response

" + }, { + "shape" : "InvalidRequestException", + "documentation" : "

400 response

" + }, { + "shape" : "InternalFailureException", + "documentation" : "

500 response

" + } ], + "documentation" : "

Given a device ID, returns the invokable methods associated with the\n device.

" + }, + "InitiateDeviceClaim" : { + "name" : "InitiateDeviceClaim", + "http" : { + "method" : "PUT", + "requestUri" : "/devices/{deviceId}/initiate-claim", + "responseCode" : 200 + }, + "input" : { + "shape" : "InitiateDeviceClaimRequest" + }, + "output" : { + "shape" : "InitiateDeviceClaimResponse", + "documentation" : "

200 response

" + }, + "errors" : [ { + "shape" : "ResourceNotFoundException", + "documentation" : "

404 response

" + }, { + "shape" : "InvalidRequestException", + "documentation" : "

400 response

" + }, { + "shape" : "InternalFailureException", + "documentation" : "

500 response

" + }, { + "shape" : "ResourceConflictException", + "documentation" : "

409 response

" + } ], + "documentation" : "

Given a device ID, initiates a claim request for the associated device.

\n

Claiming a device consists of initiating a claim, then publishing a device\n event, and finalizing the claim. For a device of type button, a\n device event can be published by simply clicking the device.

\n
" + }, + "InvokeDeviceMethod" : { + "name" : "InvokeDeviceMethod", + "http" : { + "method" : "POST", + "requestUri" : "/devices/{deviceId}/methods", + "responseCode" : 200 + }, + "input" : { + "shape" : "InvokeDeviceMethodRequest" + }, + "output" : { + "shape" : "InvokeDeviceMethodResponse", + "documentation" : "

200 response

" + }, + "errors" : [ { + "shape" : "InvalidRequestException", + "documentation" : "

400 response

" + }, { + "shape" : "PreconditionFailedException", + "documentation" : "

412 response

" + }, { + "shape" : "InternalFailureException", + "documentation" : "

500 response

" + }, { + "shape" : "ResourceNotFoundException", + "documentation" : "

404 response

" + }, { + "shape" : "RangeNotSatisfiableException", + "documentation" : "

416 response

" + }, { + "shape" : "ResourceConflictException", + "documentation" : "

409 response

" + } ], + "documentation" : "

Given a device ID, issues a request to invoke a named device method (with possible\n parameters). See the \"Example POST\" code snippet below.

" + }, + "ListDeviceEvents" : { + "name" : "ListDeviceEvents", + "http" : { + "method" : "GET", + "requestUri" : "/devices/{deviceId}/events", + "responseCode" : 200 + }, + "input" : { + "shape" : "ListDeviceEventsRequest" + }, + "output" : { + "shape" : "ListDeviceEventsResponse", + "documentation" : "

200 response

" + }, + "errors" : [ { + "shape" : "ResourceNotFoundException", + "documentation" : "

404 response

" + }, { + "shape" : "RangeNotSatisfiableException", + "documentation" : "

416 response

" + }, { + "shape" : "InvalidRequestException", + "documentation" : "

400 response

" + }, { + "shape" : "InternalFailureException", + "documentation" : "

500 response

" + } ], + "documentation" : "

Using a device ID, returns a DeviceEventsResponse object containing\n an array of events for the device.

" + }, + "ListDevices" : { + "name" : "ListDevices", + "http" : { + "method" : "GET", + "requestUri" : "/devices", + "responseCode" : 200 + }, + "input" : { + "shape" : "ListDevicesRequest" + }, + "output" : { + "shape" : "ListDevicesResponse", + "documentation" : "

200 response

" + }, + "errors" : [ { + "shape" : "RangeNotSatisfiableException", + "documentation" : "

416 response

" + }, { + "shape" : "InvalidRequestException", + "documentation" : "

400 response

" + }, { + "shape" : "InternalFailureException", + "documentation" : "

500 response

" + } ], + "documentation" : "

Lists the 1-Click compatible devices associated with your AWS account.

" + }, + "UnclaimDevice" : { + "name" : "UnclaimDevice", + "http" : { + "method" : "PUT", + "requestUri" : "/devices/{deviceId}/unclaim", + "responseCode" : 200 + }, + "input" : { + "shape" : "UnclaimDeviceRequest" + }, + "output" : { + "shape" : "UnclaimDeviceResponse", + "documentation" : "

200 response

" + }, + "errors" : [ { + "shape" : "ResourceNotFoundException", + "documentation" : "

404 response

" + }, { + "shape" : "InvalidRequestException", + "documentation" : "

400 response

" + }, { + "shape" : "InternalFailureException", + "documentation" : "

500 response

" + } ], + "documentation" : "

Disassociates a device from your AWS account using its device ID.

" + }, + "UpdateDeviceState" : { + "name" : "UpdateDeviceState", + "http" : { + "method" : "PUT", + "requestUri" : "/devices/{deviceId}/state", + "responseCode" : 200 + }, + "input" : { + "shape" : "UpdateDeviceStateRequest" + }, + "output" : { + "shape" : "UpdateDeviceStateResponse", + "documentation" : "

200 response

" + }, + "errors" : [ { + "shape" : "ResourceNotFoundException", + "documentation" : "

404 response

" + }, { + "shape" : "InvalidRequestException", + "documentation" : "

400 response

" + }, { + "shape" : "InternalFailureException", + "documentation" : "

500 response

" + } ], + "documentation" : "

Using a Boolean value (true or false), this operation\n enables or disables the device given a device ID.

" + } + }, + "shapes" : { + "Attributes" : { + "type" : "structure", + "members" : { } + }, + "ClaimDevicesByClaimCodeRequest" : { + "type" : "structure", + "members" : { + "ClaimCode" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "claimCode", + "documentation" : "

The claim code, starting with \"C-\", as provided by the device manufacturer.

" + } + }, + "required" : [ "ClaimCode" ] + }, + "ClaimDevicesByClaimCodeResponse" : { + "type" : "structure", + "members" : { + "ClaimCode" : { + "shape" : "__stringMin12Max40", + "locationName" : "claimCode", + "documentation" : "

The claim code provided by the device manufacturer.

" + }, + "Total" : { + "shape" : "__integer", + "locationName" : "total", + "documentation" : "

The total number of devices associated with the claim code that has been processed\n in the claim request.

" + } + } + }, + "DescribeDeviceRequest" : { + "type" : "structure", + "members" : { + "DeviceId" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "deviceId", + "documentation" : "

The unique identifier of the device.

" + } + }, + "required" : [ "DeviceId" ] + }, + "DescribeDeviceResponse" : { + "type" : "structure", + "members" : { + "DeviceDescription" : { + "shape" : "DeviceDescription", + "locationName" : "deviceDescription", + "documentation" : "

Device details.

" + } + } + }, + "Device" : { + "type" : "structure", + "members" : { + "Attributes" : { + "shape" : "Attributes", + "locationName" : "attributes", + "documentation" : "

The user specified attributes associated with the device for an event.

" + }, + "DeviceId" : { + "shape" : "__string", + "locationName" : "deviceId", + "documentation" : "

The unique identifier of the device.

" + }, + "Type" : { + "shape" : "__string", + "locationName" : "type", + "documentation" : "

The device type, such as \"button\".

" + } + } + }, + "DeviceAttributes" : { + "type" : "map", + "documentation" : "

\n DeviceAttributes is a string-to-string map specified by the user.

", + "key" : { + "shape" : "__string" + }, + "value" : { + "shape" : "__string" + } + }, + "DeviceClaimResponse" : { + "type" : "structure", + "members" : { + "State" : { + "shape" : "__string", + "locationName" : "state", + "documentation" : "

The device's final claim state.

" + } + } + }, + "DeviceDescription" : { + "type" : "structure", + "members" : { + "Attributes" : { + "shape" : "DeviceAttributes", + "locationName" : "attributes", + "documentation" : "

An array of zero or more elements of DeviceAttribute objects\n providing user specified device attributes.

" + }, + "DeviceId" : { + "shape" : "__string", + "locationName" : "deviceId", + "documentation" : "

The unique identifier of the device.

" + }, + "Enabled" : { + "shape" : "__boolean", + "locationName" : "enabled", + "documentation" : "

A Boolean value indicating whether or not the device is enabled.

" + }, + "RemainingLife" : { + "shape" : "__doubleMin0Max100", + "locationName" : "remainingLife", + "documentation" : "

A value between 0 and 1 inclusive, representing the fraction of life remaining for\n the device.

" + }, + "Type" : { + "shape" : "__string", + "locationName" : "type", + "documentation" : "

The type of the device, such as \"button\".

" + } + } + }, + "DeviceEvent" : { + "type" : "structure", + "members" : { + "Device" : { + "shape" : "Device", + "locationName" : "device", + "documentation" : "

An object representing the device associated with the event.

" + }, + "StdEvent" : { + "shape" : "__string", + "locationName" : "stdEvent", + "documentation" : "

A serialized JSON object representing the device-type specific event.

" + } + } + }, + "DeviceEventsResponse" : { + "type" : "structure", + "members" : { + "Events" : { + "shape" : "__listOfDeviceEvent", + "locationName" : "events", + "documentation" : "

An array of zero or more elements describing the event(s) associated with the\n device.

" + }, + "NextToken" : { + "shape" : "__string", + "locationName" : "nextToken", + "documentation" : "

The token to retrieve the next set of results.

" + } + } + }, + "DeviceMethod" : { + "type" : "structure", + "members" : { + "DeviceType" : { + "shape" : "__string", + "locationName" : "deviceType", + "documentation" : "

The type of the device, such as \"button\".

" + }, + "MethodName" : { + "shape" : "__string", + "locationName" : "methodName", + "documentation" : "

The name of the method applicable to the deviceType.

" + } + } + }, + "Empty" : { + "type" : "structure", + "members" : { }, + "documentation" : "

On success, an empty object is returned.

" + }, + "FinalizeDeviceClaimRequest" : { + "type" : "structure", + "members" : { + "DeviceId" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "deviceId", + "documentation" : "

The unique identifier of the device.

" + } + }, + "required" : [ "DeviceId" ] + }, + "FinalizeDeviceClaimResponse" : { + "type" : "structure", + "members" : { + "State" : { + "shape" : "__string", + "locationName" : "state", + "documentation" : "

The device's final claim state.

" + } + } + }, + "ForbiddenException" : { + "type" : "structure", + "members" : { + "Code" : { + "shape" : "__string", + "locationName" : "code", + "documentation" : "

403

" + }, + "Message" : { + "shape" : "__string", + "locationName" : "message", + "documentation" : "

The 403 error message returned by the web server.

" + } + }, + "exception" : true, + "error" : { + "httpStatusCode" : 403 + } + }, + "GetDeviceMethodsRequest" : { + "type" : "structure", + "members" : { + "DeviceId" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "deviceId", + "documentation" : "

The unique identifier of the device.

" + } + }, + "required" : [ "DeviceId" ] + }, + "GetDeviceMethodsResponse" : { + "type" : "structure", + "members" : { + "DeviceMethods" : { + "shape" : "__listOfDeviceMethod", + "locationName" : "deviceMethods", + "documentation" : "

List of available device APIs.

" + } + } + }, + "InitiateDeviceClaimRequest" : { + "type" : "structure", + "members" : { + "DeviceId" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "deviceId", + "documentation" : "

The unique identifier of the device.

" + } + }, + "required" : [ "DeviceId" ] + }, + "InitiateDeviceClaimResponse" : { + "type" : "structure", + "members" : { + "State" : { + "shape" : "__string", + "locationName" : "state", + "documentation" : "

The device's final claim state.

" + } + } + }, + "InternalFailureException" : { + "type" : "structure", + "members" : { + "Code" : { + "shape" : "__string", + "locationName" : "code", + "documentation" : "

500

" + }, + "Message" : { + "shape" : "__string", + "locationName" : "message", + "documentation" : "

The 500 error message returned by the web server.

" + } + }, + "exception" : true, + "error" : { + "httpStatusCode" : 500 + } + }, + "InvalidRequestException" : { + "type" : "structure", + "members" : { + "Code" : { + "shape" : "__string", + "locationName" : "code", + "documentation" : "

400

" + }, + "Message" : { + "shape" : "__string", + "locationName" : "message", + "documentation" : "

The 400 error message returned by the web server.

" + } + }, + "exception" : true, + "error" : { + "httpStatusCode" : 400 + } + }, + "InvokeDeviceMethodRequest" : { + "type" : "structure", + "members" : { + "DeviceId" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "deviceId", + "documentation" : "

The unique identifier of the device.

" + }, + "DeviceMethod" : { + "shape" : "DeviceMethod", + "locationName" : "deviceMethod", + "documentation" : "

The device method to invoke.

" + }, + "DeviceMethodParameters" : { + "shape" : "__string", + "locationName" : "deviceMethodParameters", + "documentation" : "

A JSON encoded string containing the device method request parameters.

" + } + }, + "required" : [ "DeviceId" ] + }, + "InvokeDeviceMethodResponse" : { + "type" : "structure", + "members" : { + "DeviceMethodResponse" : { + "shape" : "__string", + "locationName" : "deviceMethodResponse", + "documentation" : "

A JSON encoded string containing the device method response.

" + } + } + }, + "ListDeviceEventsRequest" : { + "type" : "structure", + "members" : { + "DeviceId" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "deviceId", + "documentation" : "

The unique identifier of the device.

" + }, + "FromTimeStamp" : { + "shape" : "__timestampIso8601", + "location" : "querystring", + "locationName" : "fromTimeStamp", + "documentation" : "

The start date for the device event query, in ISO8601 format. For example,\n 2018-03-28T15:45:12.880Z\n

" + }, + "MaxResults" : { + "shape" : "MaxResults", + "location" : "querystring", + "locationName" : "maxResults", + "documentation" : "

The maximum number of results to return per request. If not set, a default value\n of 100 is used.

" + }, + "NextToken" : { + "shape" : "__string", + "location" : "querystring", + "locationName" : "nextToken", + "documentation" : "

The token to retrieve the next set of results.

" + }, + "ToTimeStamp" : { + "shape" : "__timestampIso8601", + "location" : "querystring", + "locationName" : "toTimeStamp", + "documentation" : "

The end date for the device event query, in ISO8601 format. For example,\n 2018-03-28T15:45:12.880Z\n

" + } + }, + "required" : [ "DeviceId", "FromTimeStamp", "ToTimeStamp" ] + }, + "ListDeviceEventsResponse" : { + "type" : "structure", + "members" : { + "Events" : { + "shape" : "__listOfDeviceEvent", + "locationName" : "events", + "documentation" : "

An array of zero or more elements describing the event(s) associated with the\n device.

" + }, + "NextToken" : { + "shape" : "__string", + "locationName" : "nextToken", + "documentation" : "

The token to retrieve the next set of results.

" + } + } + }, + "ListDevicesRequest" : { + "type" : "structure", + "members" : { + "DeviceType" : { + "shape" : "__string", + "location" : "querystring", + "locationName" : "deviceType", + "documentation" : "

The type of the device, such as \"button\".

" + }, + "MaxResults" : { + "shape" : "MaxResults", + "location" : "querystring", + "locationName" : "maxResults", + "documentation" : "

The maximum number of results to return per request. If not set, a default value\n of 100 is used.

" + }, + "NextToken" : { + "shape" : "__string", + "location" : "querystring", + "locationName" : "nextToken", + "documentation" : "

The token to retrieve the next set of results.

" + } + } + }, + "ListDevicesResponse" : { + "type" : "structure", + "members" : { + "Devices" : { + "shape" : "__listOfDeviceDescription", + "locationName" : "devices", + "documentation" : "

A list of devices.

" + }, + "NextToken" : { + "shape" : "__string", + "locationName" : "nextToken", + "documentation" : "

The token to retrieve the next set of results.

" + } + } + }, + "MaxResults" : { + "type" : "integer", + "min" : 1, + "max" : 250 + }, + "PreconditionFailedException" : { + "type" : "structure", + "members" : { + "Code" : { + "shape" : "__string", + "locationName" : "code", + "documentation" : "

412

" + }, + "Message" : { + "shape" : "__string", + "locationName" : "message", + "documentation" : "

An error message explaining the error or its remedy.

" + } + }, + "exception" : true, + "error" : { + "httpStatusCode" : 412 + } + }, + "RangeNotSatisfiableException" : { + "type" : "structure", + "members" : { + "Code" : { + "shape" : "__string", + "locationName" : "code", + "documentation" : "

416

" + }, + "Message" : { + "shape" : "__string", + "locationName" : "message", + "documentation" : "

The requested number of results specified by nextToken cannot be\n satisfied.

" + } + }, + "exception" : true, + "error" : { + "httpStatusCode" : 416 + } + }, + "ResourceConflictException" : { + "type" : "structure", + "members" : { + "Code" : { + "shape" : "__string", + "locationName" : "code", + "documentation" : "

409

" + }, + "Message" : { + "shape" : "__string", + "locationName" : "message", + "documentation" : "

An error message explaining the error or its remedy.

" + } + }, + "exception" : true, + "error" : { + "httpStatusCode" : 409 + } + }, + "ResourceNotFoundException" : { + "type" : "structure", + "members" : { + "Code" : { + "shape" : "__string", + "locationName" : "code", + "documentation" : "

404

" + }, + "Message" : { + "shape" : "__string", + "locationName" : "message", + "documentation" : "

The requested device could not be found.

" + } + }, + "exception" : true, + "error" : { + "httpStatusCode" : 404 + } + }, + "UnclaimDeviceRequest" : { + "type" : "structure", + "members" : { + "DeviceId" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "deviceId", + "documentation" : "

The unique identifier of the device.

" + } + }, + "required" : [ "DeviceId" ] + }, + "UnclaimDeviceResponse" : { + "type" : "structure", + "members" : { + "State" : { + "shape" : "__string", + "locationName" : "state", + "documentation" : "

The device's final claim state.

" + } + } + }, + "UpdateDeviceStateRequest" : { + "type" : "structure", + "members" : { + "DeviceId" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "deviceId", + "documentation" : "

The unique identifier of the device.

" + }, + "Enabled" : { + "shape" : "__boolean", + "locationName" : "enabled", + "documentation" : "

If true, the device is enabled. If false, the device is\n disabled.

" + } + }, + "required" : [ "DeviceId" ] + }, + "UpdateDeviceStateResponse" : { + "type" : "structure", + "members" : { } + }, + "__boolean" : { + "type" : "boolean" + }, + "__double" : { + "type" : "double" + }, + "__doubleMin0Max100" : { + "type" : "double" + }, + "__integer" : { + "type" : "integer" + }, + "__listOfDeviceDescription" : { + "type" : "list", + "member" : { + "shape" : "DeviceDescription" + } + }, + "__listOfDeviceEvent" : { + "type" : "list", + "member" : { + "shape" : "DeviceEvent" + } + }, + "__listOfDeviceMethod" : { + "type" : "list", + "member" : { + "shape" : "DeviceMethod" + } + }, + "__long" : { + "type" : "long" + }, + "__string" : { + "type" : "string" + }, + "__stringMin12Max40" : { + "type" : "string", + "min" : 12, + "max" : 40 + }, + "__timestampIso8601" : { + "type" : "timestamp", + "timestampFormat" : "iso8601" + }, + "__timestampUnix" : { + "type" : "timestamp", + "timestampFormat" : "unixTimestamp" + } + }, + "documentation" : "

Stub description

" +} \ No newline at end of file diff --git a/botocore/data/iot1click-projects/2018-05-14/paginators-1.json b/botocore/data/iot1click-projects/2018-05-14/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/iot1click-projects/2018-05-14/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/iot1click-projects/2018-05-14/service-2.json b/botocore/data/iot1click-projects/2018-05-14/service-2.json new file mode 100644 index 00000000..b918b054 --- /dev/null +++ b/botocore/data/iot1click-projects/2018-05-14/service-2.json @@ -0,0 +1,907 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-05-14", + "endpointPrefix":"projects.iot1click", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"AWS IoT 1-Click Projects", + "serviceFullName":"AWS IoT 1-Click Projects Service", + "serviceId":"IoT 1Click Projects", + "signatureVersion":"v4", + "signingName":"iot1click", + "uid":"iot1click-projects-2018-05-14" + }, + "operations":{ + "AssociateDeviceWithPlacement":{ + "name":"AssociateDeviceWithPlacement", + "http":{ + "method":"PUT", + "requestUri":"/projects/{projectName}/placements/{placementName}/devices/{deviceTemplateName}" + }, + "input":{"shape":"AssociateDeviceWithPlacementRequest"}, + "output":{"shape":"AssociateDeviceWithPlacementResponse"}, + "errors":[ + {"shape":"InternalFailureException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Associates a physical device with a placement.

" + }, + "CreatePlacement":{ + "name":"CreatePlacement", + "http":{ + "method":"POST", + "requestUri":"/projects/{projectName}/placements" + }, + "input":{"shape":"CreatePlacementRequest"}, + "output":{"shape":"CreatePlacementResponse"}, + "errors":[ + {"shape":"InternalFailureException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Creates an empty placement.

" + }, + "CreateProject":{ + "name":"CreateProject", + "http":{ + "method":"POST", + "requestUri":"/projects" + }, + "input":{"shape":"CreateProjectRequest"}, + "output":{"shape":"CreateProjectResponse"}, + "errors":[ + {"shape":"InternalFailureException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceConflictException"} + ], + "documentation":"

Creates an empty project with a placement template. A project contains zero or more placements that adhere to the placement template defined in the project.

" + }, + "DeletePlacement":{ + "name":"DeletePlacement", + "http":{ + "method":"DELETE", + "requestUri":"/projects/{projectName}/placements/{placementName}" + }, + "input":{"shape":"DeletePlacementRequest"}, + "output":{"shape":"DeletePlacementResponse"}, + "errors":[ + {"shape":"InternalFailureException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Deletes a placement. To delete a placement, it must not have any devices associated with it.

When you delete a placement, all associated data becomes irretrievable.

" + }, + "DeleteProject":{ + "name":"DeleteProject", + "http":{ + "method":"DELETE", + "requestUri":"/projects/{projectName}" + }, + "input":{"shape":"DeleteProjectRequest"}, + "output":{"shape":"DeleteProjectResponse"}, + "errors":[ + {"shape":"InternalFailureException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Deletes a project. To delete a project, it must not have any placements associated with it.

When you delete a project, all associated data becomes irretrievable.

" + }, + "DescribePlacement":{ + "name":"DescribePlacement", + "http":{ + "method":"GET", + "requestUri":"/projects/{projectName}/placements/{placementName}" + }, + "input":{"shape":"DescribePlacementRequest"}, + "output":{"shape":"DescribePlacementResponse"}, + "errors":[ + {"shape":"InternalFailureException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Describes a placement in a project.

" + }, + "DescribeProject":{ + "name":"DescribeProject", + "http":{ + "method":"GET", + "requestUri":"/projects/{projectName}" + }, + "input":{"shape":"DescribeProjectRequest"}, + "output":{"shape":"DescribeProjectResponse"}, + "errors":[ + {"shape":"InternalFailureException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns an object describing a project.

" + }, + "DisassociateDeviceFromPlacement":{ + "name":"DisassociateDeviceFromPlacement", + "http":{ + "method":"DELETE", + "requestUri":"/projects/{projectName}/placements/{placementName}/devices/{deviceTemplateName}" + }, + "input":{"shape":"DisassociateDeviceFromPlacementRequest"}, + "output":{"shape":"DisassociateDeviceFromPlacementResponse"}, + "errors":[ + {"shape":"InternalFailureException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Removes a physical device from a placement.

" + }, + "GetDevicesInPlacement":{ + "name":"GetDevicesInPlacement", + "http":{ + "method":"GET", + "requestUri":"/projects/{projectName}/placements/{placementName}/devices" + }, + "input":{"shape":"GetDevicesInPlacementRequest"}, + "output":{"shape":"GetDevicesInPlacementResponse"}, + "errors":[ + {"shape":"InternalFailureException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns an object enumerating the devices in a placement.

" + }, + "ListPlacements":{ + "name":"ListPlacements", + "http":{ + "method":"GET", + "requestUri":"/projects/{projectName}/placements" + }, + "input":{"shape":"ListPlacementsRequest"}, + "output":{"shape":"ListPlacementsResponse"}, + "errors":[ + {"shape":"InternalFailureException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Lists the placement(s) of a project.

" + }, + "ListProjects":{ + "name":"ListProjects", + "http":{ + "method":"GET", + "requestUri":"/projects" + }, + "input":{"shape":"ListProjectsRequest"}, + "output":{"shape":"ListProjectsResponse"}, + "errors":[ + {"shape":"InternalFailureException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Lists the AWS IoT 1-Click project(s) associated with your AWS account and region.

" + }, + "UpdatePlacement":{ + "name":"UpdatePlacement", + "http":{ + "method":"PUT", + "requestUri":"/projects/{projectName}/placements/{placementName}" + }, + "input":{"shape":"UpdatePlacementRequest"}, + "output":{"shape":"UpdatePlacementResponse"}, + "errors":[ + {"shape":"InternalFailureException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Updates a placement with the given attributes. To clear an attribute, pass an empty value (i.e., \"\").

" + }, + "UpdateProject":{ + "name":"UpdateProject", + "http":{ + "method":"PUT", + "requestUri":"/projects/{projectName}" + }, + "input":{"shape":"UpdateProjectRequest"}, + "output":{"shape":"UpdateProjectResponse"}, + "errors":[ + {"shape":"InternalFailureException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Updates a project associated with your AWS account and region. With the exception of device template names, you can pass just the values that need to be updated because the update request will change only the values that are provided. To clear a value, pass the empty string (i.e., \"\").

" + } + }, + "shapes":{ + "AssociateDeviceWithPlacementRequest":{ + "type":"structure", + "required":[ + "projectName", + "placementName", + "deviceId", + "deviceTemplateName" + ], + "members":{ + "projectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project containing the placement in which to associate the device.

", + "location":"uri", + "locationName":"projectName" + }, + "placementName":{ + "shape":"PlacementName", + "documentation":"

The name of the placement in which to associate the device.

", + "location":"uri", + "locationName":"placementName" + }, + "deviceId":{ + "shape":"DeviceId", + "documentation":"

The ID of the physical device to be associated with the given placement in the project. Note that a mandatory 4 character prefix is required for all deviceId values.

" + }, + "deviceTemplateName":{ + "shape":"DeviceTemplateName", + "documentation":"

The device template name to associate with the device ID.

", + "location":"uri", + "locationName":"deviceTemplateName" + } + } + }, + "AssociateDeviceWithPlacementResponse":{ + "type":"structure", + "members":{ + } + }, + "AttributeDefaultValue":{ + "type":"string", + "max":800 + }, + "AttributeName":{ + "type":"string", + "max":128, + "min":1 + }, + "AttributeValue":{ + "type":"string", + "max":800 + }, + "Code":{"type":"string"}, + "CreatePlacementRequest":{ + "type":"structure", + "required":[ + "placementName", + "projectName" + ], + "members":{ + "placementName":{ + "shape":"PlacementName", + "documentation":"

The name of the placement to be created.

" + }, + "projectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project in which to create the placement.

", + "location":"uri", + "locationName":"projectName" + }, + "attributes":{ + "shape":"PlacementAttributeMap", + "documentation":"

Optional user-defined key/value pairs providing contextual data (such as location or function) for the placement.

" + } + } + }, + "CreatePlacementResponse":{ + "type":"structure", + "members":{ + } + }, + "CreateProjectRequest":{ + "type":"structure", + "required":["projectName"], + "members":{ + "projectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project to create.

" + }, + "description":{ + "shape":"Description", + "documentation":"

An optional description for the project.

" + }, + "placementTemplate":{ + "shape":"PlacementTemplate", + "documentation":"

The schema defining the placement to be created. A placement template defines placement default attributes and device templates. You cannot add or remove device templates after the project has been created. However, you can update callbackOverrides for the device templates using the UpdateProject API.

" + } + } + }, + "CreateProjectResponse":{ + "type":"structure", + "members":{ + } + }, + "DefaultPlacementAttributeMap":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeDefaultValue"} + }, + "DeletePlacementRequest":{ + "type":"structure", + "required":[ + "placementName", + "projectName" + ], + "members":{ + "placementName":{ + "shape":"PlacementName", + "documentation":"

The name of the empty placement to delete.

", + "location":"uri", + "locationName":"placementName" + }, + "projectName":{ + "shape":"ProjectName", + "documentation":"

The project containing the empty placement to delete.

", + "location":"uri", + "locationName":"projectName" + } + } + }, + "DeletePlacementResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteProjectRequest":{ + "type":"structure", + "required":["projectName"], + "members":{ + "projectName":{ + "shape":"ProjectName", + "documentation":"

The name of the empty project to delete.

", + "location":"uri", + "locationName":"projectName" + } + } + }, + "DeleteProjectResponse":{ + "type":"structure", + "members":{ + } + }, + "DescribePlacementRequest":{ + "type":"structure", + "required":[ + "placementName", + "projectName" + ], + "members":{ + "placementName":{ + "shape":"PlacementName", + "documentation":"

The name of the placement within a project.

", + "location":"uri", + "locationName":"placementName" + }, + "projectName":{ + "shape":"ProjectName", + "documentation":"

The project containing the placement to be described.

", + "location":"uri", + "locationName":"projectName" + } + } + }, + "DescribePlacementResponse":{ + "type":"structure", + "required":["placement"], + "members":{ + "placement":{ + "shape":"PlacementDescription", + "documentation":"

An object describing the placement.

" + } + } + }, + "DescribeProjectRequest":{ + "type":"structure", + "required":["projectName"], + "members":{ + "projectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project to be described.

", + "location":"uri", + "locationName":"projectName" + } + } + }, + "DescribeProjectResponse":{ + "type":"structure", + "required":["project"], + "members":{ + "project":{ + "shape":"ProjectDescription", + "documentation":"

An object describing the project.

" + } + } + }, + "Description":{ + "type":"string", + "max":500, + "min":0 + }, + "DeviceCallbackKey":{ + "type":"string", + "max":128, + "min":1 + }, + "DeviceCallbackOverrideMap":{ + "type":"map", + "key":{"shape":"DeviceCallbackKey"}, + "value":{"shape":"DeviceCallbackValue"} + }, + "DeviceCallbackValue":{ + "type":"string", + "max":200 + }, + "DeviceId":{ + "type":"string", + "max":32, + "min":1 + }, + "DeviceMap":{ + "type":"map", + "key":{"shape":"DeviceTemplateName"}, + "value":{"shape":"DeviceId"} + }, + "DeviceTemplate":{ + "type":"structure", + "members":{ + "deviceType":{ + "shape":"DeviceType", + "documentation":"

The device type, which currently must be \"button\".

" + }, + "callbackOverrides":{ + "shape":"DeviceCallbackOverrideMap", + "documentation":"

An optional Lambda function to invoke instead of the default Lambda function provided by the placement template.

" + } + }, + "documentation":"

An object representing a device for a placement template (see PlacementTemplate).

" + }, + "DeviceTemplateMap":{ + "type":"map", + "key":{"shape":"DeviceTemplateName"}, + "value":{"shape":"DeviceTemplate"} + }, + "DeviceTemplateName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[a-zA-Z0-9_-]+$" + }, + "DeviceType":{ + "type":"string", + "max":128 + }, + "DisassociateDeviceFromPlacementRequest":{ + "type":"structure", + "required":[ + "projectName", + "placementName", + "deviceTemplateName" + ], + "members":{ + "projectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project that contains the placement.

", + "location":"uri", + "locationName":"projectName" + }, + "placementName":{ + "shape":"PlacementName", + "documentation":"

The name of the placement that the device should be removed from.

", + "location":"uri", + "locationName":"placementName" + }, + "deviceTemplateName":{ + "shape":"DeviceTemplateName", + "documentation":"

The device template name of the device that should be removed from the placement.

", + "location":"uri", + "locationName":"deviceTemplateName" + } + } + }, + "DisassociateDeviceFromPlacementResponse":{ + "type":"structure", + "members":{ + } + }, + "GetDevicesInPlacementRequest":{ + "type":"structure", + "required":[ + "projectName", + "placementName" + ], + "members":{ + "projectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project containing the placement.

", + "location":"uri", + "locationName":"projectName" + }, + "placementName":{ + "shape":"PlacementName", + "documentation":"

The name of the placement to get the devices from.

", + "location":"uri", + "locationName":"placementName" + } + } + }, + "GetDevicesInPlacementResponse":{ + "type":"structure", + "required":["devices"], + "members":{ + "devices":{ + "shape":"DeviceMap", + "documentation":"

An object containing the devices (zero or more) within the placement.

" + } + } + }, + "InternalFailureException":{ + "type":"structure", + "required":[ + "code", + "message" + ], + "members":{ + "code":{"shape":"Code"}, + "message":{"shape":"Message"} + }, + "documentation":"

", + "error":{"httpStatusCode":500}, + "exception":true + }, + "InvalidRequestException":{ + "type":"structure", + "required":[ + "code", + "message" + ], + "members":{ + "code":{"shape":"Code"}, + "message":{"shape":"Message"} + }, + "documentation":"

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ListPlacementsRequest":{ + "type":"structure", + "required":["projectName"], + "members":{ + "projectName":{ + "shape":"ProjectName", + "documentation":"

The project containing the placements to be listed.

", + "location":"uri", + "locationName":"projectName" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return per request. If not set, a default value of 100 is used.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListPlacementsResponse":{ + "type":"structure", + "required":["placements"], + "members":{ + "placements":{ + "shape":"PlacementSummaryList", + "documentation":"

An object listing the requested placements.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token used to retrieve the next set of results - will be effectively empty if there are no further results.

" + } + } + }, + "ListProjectsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return per request. If not set, a default value of 100 is used.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListProjectsResponse":{ + "type":"structure", + "required":["projects"], + "members":{ + "projects":{ + "shape":"ProjectSummaryList", + "documentation":"

An object containing the list of projects.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token used to retrieve the next set of results - will be effectively empty if there are no further results.

" + } + } + }, + "MaxResults":{ + "type":"integer", + "max":250, + "min":1 + }, + "Message":{"type":"string"}, + "NextToken":{ + "type":"string", + "max":1024, + "min":1 + }, + "PlacementAttributeMap":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValue"} + }, + "PlacementDescription":{ + "type":"structure", + "required":[ + "projectName", + "placementName", + "attributes", + "createdDate", + "updatedDate" + ], + "members":{ + "projectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project containing the placement.

" + }, + "placementName":{ + "shape":"PlacementName", + "documentation":"

The name of the placement.

" + }, + "attributes":{ + "shape":"PlacementAttributeMap", + "documentation":"

The user-defined attributes associated with the placement.

" + }, + "createdDate":{ + "shape":"Time", + "documentation":"

The date when the placement was initially created, in UNIX epoch time format.

" + }, + "updatedDate":{ + "shape":"Time", + "documentation":"

The date when the placement was last updated, in UNIX epoch time format. If the placement was not updated, then createdDate and updatedDate are the same.

" + } + }, + "documentation":"

An object describing a project's placement.

" + }, + "PlacementName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[a-zA-Z0-9_-]+$" + }, + "PlacementSummary":{ + "type":"structure", + "required":[ + "projectName", + "placementName", + "createdDate", + "updatedDate" + ], + "members":{ + "projectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project containing the placement.

" + }, + "placementName":{ + "shape":"PlacementName", + "documentation":"

The name of the placement being summarized.

" + }, + "createdDate":{ + "shape":"Time", + "documentation":"

The date when the placement was originally created, in UNIX epoch time format.

" + }, + "updatedDate":{ + "shape":"Time", + "documentation":"

The date when the placement was last updated, in UNIX epoch time format. If the placement was not updated, then createdDate and updatedDate are the same.

" + } + }, + "documentation":"

An object providing summary information for a particular placement.

" + }, + "PlacementSummaryList":{ + "type":"list", + "member":{"shape":"PlacementSummary"} + }, + "PlacementTemplate":{ + "type":"structure", + "members":{ + "defaultAttributes":{ + "shape":"DefaultPlacementAttributeMap", + "documentation":"

The default attributes (key/value pairs) to be applied to all placements using this template.

" + }, + "deviceTemplates":{ + "shape":"DeviceTemplateMap", + "documentation":"

An object specifying the DeviceTemplate for all placements using this (PlacementTemplate) template.

" + } + }, + "documentation":"

An object defining the template for a placement.

" + }, + "ProjectDescription":{ + "type":"structure", + "required":[ + "projectName", + "createdDate", + "updatedDate" + ], + "members":{ + "projectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project about which to obtain information.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the project.

" + }, + "createdDate":{ + "shape":"Time", + "documentation":"

The date when the project was originally created, in UNIX epoch time format.

" + }, + "updatedDate":{ + "shape":"Time", + "documentation":"

The date when the project was last updated, in UNIX epoch time format. If the project was not updated, then createdDate and updatedDate are the same.

" + }, + "placementTemplate":{ + "shape":"PlacementTemplate", + "documentation":"

An object describing the project's placement specifications.

" + } + }, + "documentation":"

An object providing detailed information for a particular project associated with an AWS account and region.

" + }, + "ProjectName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[0-9A-Za-z_-]+$" + }, + "ProjectSummary":{ + "type":"structure", + "required":[ + "projectName", + "createdDate", + "updatedDate" + ], + "members":{ + "projectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project being summarized.

" + }, + "createdDate":{ + "shape":"Time", + "documentation":"

The date when the project was originally created, in UNIX epoch time format.

" + }, + "updatedDate":{ + "shape":"Time", + "documentation":"

The date when the project was last updated, in UNIX epoch time format. If the project was not updated, then createdDate and updatedDate are the same.

" + } + }, + "documentation":"

An object providing summary information for a particular project for an associated AWS account and region.

" + }, + "ProjectSummaryList":{ + "type":"list", + "member":{"shape":"ProjectSummary"} + }, + "ResourceConflictException":{ + "type":"structure", + "required":[ + "code", + "message" + ], + "members":{ + "code":{"shape":"Code"}, + "message":{"shape":"Message"} + }, + "documentation":"

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "code", + "message" + ], + "members":{ + "code":{"shape":"Code"}, + "message":{"shape":"Message"} + }, + "documentation":"

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "Time":{"type":"timestamp"}, + "TooManyRequestsException":{ + "type":"structure", + "required":[ + "code", + "message" + ], + "members":{ + "code":{"shape":"Code"}, + "message":{"shape":"Message"} + }, + "documentation":"

", + "error":{"httpStatusCode":429}, + "exception":true + }, + "UpdatePlacementRequest":{ + "type":"structure", + "required":[ + "placementName", + "projectName" + ], + "members":{ + "placementName":{ + "shape":"PlacementName", + "documentation":"

The name of the placement to update.

", + "location":"uri", + "locationName":"placementName" + }, + "projectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project containing the placement to be updated.

", + "location":"uri", + "locationName":"projectName" + }, + "attributes":{ + "shape":"PlacementAttributeMap", + "documentation":"

The user-defined object of attributes used to update the placement. The maximum number of key/value pairs is 50.

" + } + } + }, + "UpdatePlacementResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateProjectRequest":{ + "type":"structure", + "required":["projectName"], + "members":{ + "projectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project to be updated.

", + "location":"uri", + "locationName":"projectName" + }, + "description":{ + "shape":"Description", + "documentation":"

An optional user-defined description for the project.

" + }, + "placementTemplate":{ + "shape":"PlacementTemplate", + "documentation":"

An object defining the project update. Once a project has been created, you cannot add device template names to the project. However, for a given placementTemplate, you can update the associated callbackOverrides for the device definition using this API.

" + } + } + }, + "UpdateProjectResponse":{ + "type":"structure", + "members":{ + } + } + }, + "documentation":"

The AWS IoT 1-Click Project API Reference

" +} diff --git a/botocore/data/iotanalytics/2017-11-27/service-2.json b/botocore/data/iotanalytics/2017-11-27/service-2.json index 6afad021..f0b04f31 100644 --- a/botocore/data/iotanalytics/2017-11-27/service-2.json +++ b/botocore/data/iotanalytics/2017-11-27/service-2.json @@ -389,6 +389,24 @@ ], "documentation":"

Retrieves a list of pipelines.

" }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Lists the tags (metadata) which you have assigned to the resource.

" + }, "PutLoggingOptions":{ "name":"PutLoggingOptions", "http":{ @@ -402,7 +420,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Sets or updates the AWS IoT Analytics logging options.

" + "documentation":"

Sets or updates the AWS IoT Analytics logging options.

Note that if you update the value of any loggingOptions field, it takes up to one minute for the change to take effect. Also, if you change the policy attached to the role you specified in the roleArn field (for example, to correct an invalid policy) it takes up to 5 minutes for that change to take effect.

" }, "RunPipelineActivity":{ "name":"RunPipelineActivity", @@ -455,6 +473,44 @@ ], "documentation":"

Starts the reprocessing of raw message data through the pipeline.

" }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags", + "responseCode":204 + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Adds to or modifies the tags of the given resource. Tags are metadata which can be used to manage a resource.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags", + "responseCode":204 + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Removes the given tags (metadata) from the resource.

" + }, "UpdateChannel":{ "name":"UpdateChannel", "http":{ @@ -750,6 +806,10 @@ "retentionPeriod":{ "shape":"RetentionPeriod", "documentation":"

How long, in days, message data is kept for the channel.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

Metadata which can be used to manage the channel.

" } } }, @@ -800,6 +860,10 @@ "triggers":{ "shape":"DatasetTriggers", "documentation":"

A list of triggers. A trigger causes data set content to be populated at a specified time or time interval. The list of triggers can be empty or contain up to five DataSetTrigger objects.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

Metadata which can be used to manage the data set.

" } } }, @@ -827,6 +891,10 @@ "retentionPeriod":{ "shape":"RetentionPeriod", "documentation":"

How long, in days, message data is kept for the data store.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

Metadata which can be used to manage the data store.

" } } }, @@ -861,6 +929,10 @@ "pipelineActivities":{ "shape":"PipelineActivities", "documentation":"

A list of pipeline activities.

The list can be 1-25 PipelineActivity objects. Activities perform transformations on your messages, such as removing, renaming, or adding message attributes; filtering messages based on attribute values; invoking your Lambda functions on messages for advanced processing; or performing mathematical transformations to normalize device data.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

Metadata which can be used to manage the pipeline.

" } } }, @@ -1599,6 +1671,27 @@ } } }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

The ARN of the resource whose tags you want to list.

", + "location":"querystring", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagList", + "documentation":"

The tags (metadata) which you have assigned to the resource.

" + } + } + }, "LogResult":{"type":"string"}, "LoggingEnabled":{"type":"boolean"}, "LoggingLevel":{ @@ -1679,7 +1772,7 @@ "members":{ "messageId":{ "shape":"MessageId", - "documentation":"

The ID you wish to assign to the message.

" + "documentation":"

The ID you wish to assign to the message. Each \"messageId\" must be unique within each batch sent.

" }, "payload":{ "shape":"MessagePayload", @@ -1903,6 +1996,11 @@ "error":{"httpStatusCode":409}, "exception":true }, + "ResourceArn":{ + "type":"string", + "max":2048, + "min":20 + }, "ResourceNotFoundException":{ "type":"structure", "members":{ @@ -2089,6 +2187,70 @@ } }, "StartTime":{"type":"timestamp"}, + "Tag":{ + "type":"structure", + "required":[ + "key", + "value" + ], + "members":{ + "key":{ + "shape":"TagKey", + "documentation":"

The tag's key.

" + }, + "value":{ + "shape":"TagValue", + "documentation":"

The tag's value.

" + } + }, + "documentation":"

A set of key/value pairs which are used to manage the resource.

" + }, + "TagKey":{ + "type":"string", + "max":256, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":1 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":50, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

The ARN of the resource whose tags will be modified.

", + "location":"querystring", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagList", + "documentation":"

The new or modified tags for the resource.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":1 + }, "ThrottlingException":{ "type":"structure", "members":{ @@ -2100,6 +2262,32 @@ }, "Timestamp":{"type":"timestamp"}, "UnlimitedRetentionPeriod":{"type":"boolean"}, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

The ARN of the resource whose tags will be removed.

", + "location":"querystring", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

The keys of those tags which will be removed.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateChannelRequest":{ "type":"structure", "required":["channelName"], @@ -2178,5 +2366,5 @@ "resourceArn":{"type":"string"}, "resourceId":{"type":"string"} }, - "documentation":"

AWS IoT Analytics provides advanced data analysis for AWS IoT. It allows you to collect large amounts of device data, process messages, store them, and then query the data and run sophisticated analytics to make accurate decisions in your IoT applications and machine learning use cases. AWS IoT Analytics enables advanced data exploration through integration with Jupyter Notebooks and data visualization through integration with Amazon QuickSight.

Traditional analytics and business intelligence tools are designed to process structured data. IoT data often comes from devices that record noisy processes (such as temperature, motion, or sound). As a result, the data from these devices can have significant gaps, corrupted messages, and false readings that must be cleaned up before analysis can occur. Also, IoT data is often only meaningful in the context of other data from external sources.

AWS IoT Analytics automates each of the steps required to analyze data from IoT devices. AWS IoT Analytics filters, transforms, and enriches IoT data before storing it in a time-series data store for analysis. You can set up the service to collect only the data you need from your devices, apply mathematical transforms to process the data, and enrich the data with device-specific metadata such as device type and location before storing it. Then, you can analyze your data by running queries using the built-in SQL query engine, or perform more complex analytics and machine learning inference. AWS IoT Analytics includes models for common IoT use cases so you can answer questions like which devices are about to fail or which customers are at risk of abandoning their wearable devices.

" + "documentation":"

AWS IoT Analytics allows you to collect large amounts of device data, process messages, and store them. You can then query the data and run sophisticated analytics on it. AWS IoT Analytics enables advanced data exploration through integration with Jupyter Notebooks and data visualization through integration with Amazon QuickSight.

Traditional analytics and business intelligence tools are designed to process structured data. IoT data often comes from devices that record noisy processes (such as temperature, motion, or sound). As a result the data from these devices can have significant gaps, corrupted messages, and false readings that must be cleaned up before analysis can occur. Also, IoT data is often only meaningful in the context of other data from external sources.

AWS IoT Analytics automates the steps required to analyze data from IoT devices. AWS IoT Analytics filters, transforms, and enriches IoT data before storing it in a time-series data store for analysis. You can set up the service to collect only the data you need from your devices, apply mathematical transforms to process the data, and enrich the data with device-specific metadata such as device type and location before storing it. Then, you can analyze your data by running queries using the built-in SQL query engine, or perform more complex analytics and machine learning inference. AWS IoT Analytics includes pre-built models for common IoT use cases so you can answer questions like which devices are about to fail or which customers are at risk of abandoning their wearable devices.

" } diff --git a/botocore/data/lambda/2015-03-31/service-2.json b/botocore/data/lambda/2015-03-31/service-2.json index 46ab59e0..6d406f88 100644 --- a/botocore/data/lambda/2015-03-31/service-2.json +++ b/botocore/data/lambda/2015-03-31/service-2.json @@ -64,7 +64,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Identifies a stream as an event source for a Lambda function. It can be either an Amazon Kinesis stream or an Amazon DynamoDB stream. AWS Lambda invokes the specified function when records are posted to the stream.

This association between a stream source and a Lambda function is called the event source mapping.

You provide mapping information (for example, which stream to read from and which Lambda function to invoke) in the request body.

Each event source, such as an Amazon Kinesis or a DynamoDB stream, can be associated with multiple AWS Lambda functions. A given Lambda function can be associated with multiple AWS event sources.

If you are using versioning, you can specify a specific function version or an alias via the function name parameter. For more information about versioning, see AWS Lambda Function Versioning and Aliases.

This operation requires permission for the lambda:CreateEventSourceMapping action.

" + "documentation":"

Identifies a poll-based event source for a Lambda function. It can be either an Amazon Kinesis or DynamoDB stream, or an Amazon SQS queue. AWS Lambda invokes the specified function when records are posted to the event source.

This association between a poll-based source and a Lambda function is called the event source mapping.

You provide mapping information (for example, which stream or SQS queue to read from and which Lambda function to invoke) in the request body.

Amazon Kinesis or DynamoDB stream event sources can be associated with multiple AWS Lambda functions and a given Lambda function can be associated with multiple AWS event sources. For Amazon SQS, you can configure multiple queues as event sources for a single Lambda function, but an SQS queue can be mapped only to a single Lambda function.

If you are using versioning, you can specify a specific function version or an alias via the function name parameter. For more information about versioning, see AWS Lambda Function Versioning and Aliases.

This operation requires permission for the lambda:CreateEventSourceMapping action.

" }, "CreateFunction":{ "name":"CreateFunction", @@ -113,7 +113,8 @@ {"shape":"ServiceException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"} + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceInUseException"} ], "documentation":"

Removes an event source mapping. This means AWS Lambda will no longer invoke the function for events in the associated source.

This operation requires permission for the lambda:DeleteEventSourceMapping action.

" }, @@ -500,7 +501,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"TooManyRequestsException"}, - {"shape":"ResourceConflictException"} + {"shape":"ResourceConflictException"}, + {"shape":"ResourceInUseException"} ], "documentation":"

You can update an event source mapping. This is useful if you want to change the parameters of the existing mapping without losing your position in the stream. You can change which function will receive the stream records, but to change the stream itself, you must create a new mapping.

If you are using the versioning feature, you can update the event source mapping to map to a specific Lambda function version or alias as described in the FunctionName parameter. For information about the versioning feature, see AWS Lambda Function Versioning and Aliases.

If you disable the event source mapping, AWS Lambda stops polling. If you enable again, it will resume polling from the time it had stopped polling, so you don't lose processing of any records. However, if you delete event source mapping and create it again, it will reset.

This operation requires permission for the lambda:UpdateEventSourceMapping action.

" }, @@ -787,13 +789,12 @@ "type":"structure", "required":[ "EventSourceArn", - "FunctionName", - "StartingPosition" + "FunctionName" ], "members":{ "EventSourceArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon Kinesis or the Amazon DynamoDB stream that is the event source. Any record added to this stream could cause AWS Lambda to invoke your Lambda function, it depends on the BatchSize. AWS Lambda POSTs the Amazon Kinesis event, containing records, to your Lambda function as JSON.

" + "documentation":"

The Amazon Resource Name (ARN) of the event source. Any record added to this source could cause AWS Lambda to invoke your Lambda function, it depends on the BatchSize. AWS Lambda POSTs the event's records to your Lambda function as JSON.

" }, "FunctionName":{ "shape":"FunctionName", @@ -805,7 +806,7 @@ }, "BatchSize":{ "shape":"BatchSize", - "documentation":"

The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. Your function receives an event with all the retrieved records. The default is 100 records.

" + "documentation":"

The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. Your function receives an event with all the retrieved records. The default for Amazon Kinesis and Amazon DynamoDB is 100 records. For SQS, the default is 1.

" }, "StartingPosition":{ "shape":"EventSourcePosition", @@ -1073,11 +1074,11 @@ }, "EventSourceArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon Kinesis stream that is the source of events.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Kinesis or DynamoDB stream or the SQS queue that is the source of events.

" }, "FunctionArn":{ "shape":"FunctionArn", - "documentation":"

The Lambda function to invoke when AWS Lambda detects an event on the stream.

" + "documentation":"

The Lambda function to invoke when AWS Lambda detects an event on the poll-based source.

" }, "LastModified":{ "shape":"Date", @@ -1096,7 +1097,7 @@ "documentation":"

The reason the event source mapping is in its current state. It is either user-requested or an AWS Lambda-initiated state transition.

" } }, - "documentation":"

Describes mapping between an Amazon Kinesis stream and a Lambda function.

" + "documentation":"

Describes mapping between an Amazon Kinesis or DynamoDB stream or an Amazon SQS queue and a Lambda function.

" }, "EventSourceMappingsList":{ "type":"list", @@ -1674,7 +1675,7 @@ "members":{ "EventSourceArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon Kinesis stream. (This parameter is optional.)

", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Kinesis or DynamoDB stream, or an SQS queue. (This parameter is optional.)

", "location":"querystring", "locationName":"EventSourceArn" }, @@ -2003,6 +2004,16 @@ "error":{"httpStatusCode":409}, "exception":true }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "Message":{"shape":"String"} + }, + "documentation":"

The operation conflicts with the resource's availability. For example, you attempted to update an EventSource Mapping in CREATING, or tried to delete an EventSource mapping currently in the UPDATING state.

", + "error":{"httpStatusCode":400}, + "exception":true + }, "ResourceNotFoundException":{ "type":"structure", "members":{ @@ -2029,6 +2040,7 @@ "python3.6", "dotnetcore1.0", "dotnetcore2.0", + "dotnetcore2.1", "nodejs4.3-edge", "go1.x" ] diff --git a/botocore/data/logs/2014-03-28/service-2.json b/botocore/data/logs/2014-03-28/service-2.json index 2a8bc3e4..1a478350 100644 --- a/botocore/data/logs/2014-03-28/service-2.json +++ b/botocore/data/logs/2014-03-28/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceFullName":"Amazon CloudWatch Logs", + "serviceId":"CloudWatch Logs", "signatureVersion":"v4", "targetPrefix":"Logs_20140328", "uid":"logs-2014-03-28" diff --git a/botocore/data/machinelearning/2014-12-12/service-2.json b/botocore/data/machinelearning/2014-12-12/service-2.json index 8fb0d8f2..5e849383 100644 --- a/botocore/data/machinelearning/2014-12-12/service-2.json +++ b/botocore/data/machinelearning/2014-12-12/service-2.json @@ -6,6 +6,7 @@ "endpointPrefix":"machinelearning", "jsonVersion":"1.1", "serviceFullName":"Amazon Machine Learning", + "serviceId":"Machine Learning", "signatureVersion":"v4", "targetPrefix":"AmazonML_20141212", "protocol":"json" diff --git a/botocore/data/macie/2017-12-19/paginators-1.json b/botocore/data/macie/2017-12-19/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/macie/2017-12-19/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/macie/2017-12-19/service-2.json b/botocore/data/macie/2017-12-19/service-2.json new file mode 100644 index 00000000..c3114b69 --- /dev/null +++ b/botocore/data/macie/2017-12-19/service-2.json @@ -0,0 +1,495 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2017-12-19", + "endpointPrefix":"macie", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"Amazon Macie", + "serviceId":"Macie", + "signatureVersion":"v4", + "targetPrefix":"MacieService", + "uid":"macie-2017-12-19" + 
}, + "operations":{ + "AssociateMemberAccount":{ + "name":"AssociateMemberAccount", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateMemberAccountRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalException"} + ], + "documentation":"

Associates a specified AWS account with Amazon Macie as a member account.

" + }, + "AssociateS3Resources":{ + "name":"AssociateS3Resources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateS3ResourcesRequest"}, + "output":{"shape":"AssociateS3ResourcesResult"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalException"} + ], + "documentation":"

Associates specified S3 resources with Amazon Macie for monitoring and data classification. If memberAccountId isn't specified, the action associates specified S3 resources with Macie for the current master account. If memberAccountId is specified, the action associates specified S3 resources with Macie for the specified member account.

" + }, + "DisassociateMemberAccount":{ + "name":"DisassociateMemberAccount", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateMemberAccountRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InternalException"} + ], + "documentation":"

Removes the specified member account from Amazon Macie.

" + }, + "DisassociateS3Resources":{ + "name":"DisassociateS3Resources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateS3ResourcesRequest"}, + "output":{"shape":"DisassociateS3ResourcesResult"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalException"} + ], + "documentation":"

Removes specified S3 resources from being monitored by Amazon Macie. If memberAccountId isn't specified, the action removes specified S3 resources from Macie for the current master account. If memberAccountId is specified, the action removes specified S3 resources from Macie for the specified member account.

" + }, + "ListMemberAccounts":{ + "name":"ListMemberAccounts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListMemberAccountsRequest"}, + "output":{"shape":"ListMemberAccountsResult"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Lists all Amazon Macie member accounts for the current Amazon Macie master account.

" + }, + "ListS3Resources":{ + "name":"ListS3Resources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListS3ResourcesRequest"}, + "output":{"shape":"ListS3ResourcesResult"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalException"} + ], + "documentation":"

Lists all the S3 resources associated with Amazon Macie. If memberAccountId isn't specified, the action lists the S3 resources associated with Amazon Macie for the current master account. If memberAccountId is specified, the action lists the S3 resources associated with Amazon Macie for the specified member account.

" + }, + "UpdateS3Resources":{ + "name":"UpdateS3Resources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateS3ResourcesRequest"}, + "output":{"shape":"UpdateS3ResourcesResult"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalException"} + ], + "documentation":"

Updates the classification types for the specified S3 resources. If memberAccountId isn't specified, the action updates the classification types of the S3 resources associated with Amazon Macie for the current master account. If memberAccountId is specified, the action updates the classification types of the S3 resources associated with Amazon Macie for the specified member account.

" + } + }, + "shapes":{ + "AWSAccountId":{ + "type":"string", + "pattern":"[0-9]{12}" + }, + "AccessDeniedException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"}, + "resourceType":{"shape":"ResourceType"} + }, + "documentation":"

You do not have required permissions to access the requested resource.

", + "exception":true + }, + "AssociateMemberAccountRequest":{ + "type":"structure", + "required":["memberAccountId"], + "members":{ + "memberAccountId":{ + "shape":"AWSAccountId", + "documentation":"

The ID of the AWS account that you want to associate with Amazon Macie as a member account.

" + } + } + }, + "AssociateS3ResourcesRequest":{ + "type":"structure", + "required":["s3Resources"], + "members":{ + "memberAccountId":{ + "shape":"AWSAccountId", + "documentation":"

The ID of the Amazon Macie member account whose resources you want to associate with Macie.

" + }, + "s3Resources":{ + "shape":"S3ResourcesClassification", + "documentation":"

The S3 resources that you want to associate with Amazon Macie for monitoring and data classification.

" + } + } + }, + "AssociateS3ResourcesResult":{ + "type":"structure", + "members":{ + "failedS3Resources":{ + "shape":"FailedS3Resources", + "documentation":"

S3 resources that couldn't be associated with Amazon Macie. An error code and an error message are provided for each failed item.

" + } + } + }, + "BucketName":{ + "type":"string", + "max":500 + }, + "ClassificationType":{ + "type":"structure", + "required":[ + "oneTime", + "continuous" + ], + "members":{ + "oneTime":{ + "shape":"S3OneTimeClassificationType", + "documentation":"

A one-time classification of all of the existing objects in a specified S3 bucket.

" + }, + "continuous":{ + "shape":"S3ContinuousClassificationType", + "documentation":"

A continuous classification of the objects that are added to a specified S3 bucket. Amazon Macie begins performing continuous classification after a bucket is successfully associated with Amazon Macie.

" + } + }, + "documentation":"

The classification type that Amazon Macie applies to the associated S3 resources.

" + }, + "ClassificationTypeUpdate":{ + "type":"structure", + "members":{ + "oneTime":{ + "shape":"S3OneTimeClassificationType", + "documentation":"

A one-time classification of all of the existing objects in a specified S3 bucket.

" + }, + "continuous":{ + "shape":"S3ContinuousClassificationType", + "documentation":"

A continuous classification of the objects that are added to a specified S3 bucket. Amazon Macie begins performing continuous classification after a bucket is successfully associated with Amazon Macie.

" + } + }, + "documentation":"

The classification type that Amazon Macie applies to the associated S3 resources. At least one of the classification types (oneTime or continuous) must be specified.

" + }, + "DisassociateMemberAccountRequest":{ + "type":"structure", + "required":["memberAccountId"], + "members":{ + "memberAccountId":{ + "shape":"AWSAccountId", + "documentation":"

The ID of the member account that you want to remove from Amazon Macie.

" + } + } + }, + "DisassociateS3ResourcesRequest":{ + "type":"structure", + "required":["associatedS3Resources"], + "members":{ + "memberAccountId":{ + "shape":"AWSAccountId", + "documentation":"

The ID of the Amazon Macie member account whose resources you want to remove from being monitored by Amazon Macie.

" + }, + "associatedS3Resources":{ + "shape":"S3Resources", + "documentation":"

The S3 resources (buckets or prefixes) that you want to remove from being monitored and classified by Amazon Macie.

" + } + } + }, + "DisassociateS3ResourcesResult":{ + "type":"structure", + "members":{ + "failedS3Resources":{ + "shape":"FailedS3Resources", + "documentation":"

S3 resources that couldn't be removed from being monitored and classified by Amazon Macie. An error code and an error message are provided for each failed item.

" + } + } + }, + "ErrorCode":{ + "type":"string", + "documentation":"Error code for the exception", + "max":10 + }, + "ExceptionMessage":{ + "type":"string", + "max":10000 + }, + "FailedS3Resource":{ + "type":"structure", + "members":{ + "failedItem":{ + "shape":"S3Resource", + "documentation":"

The failed S3 resources.

" + }, + "errorCode":{ + "shape":"ErrorCode", + "documentation":"

The status code of a failed item.

" + }, + "errorMessage":{ + "shape":"ExceptionMessage", + "documentation":"

The error message of a failed item.

" + } + }, + "documentation":"

Includes details about the failed S3 resources.

" + }, + "FailedS3Resources":{ + "type":"list", + "member":{"shape":"FailedS3Resource"} + }, + "FieldName":{ + "type":"string", + "documentation":"Field that has invalid input", + "max":1000 + }, + "InternalException":{ + "type":"structure", + "members":{ + "errorCode":{"shape":"ErrorCode"}, + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Internal server error.

", + "exception":true, + "fault":true + }, + "InvalidInputException":{ + "type":"structure", + "members":{ + "errorCode":{"shape":"ErrorCode"}, + "message":{"shape":"ExceptionMessage"}, + "fieldName":{"shape":"FieldName"} + }, + "documentation":"

The request was rejected because an invalid or out-of-range value was supplied for an input parameter.

", + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "errorCode":{"shape":"ErrorCode"}, + "message":{"shape":"ExceptionMessage"}, + "resourceType":{"shape":"ResourceType"} + }, + "documentation":"

The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.

", + "exception":true + }, + "ListMemberAccountsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

Use this parameter when paginating results. Set the value of this parameter to null on your first call to the ListMemberAccounts action. Subsequent calls to the action fill nextToken in the request with the value of nextToken from the previous response to continue listing data.

" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

Use this parameter to indicate the maximum number of items that you want in the response. The default value is 250.

" + } + } + }, + "ListMemberAccountsResult":{ + "type":"structure", + "members":{ + "memberAccounts":{ + "shape":"MemberAccounts", + "documentation":"

A list of the Amazon Macie member accounts returned by the action. The current master account is also included in this list.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to null.

" + } + } + }, + "ListS3ResourcesRequest":{ + "type":"structure", + "members":{ + "memberAccountId":{ + "shape":"AWSAccountId", + "documentation":"

The Amazon Macie member account ID whose associated S3 resources you want to list.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

Use this parameter when paginating results. Set its value to null on your first call to the ListS3Resources action. Subsequent calls to the action fill nextToken in the request with the value of nextToken from the previous response to continue listing data.

" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

Use this parameter to indicate the maximum number of items that you want in the response. The default value is 250.

" + } + } + }, + "ListS3ResourcesResult":{ + "type":"structure", + "members":{ + "s3Resources":{ + "shape":"S3ResourcesClassification", + "documentation":"

A list of the associated S3 resources returned by the action.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

When a response is generated, if there is more data to be listed, this parameter is present in the response and contains the value to use for the nextToken parameter in a subsequent pagination request. If there is no more data to be listed, this parameter is set to null.

" + } + } + }, + "MaxResults":{ + "type":"integer", + "box":true, + "max":250 + }, + "MemberAccount":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"AWSAccountId", + "documentation":"

The AWS account ID of the Amazon Macie member account.

" + } + }, + "documentation":"

Contains information about the Amazon Macie member account.

" + }, + "MemberAccounts":{ + "type":"list", + "member":{"shape":"MemberAccount"} + }, + "NextToken":{ + "type":"string", + "max":500 + }, + "Prefix":{ + "type":"string", + "max":10000 + }, + "ResourceType":{ + "type":"string", + "documentation":"Resource type that caused the exception", + "max":1000 + }, + "S3ContinuousClassificationType":{ + "type":"string", + "enum":["FULL"] + }, + "S3OneTimeClassificationType":{ + "type":"string", + "enum":[ + "FULL", + "NONE" + ] + }, + "S3Resource":{ + "type":"structure", + "required":["bucketName"], + "members":{ + "bucketName":{ + "shape":"BucketName", + "documentation":"

The name of the S3 bucket.

" + }, + "prefix":{ + "shape":"Prefix", + "documentation":"

The prefix of the S3 bucket.

" + } + }, + "documentation":"

Contains information about the S3 resource. This data type is used as a request parameter in the DisassociateS3Resources action and can be used as a response parameter in the AssociateS3Resources and UpdateS3Resources actions.

" + }, + "S3ResourceClassification":{ + "type":"structure", + "required":[ + "bucketName", + "classificationType" + ], + "members":{ + "bucketName":{ + "shape":"BucketName", + "documentation":"

The name of the S3 bucket that you want to associate with Amazon Macie.

" + }, + "prefix":{ + "shape":"Prefix", + "documentation":"

The prefix of the S3 bucket that you want to associate with Amazon Macie.

" + }, + "classificationType":{ + "shape":"ClassificationType", + "documentation":"

The classification type that you want to specify for the resource associated with Amazon Macie.

" + } + }, + "documentation":"

The S3 resources that you want to associate with Amazon Macie for monitoring and data classification. This data type is used as a request parameter in the AssociateS3Resources action and a response parameter in the ListS3Resources action.

" + }, + "S3ResourceClassificationUpdate":{ + "type":"structure", + "required":[ + "bucketName", + "classificationTypeUpdate" + ], + "members":{ + "bucketName":{ + "shape":"BucketName", + "documentation":"

The name of the S3 bucket whose classification types you want to update.

" + }, + "prefix":{ + "shape":"Prefix", + "documentation":"

The prefix of the S3 bucket whose classification types you want to update.

" + }, + "classificationTypeUpdate":{ + "shape":"ClassificationTypeUpdate", + "documentation":"

The classification type that you want to update for the resource associated with Amazon Macie.

" + } + }, + "documentation":"

The S3 resources whose classification types you want to update. This data type is used as a request parameter in the UpdateS3Resources action.

" + }, + "S3Resources":{ + "type":"list", + "member":{"shape":"S3Resource"} + }, + "S3ResourcesClassification":{ + "type":"list", + "member":{"shape":"S3ResourceClassification"} + }, + "S3ResourcesClassificationUpdate":{ + "type":"list", + "member":{"shape":"S3ResourceClassificationUpdate"} + }, + "UpdateS3ResourcesRequest":{ + "type":"structure", + "required":["s3ResourcesUpdate"], + "members":{ + "memberAccountId":{ + "shape":"AWSAccountId", + "documentation":"

The AWS ID of the Amazon Macie member account whose S3 resources' classification types you want to update.

" + }, + "s3ResourcesUpdate":{ + "shape":"S3ResourcesClassificationUpdate", + "documentation":"

The S3 resources whose classification types you want to update.

" + } + } + }, + "UpdateS3ResourcesResult":{ + "type":"structure", + "members":{ + "failedS3Resources":{ + "shape":"FailedS3Resources", + "documentation":"

The S3 resources whose classification types can't be updated. An error code and an error message are provided for each failed item.

" + } + } + } + }, + "documentation":"Amazon Macie

Amazon Macie is a security service that uses machine learning to automatically discover, classify, and protect sensitive data in AWS. Macie recognizes sensitive data such as personally identifiable information (PII) or intellectual property, and provides you with dashboards and alerts that give visibility into how this data is being accessed or moved. For more information, see the Macie User Guide.

" +} diff --git a/botocore/data/marketplace-entitlement/2017-01-11/service-2.json b/botocore/data/marketplace-entitlement/2017-01-11/service-2.json index 4825b42b..acdee343 100644 --- a/botocore/data/marketplace-entitlement/2017-01-11/service-2.json +++ b/botocore/data/marketplace-entitlement/2017-01-11/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceFullName":"AWS Marketplace Entitlement Service", + "serviceId":"Marketplace Entitlement Service", "signatureVersion":"v4", "signingName":"aws-marketplace", "targetPrefix":"AWSMPEntitlementService", diff --git a/botocore/data/marketplacecommerceanalytics/2015-07-01/service-2.json b/botocore/data/marketplacecommerceanalytics/2015-07-01/service-2.json index e6107700..1b773f64 100644 --- a/botocore/data/marketplacecommerceanalytics/2015-07-01/service-2.json +++ b/botocore/data/marketplacecommerceanalytics/2015-07-01/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceFullName":"AWS Marketplace Commerce Analytics", + "serviceId":"Marketplace Commerce Analytics", "signatureVersion":"v4", "signingName":"marketplacecommerceanalytics", "targetPrefix":"MarketplaceCommerceAnalytics20150701", diff --git a/botocore/data/mediaconvert/2017-08-29/service-2.json b/botocore/data/mediaconvert/2017-08-29/service-2.json index 97ef8b8b..af56c605 100644 --- a/botocore/data/mediaconvert/2017-08-29/service-2.json +++ b/botocore/data/mediaconvert/2017-08-29/service-2.json @@ -709,6 +709,129 @@ ], "documentation": "Retrieve a JSON array of up to twenty of your queues. This will return the queues themselves, not just a list of them. To retrieve the next twenty queues, use the nextToken string returned with the array." 
}, + "ListTagsForResource": { + "name": "ListTagsForResource", + "http": { + "method": "GET", + "requestUri": "/2017-08-29/tags/{arn}", + "responseCode": 200 + }, + "input": { + "shape": "ListTagsForResourceRequest" + }, + "output": { + "shape": "ListTagsForResourceResponse" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "The service can't process your request because of a problem in the request. Please check your request form and syntax." + }, + { + "shape": "InternalServerErrorException", + "documentation": "The service encountered an unexpected condition and cannot fulfill your request." + }, + { + "shape": "ForbiddenException", + "documentation": "You don't have permissions for this action with the credentials you sent." + }, + { + "shape": "NotFoundException", + "documentation": "The resource you requested does not exist." + }, + { + "shape": "TooManyRequestsException", + "documentation": "Too many requests have been sent in too short of a time. The service limits the rate at which it will accept requests." + }, + { + "shape": "ConflictException", + "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + } + ], + "documentation": "Retrieve the tags for a MediaConvert resource." + }, + "TagResource": { + "name": "TagResource", + "http": { + "method": "POST", + "requestUri": "/2017-08-29/tags", + "responseCode": 200 + }, + "input": { + "shape": "TagResourceRequest" + }, + "output": { + "shape": "TagResourceResponse" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "The service can't process your request because of a problem in the request. Please check your request form and syntax." + }, + { + "shape": "InternalServerErrorException", + "documentation": "The service encountered an unexpected condition and cannot fulfill your request." 
+ }, + { + "shape": "ForbiddenException", + "documentation": "You don't have permissions for this action with the credentials you sent." + }, + { + "shape": "NotFoundException", + "documentation": "The resource you requested does not exist." + }, + { + "shape": "TooManyRequestsException", + "documentation": "Too many requests have been sent in too short of a time. The service limits the rate at which it will accept requests." + }, + { + "shape": "ConflictException", + "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + } + ], + "documentation": "Tag a MediaConvert queue, preset, or job template. For information about these resource types, see the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html" + }, + "UntagResource": { + "name": "UntagResource", + "http": { + "method": "DELETE", + "requestUri": "/2017-08-29/tags", + "responseCode": 200 + }, + "input": { + "shape": "UntagResourceRequest" + }, + "output": { + "shape": "UntagResourceResponse" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "The service can't process your request because of a problem in the request. Please check your request form and syntax." + }, + { + "shape": "InternalServerErrorException", + "documentation": "The service encountered an unexpected condition and cannot fulfill your request." + }, + { + "shape": "ForbiddenException", + "documentation": "You don't have permissions for this action with the credentials you sent." + }, + { + "shape": "NotFoundException", + "documentation": "The resource you requested does not exist." + }, + { + "shape": "TooManyRequestsException", + "documentation": "Too many requests have been sent in too short of a time. The service limits the rate at which it will accept requests." 
+ }, + { + "shape": "ConflictException", + "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + } + ], + "documentation": "Untag a MediaConvert queue, preset, or job template. For information about these resource types, see the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html" + }, "UpdateJobTemplate": { "name": "UpdateJobTemplate", "http": { @@ -886,9 +1009,9 @@ "locationName": "audioDescriptionBroadcasterMix" }, "Bitrate": { - "shape": "__integer", + "shape": "__integerMin6000Max1024000", "locationName": "bitrate", - "documentation": "Average bitrate in bits/second. Valid values depend on rate control mode and profile." + "documentation": "Average bitrate in bits/second. Defaults and valid values depend on rate control mode and profile." }, "CodecProfile": { "shape": "AacCodecProfile", @@ -907,7 +1030,7 @@ "locationName": "rawFormat" }, "SampleRate": { - "shape": "__integer", + "shape": "__integerMin8000Max96000", "locationName": "sampleRate", "documentation": "Sample rate in Hz. Valid values depend on rate control mode and profile." }, @@ -920,7 +1043,11 @@ "locationName": "vbrQuality" } }, - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value AAC." + "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value AAC. The service accepts one of two mutually exclusive groups of AAC settings--VBR and CBR. To select one of these modes, set the value of Bitrate control mode (rateControlMode) to \"VBR\" or \"CBR\". In VBR mode, you control the audio quality with the setting VBR quality (vbrQuality). In CBR mode, you use the setting Bitrate (bitrate). 
Defaults and valid values depend on the rate control mode.", + "required": [ + "CodingMode", + "SampleRate" + ] }, "AacSpecification": { "type": "string", @@ -992,7 +1119,7 @@ "type": "structure", "members": { "Bitrate": { - "shape": "__integer", + "shape": "__integerMin64000Max640000", "locationName": "bitrate", "documentation": "Average bitrate in bits/second. Valid bitrates depend on the coding mode." }, @@ -1005,7 +1132,7 @@ "locationName": "codingMode" }, "Dialnorm": { - "shape": "__integer", + "shape": "__integerMin1Max31", "locationName": "dialnorm", "documentation": "Sets the dialnorm for the output. If blank and input audio is Dolby Digital, dialnorm will be passed through." }, @@ -1022,7 +1149,7 @@ "locationName": "metadataControl" }, "SampleRate": { - "shape": "__integer", + "shape": "__integerMin48000Max48000", "locationName": "sampleRate", "documentation": "Sample rate in hz. Sample rate is always 48000." } @@ -1031,7 +1158,7 @@ }, "AfdSignaling": { "type": "string", - "documentation": "This setting only applies to H.264 and MPEG2 outputs. Use Insert AFD signaling (AfdSignaling) to whether there are AFD values in the output video data and what those values are. * Choose None to remove all AFD values from this output. * Choose Fixed to ignore input AFD values and instead encode the value specified in the job. * Choose Auto to calculate output AFD values based on the input AFD scaler data.", + "documentation": "This setting only applies to H.264 and MPEG2 outputs. Use Insert AFD signaling (AfdSignaling) to specify whether the service includes AFD values in the output video data and what those values are. * Choose None to remove all AFD values from this output. * Choose Fixed to ignore input AFD values and instead encode the value specified in the job. 
* Choose Auto to calculate output AFD values based on the input AFD scaler data.", "enum": [ "NONE", "AUTO", @@ -1042,17 +1169,17 @@ "type": "structure", "members": { "BitDepth": { - "shape": "__integer", + "shape": "__integerMin16Max24", "locationName": "bitDepth", "documentation": "Specify Bit depth (BitDepth), in bits per sample, to choose the encoding quality for this audio track." }, "Channels": { - "shape": "__integer", + "shape": "__integerMin1Max2", "locationName": "channels", "documentation": "Set Channels to specify the number of channels in this output audio track. Choosing Mono in the console will give you 1 output channel; choosing Stereo will give you 2. In the API, valid values are 1 and 2." }, "SampleRate": { - "shape": "__integer", + "shape": "__integerMin8000Max192000", "locationName": "sampleRate", "documentation": "Sample rate in hz." } @@ -1063,7 +1190,7 @@ "type": "structure", "members": { "SourceAncillaryChannelNumber": { - "shape": "__integer", + "shape": "__integerMin1Max4", "locationName": "sourceAncillaryChannelNumber", "documentation": "Specifies the 608 channel number in the ancillary data track from which to extract captions. Unused for passthrough." } @@ -1123,11 +1250,14 @@ "locationName": "wavSettings" } }, - "documentation": "Audio codec settings (CodecSettings) under (AudioDescriptions) contains the group of settings related to audio encoding. The settings in this group vary depending on the value you choose for Audio codec (Codec). For each codec enum you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * AAC, AacSettings * MP2, Mp2Settings * WAV, WavSettings * AIFF, AiffSettings * AC3, Ac3Settings * EAC3, Eac3Settings" + "documentation": "Audio codec settings (CodecSettings) under (AudioDescriptions) contains the group of settings related to audio encoding. The settings in this group vary depending on the value you choose for Audio codec (Codec). 
For each codec enum you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * AAC, AacSettings * MP2, Mp2Settings * WAV, WavSettings * AIFF, AiffSettings * AC3, Ac3Settings * EAC3, Eac3Settings", + "required": [ + "Codec" + ] }, "AudioDefaultSelection": { "type": "string", - "documentation": "When an \"Audio Description\":#audio_description specifies an AudioSelector or AudioSelectorGroup for which no matching source is found in the input, then the audio selector marked as DEFAULT will be used. If none are marked as default, silence will be inserted for the duration of the input.", + "documentation": "Enable this setting on one audio selector to set it as the default for the job. The service uses this default for outputs where it can't find the specified input audio. If you don't set a default, those outputs have no audio.", "enum": [ "DEFAULT", "NOT_DEFAULT" @@ -1146,7 +1276,7 @@ "documentation": "Specifies which audio data to use from each input. In the simplest case, specify an \"Audio Selector\":#inputs-audio_selector by name based on its order within each input. For example if you specify \"Audio Selector 3\", then the third audio selector will be used from each input. If an input does not have an \"Audio Selector 3\", then the audio selector marked as \"default\" in that input will be used. If there is no audio selector marked as \"default\", silence will be inserted for the duration of that input. Alternatively, an \"Audio Selector Group\":#inputs-audio_selector_group name may be specified, with similar default/silence behavior. If no audio_source_name is specified, then \"Audio Selector 1\" will be chosen automatically." }, "AudioType": { - "shape": "__integer", + "shape": "__integerMin0Max255", "locationName": "audioType", "documentation": "Applies only if Follow Input Audio Type is unchecked (false). A number between 0 and 255. 
The following are defined in ISO-IEC 13818-1: 0 = Undefined, 1 = Clean Effects, 2 = Hearing Impaired, 3 = Visually Impaired Commentary, 4-255 = Reserved." }, @@ -1158,6 +1288,11 @@ "shape": "AudioCodecSettings", "locationName": "codecSettings" }, + "CustomLanguageCode": { + "shape": "__stringMin3Max3PatternAZaZ3", + "locationName": "customLanguageCode", + "documentation": "Specify the language for this audio output track, using the ISO 639-2 or ISO 639-3 three-letter language code. The language specified will be used when 'Follow Input Language Code' is not selected or when 'Follow Input Language Code' is selected but there is no ISO 639 language code specified by the input." + }, "LanguageCode": { "shape": "LanguageCode", "locationName": "languageCode", @@ -1173,12 +1308,15 @@ "documentation": "Advanced audio remixing settings." }, "StreamName": { - "shape": "__string", + "shape": "__stringPatternWS", "locationName": "streamName", "documentation": "Used for MS Smooth and Apple HLS outputs. Indicates the name displayed by the player (eg. English, or Director Commentary). Alphanumeric characters, spaces, and underscore are legal." } }, - "documentation": "Description of audio output" + "documentation": "Description of audio output", + "required": [ + "CodecSettings" + ] }, "AudioLanguageCodeControl": { "type": "string", @@ -1232,7 +1370,7 @@ "locationName": "algorithmControl" }, "CorrectionGateLevel": { - "shape": "__integer", + "shape": "__integerMinNegative70Max0", "locationName": "correctionGateLevel", "documentation": "Content measuring above this level will be corrected to the target level. Content measuring below this level will not be corrected. Gating only applies when not using real_time_correction." }, @@ -1245,7 +1383,7 @@ "locationName": "peakCalculation" }, "TargetLkfs": { - "shape": "__double", + "shape": "__doubleMinNegative59Max0", "locationName": "targetLkfs", "documentation": "Target LKFS(loudness) to adjust volume to. 
If no value is entered, a default value will be used according to the chosen algorithm. The CALM Act (1770-1) recommends a target of -24 LKFS. The EBU R-128 specification (1770-2) recommends a target of -23 LKFS." } @@ -1255,12 +1393,17 @@ "AudioSelector": { "type": "structure", "members": { + "CustomLanguageCode": { + "shape": "__stringMin3Max3PatternAZaZ3", + "locationName": "customLanguageCode", + "documentation": "Selects a specific language code from within an audio source, using the ISO 639-2 or ISO 639-3 three-letter language code" + }, "DefaultSelection": { "shape": "AudioDefaultSelection", "locationName": "defaultSelection" }, "ExternalAudioFileInput": { - "shape": "__string", + "shape": "__stringPatternS3MM2VVMMPPEEGGAAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE", "locationName": "externalAudioFileInput", "documentation": "Specifies audio data from an external file source." }, @@ -1270,33 +1413,33 @@ "documentation": "Selects a specific language code from within an audio source." }, "Offset": { - "shape": "__integer", + "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "offset", "documentation": "Specifies a time delta in milliseconds to offset the audio from the input video." }, "Pids": { - "shape": "ListOf__integer", + "shape": "__listOf__integerMin1Max2147483647", "locationName": "pids", "documentation": "Selects a specific PID from within an audio source (e.g. 257 selects PID 0x101)." }, "ProgramSelection": { - "shape": "__integer", + "shape": "__integerMin0Max8", "locationName": "programSelection", - "documentation": "Applies only when input streams contain Dolby E. Enter the program ID (according to the metadata in the audio) of the Dolby E program to extract from the specified track. 
One program extracted per audio selector. To select multiple programs, create multiple selectors with the same Track and different Program numbers. \"All channels\" means to ignore the program IDs and include all the channels in this selector; useful if metadata is known to be incorrect." + "documentation": "Use this setting for input streams that contain Dolby E, to have the service extract specific program data from the track. To select multiple programs, create multiple selectors with the same Track and different Program numbers. In the console, this setting is visible when you set Selector type to Track. Choose the program number from the dropdown list. If you are sending a JSON file, provide the program ID, which is part of the audio metadata. If your input file has incorrect metadata, you can choose All channels instead of a program number to have the service ignore the program IDs and include all the programs in the track." }, "RemixSettings": { "shape": "RemixSettings", "locationName": "remixSettings", - "documentation": "Advanced audio remixing settings." + "documentation": "Use these settings to reorder the audio channels of one input to match those of another input. This allows you to combine the two files into a single output, one after the other." }, "SelectorType": { "shape": "AudioSelectorType", "locationName": "selectorType" }, "Tracks": { - "shape": "ListOf__integer", + "shape": "__listOf__integerMin1Max2147483647", "locationName": "tracks", - "documentation": "Identify the channel to include in this selector by entering the 1-based track index. To combine several tracks, enter a comma-separated list, e.g. \"1,2,3\" for tracks 1-3." + "documentation": "Identify a track from the input audio to include in this selector by entering the track index number. To include several tracks in a single audio selector, specify multiple tracks as follows. Using the console, enter a comma-separated list. For examle, type \"1,2,3\" to include tracks 1 through 3. 
Specifying directly in your JSON job file, provide the track numbers in an array. For example, \"tracks\": [1,2,3]." } }, "documentation": "Selector for Audio" @@ -1305,12 +1448,15 @@ "type": "structure", "members": { "AudioSelectorNames": { - "shape": "ListOf__string", + "shape": "__listOf__stringMin1", "locationName": "audioSelectorNames", - "documentation": "Name of an \"Audio Selector\":#inputs-audio_selector within the same input to include in the group. Audio selector names are standardized, based on their order within the input (e.g. \"Audio Selector 1\"). The audio_selector_name parameter can be repeated to add any number of audio selectors to the group." + "documentation": "Name of an Audio Selector within the same input to include in the group. Audio selector names are standardized, based on their order within the input (e.g., \"Audio Selector 1\"). The audio selector name parameter can be repeated to add any number of audio selectors to the group." } }, - "documentation": "Group of Audio Selectors" + "documentation": "Group of Audio Selectors", + "required": [ + "AudioSelectorNames" + ] }, "AudioSelectorType": { "type": "string", @@ -1333,7 +1479,7 @@ "type": "structure", "members": { "AvailBlankingImage": { - "shape": "__string", + "shape": "__stringMin14PatternS3BmpBMPPngPNG", "locationName": "availBlankingImage", "documentation": "Blanking image to be used. Leave empty for solid black. Only bmp and png images are supported." } @@ -1366,7 +1512,7 @@ "locationName": "backgroundColor" }, "BackgroundOpacity": { - "shape": "__integer", + "shape": "__integerMin0Max255", "locationName": "backgroundOpacity", "documentation": "Specifies the opacity of the background rectangle. 255 is opaque; 0 is transparent. Leaving this parameter blank is equivalent to setting it to 0 (transparent). All burn-in and DVB-Sub font settings must match." 
}, @@ -1375,17 +1521,17 @@ "locationName": "fontColor" }, "FontOpacity": { - "shape": "__integer", + "shape": "__integerMin0Max255", "locationName": "fontOpacity", "documentation": "Specifies the opacity of the burned-in captions. 255 is opaque; 0 is transparent.\nAll burn-in and DVB-Sub font settings must match." }, "FontResolution": { - "shape": "__integer", + "shape": "__integerMin96Max600", "locationName": "fontResolution", "documentation": "Font resolution in DPI (dots per inch); default is 96 dpi.\nAll burn-in and DVB-Sub font settings must match." }, "FontSize": { - "shape": "__integer", + "shape": "__integerMin0Max96", "locationName": "fontSize", "documentation": "A positive integer indicates the exact font size in points. Set to 0 for automatic font size selection. All burn-in and DVB-Sub font settings must match." }, @@ -1394,7 +1540,7 @@ "locationName": "outlineColor" }, "OutlineSize": { - "shape": "__integer", + "shape": "__integerMin0Max10", "locationName": "outlineSize", "documentation": "Specifies font outline size in pixels. This option is not valid for source captions that are either 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match." }, @@ -1403,17 +1549,17 @@ "locationName": "shadowColor" }, "ShadowOpacity": { - "shape": "__integer", + "shape": "__integerMin0Max255", "locationName": "shadowOpacity", "documentation": "Specifies the opacity of the shadow. 255 is opaque; 0 is transparent. Leaving this parameter blank is equivalent to setting it to 0 (transparent). All burn-in and DVB-Sub font settings must match." }, "ShadowXOffset": { - "shape": "__integer", + "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "shadowXOffset", "documentation": "Specifies the horizontal offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels to the left. 
All burn-in and DVB-Sub font settings must match." }, "ShadowYOffset": { - "shape": "__integer", + "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "shadowYOffset", "documentation": "Specifies the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. All burn-in and DVB-Sub font settings must match." }, @@ -1422,17 +1568,23 @@ "locationName": "teletextSpacing" }, "XPosition": { - "shape": "__integer", + "shape": "__integerMin0Max2147483647", "locationName": "xPosition", "documentation": "Specifies the horizontal position of the caption relative to the left side of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the left of the output. If no explicit x_position is provided, the horizontal caption position will be determined by the alignment parameter. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match." }, "YPosition": { - "shape": "__integer", + "shape": "__integerMin0Max2147483647", "locationName": "yPosition", "documentation": "Specifies the vertical position of the caption relative to the top of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit y_position is provided, the caption will be positioned towards the bottom of the output. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match." } }, - "documentation": "Burn-In Destination Settings." 
+ "documentation": "Burn-In Destination Settings.", + "required": [ + "OutlineColor", + "Alignment", + "OutlineSize", + "FontOpacity" + ] }, "BurninSubtitleAlignment": { "type": "string", @@ -1486,7 +1638,7 @@ }, "BurninSubtitleTeletextSpacing": { "type": "string", - "documentation": "Controls whether a fixed grid size or proportional font spacing will be used to generate the output subtitles bitmap. Only applicable for Teletext inputs and DVB-Sub/Burn-in outputs.", + "documentation": "Only applies to jobs with input captions in Teletext or STL formats. Specify whether the spacing between letters in your captions is set by the captions grid or varies depending on letter width. Choose fixed grid to conform to the spacing specified in the captions file more accurately. Choose proportional to make the text easier to read if the captions are closed caption.", "enum": [ "FIXED_GRID", "PROPORTIONAL" @@ -1515,10 +1667,15 @@ "type": "structure", "members": { "CaptionSelectorName": { - "shape": "__string", + "shape": "__stringMin1", "locationName": "captionSelectorName", "documentation": "Specifies which \"Caption Selector\":#inputs-caption_selector to use from each input when generating captions. The name should be of the format \"Caption Selector \", which denotes that the Nth Caption Selector will be used from each input." }, + "CustomLanguageCode": { + "shape": "__stringMin3Max3PatternAZaZ3", + "locationName": "customLanguageCode", + "documentation": "Indicates the language of the caption output track, using the ISO 639-2 or ISO 639-3 three-letter language code" + }, "DestinationSettings": { "shape": "CaptionDestinationSettings", "locationName": "destinationSettings" @@ -1534,11 +1691,20 @@ "documentation": "Human readable information to indicate captions available for players (eg. English, or Spanish). Alphanumeric characters, spaces, and underscore are legal." 
} }, - "documentation": "Description of Caption output" + "documentation": "Description of Caption output", + "required": [ + "DestinationSettings", + "CaptionSelectorName" + ] }, "CaptionDescriptionPreset": { "type": "structure", "members": { + "CustomLanguageCode": { + "shape": "__stringMin3Max3PatternAZaZ3", + "locationName": "customLanguageCode", + "documentation": "Indicates the language of the caption output track, using the ISO 639-2 or ISO 639-3 three-letter language code" + }, "DestinationSettings": { "shape": "CaptionDestinationSettings", "locationName": "destinationSettings" @@ -1554,7 +1720,10 @@ "documentation": "Human readable information to indicate captions available for players (eg. English, or Spanish). Alphanumeric characters, spaces, and underscore are legal." } }, - "documentation": "Caption Description for preset" + "documentation": "Caption Description for preset", + "required": [ + "DestinationSettings" + ] }, "CaptionDestinationSettings": { "type": "structure", @@ -1584,7 +1753,10 @@ "locationName": "ttmlDestinationSettings" } }, - "documentation": "Specific settings required by destination type. Note that burnin_destination_settings are not available if the source of the caption data is Embedded or Teletext." + "documentation": "Specific settings required by destination type. Note that burnin_destination_settings are not available if the source of the caption data is Embedded or Teletext.", + "required": [ + "DestinationType" + ] }, "CaptionDestinationType": { "type": "string", @@ -1603,6 +1775,11 @@ "CaptionSelector": { "type": "structure", "members": { + "CustomLanguageCode": { + "shape": "__stringMin3Max3PatternAZaZ3", + "locationName": "customLanguageCode", + "documentation": "The specific language to extract from source, using the ISO 639-2 or ISO 639-3 three-letter language code. If input is SCTE-27, complete this field and/or PID to select the caption language to extract. 
If input is DVB-Sub and output is Burn-in or SMPTE-TT, complete this field and/or PID to select the caption language to extract. If input is DVB-Sub that is being passed through, omit this field (and PID field); there is no way to extract a specific language with pass-through captions." + }, "LanguageCode": { "shape": "LanguageCode", "locationName": "languageCode", @@ -1613,7 +1790,10 @@ "locationName": "sourceSettings" } }, - "documentation": "Caption inputs to be mapped to caption outputs." + "documentation": "Set up captions in your outputs by first selecting them from your input here.", + "required": [ + "SourceSettings" + ] }, "CaptionSourceSettings": { "type": "structure", @@ -1643,7 +1823,10 @@ "locationName": "teletextSourceSettings" } }, - "documentation": "Source settings (SourceSettings) contains the group of settings for captions in the input." + "documentation": "Source settings (SourceSettings) contains the group of settings for captions in the input.", + "required": [ + "SourceType" + ] }, "CaptionSourceType": { "type": "string", @@ -1664,18 +1847,209 @@ "type": "structure", "members": { "OutputChannels": { - "shape": "ListOfOutputChannelMapping", + "shape": "__listOfOutputChannelMapping", "locationName": "outputChannels", "documentation": "List of output channels" } }, - "documentation": "Channel mapping (ChannelMapping) contains the group of fields that hold the remixing value for each channel. Units are in dB. Acceptable values are within the range from -60 (mute) through 6. A setting of 0 passes the input channel unchanged to the output channel (no attenuation or amplification)." + "documentation": "Channel mapping (ChannelMapping) contains the group of fields that hold the remixing value for each channel. Units are in dB. Acceptable values are within the range from -60 (mute) through 6. 
A setting of 0 passes the input channel unchanged to the output channel (no attenuation or amplification).", + "required": [ + "OutputChannels" + ] + }, + "CmafClientCache": { + "type": "string", + "documentation": "When set to ENABLED, sets #EXT-X-ALLOW-CACHE:no tag, which prevents client from saving media segments for later replay.", + "enum": [ + "DISABLED", + "ENABLED" + ] + }, + "CmafCodecSpecification": { + "type": "string", + "documentation": "Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist generation.", + "enum": [ + "RFC_6381", + "RFC_4281" + ] + }, + "CmafEncryptionSettings": { + "type": "structure", + "members": { + "ConstantInitializationVector": { + "shape": "__stringMin32Max32Pattern09aFAF32", + "locationName": "constantInitializationVector", + "documentation": "This is a 128-bit, 16-byte hex value represented by a 32-character text string. If this parameter is not set then the Initialization Vector will follow the segment number by default." + }, + "EncryptionMethod": { + "shape": "CmafEncryptionType", + "locationName": "encryptionMethod" + }, + "InitializationVectorInManifest": { + "shape": "CmafInitializationVectorInManifest", + "locationName": "initializationVectorInManifest" + }, + "StaticKeyProvider": { + "shape": "StaticKeyProvider", + "locationName": "staticKeyProvider" + }, + "Type": { + "shape": "CmafKeyProviderType", + "locationName": "type" + } + }, + "documentation": "Settings for CMAF encryption", + "required": [ + "Type" + ] + }, + "CmafEncryptionType": { + "type": "string", + "documentation": "Encrypts the segments with the given encryption scheme. Leave blank to disable. 
Selecting 'Disabled' in the web interface also disables encryption.", + "enum": [ + "SAMPLE_AES" + ] + }, + "CmafGroupSettings": { + "type": "structure", + "members": { + "BaseUrl": { + "shape": "__string", + "locationName": "baseUrl", + "documentation": "A partial URI prefix that will be put in the manifest file at the top level BaseURL element. Can be used if streams are delivered from a different URL than the manifest file." + }, + "ClientCache": { + "shape": "CmafClientCache", + "locationName": "clientCache" + }, + "CodecSpecification": { + "shape": "CmafCodecSpecification", + "locationName": "codecSpecification" + }, + "Destination": { + "shape": "__stringPatternS3", + "locationName": "destination", + "documentation": "Use Destination (Destination) to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file." + }, + "Encryption": { + "shape": "CmafEncryptionSettings", + "locationName": "encryption", + "documentation": "DRM settings." + }, + "FragmentLength": { + "shape": "__integerMin1Max2147483647", + "locationName": "fragmentLength", + "documentation": "Length of fragments to generate (in seconds). Fragment length must be compatible with GOP size and Framerate. Note that fragments will end on the next keyframe after this number of seconds, so actual fragment length may be longer. When Emit Single File is checked, the fragmentation is internal to a single output file and it does not cause the creation of many output files as in other output types." 
+ }, + "ManifestCompression": { + "shape": "CmafManifestCompression", + "locationName": "manifestCompression" + }, + "ManifestDurationFormat": { + "shape": "CmafManifestDurationFormat", + "locationName": "manifestDurationFormat" + }, + "MinBufferTime": { + "shape": "__integerMin0Max2147483647", + "locationName": "minBufferTime", + "documentation": "Minimum time of initially buffered media that is needed to ensure smooth playout." + }, + "SegmentControl": { + "shape": "CmafSegmentControl", + "locationName": "segmentControl" + }, + "SegmentLength": { + "shape": "__integerMin1Max2147483647", + "locationName": "segmentLength", + "documentation": "Use this setting to specify the length, in seconds, of each individual CMAF segment. This value applies to the whole package; that is, to every output in the output group. Note that segments end on the first keyframe after this number of seconds, so the actual segment length might be slightly longer. If you set Segment control (CmafSegmentControl) to single file, the service puts the content of each output in a single file that has metadata that marks these segments. If you set it to segmented files, the service creates multiple files for each output, each with the content of one segment." + }, + "StreamInfResolution": { + "shape": "CmafStreamInfResolution", + "locationName": "streamInfResolution" + }, + "WriteDashManifest": { + "shape": "CmafWriteDASHManifest", + "locationName": "writeDashManifest" + }, + "WriteHlsManifest": { + "shape": "CmafWriteHLSManifest", + "locationName": "writeHlsManifest" + } + }, + "documentation": "Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to CMAF_GROUP_SETTINGS. 
Each output in a CMAF Output Group may only contain a single video, audio, or caption output.", + "required": [ + "FragmentLength", + "SegmentLength" + ] + }, + "CmafInitializationVectorInManifest": { + "type": "string", + "documentation": "The Initialization Vector is a 128-bit number used in conjunction with the key for encrypting blocks. If set to INCLUDE, Initialization Vector is listed in the manifest. Otherwise Initialization Vector is not in the manifest.", + "enum": [ + "INCLUDE", + "EXCLUDE" + ] + }, + "CmafKeyProviderType": { + "type": "string", + "documentation": "Indicates which type of key provider is used for encryption.", + "enum": [ + "STATIC_KEY" + ] + }, + "CmafManifestCompression": { + "type": "string", + "documentation": "When set to GZIP, compresses HLS playlist.", + "enum": [ + "GZIP", + "NONE" + ] + }, + "CmafManifestDurationFormat": { + "type": "string", + "documentation": "Indicates whether the output manifest should use floating point values for segment duration.", + "enum": [ + "FLOATING_POINT", + "INTEGER" + ] + }, + "CmafSegmentControl": { + "type": "string", + "documentation": "When set to SINGLE_FILE, a single output file is generated, which is internally segmented using the Fragment Length and Segment Length. 
When set to SEGMENTED_FILES, separate segment files will be created.", + "enum": [ + "SINGLE_FILE", + "SEGMENTED_FILES" + ] + }, + "CmafStreamInfResolution": { + "type": "string", + "documentation": "Include or exclude RESOLUTION attribute for video in EXT-X-STREAM-INF tag of variant manifest.", + "enum": [ + "INCLUDE", + "EXCLUDE" + ] + }, + "CmafWriteDASHManifest": { + "type": "string", + "documentation": "When set to ENABLED, a DASH MPD manifest will be generated for this output.", + "enum": [ + "DISABLED", + "ENABLED" + ] + }, + "CmafWriteHLSManifest": { + "type": "string", + "documentation": "When set to ENABLED, an Apple HLS manifest will be generated for this output.", + "enum": [ + "DISABLED", + "ENABLED" + ] }, "ColorCorrector": { "type": "structure", "members": { "Brightness": { - "shape": "__integer", + "shape": "__integerMin1Max100", "locationName": "brightness", "documentation": "Brightness level." }, @@ -1684,7 +2058,7 @@ "locationName": "colorSpaceConversion" }, "Contrast": { - "shape": "__integer", + "shape": "__integerMin1Max100", "locationName": "contrast", "documentation": "Contrast level." }, @@ -1693,12 +2067,12 @@ "locationName": "hdr10Metadata" }, "Hue": { - "shape": "__integer", + "shape": "__integerMinNegative180Max180", "locationName": "hue", "documentation": "Hue in degrees." }, "Saturation": { - "shape": "__integer", + "shape": "__integerMin1Max100", "locationName": "saturation", "documentation": "Saturation level." } @@ -1715,7 +2089,7 @@ }, "ColorSpace": { "type": "string", - "documentation": "Specifies the colorspace of an input. This setting works in tandem with \"Color Corrector\":#color_corrector > color_space_conversion to determine if any conversion will be performed.", + "documentation": "If your input video has accurate color space metadata, or if you don't know about color space, leave this set to the default value FOLLOW. The service will automatically detect your input color space. 
If your input video has metadata indicating the wrong color space, or if your input video is missing color space metadata that should be there, specify the accurate color space here. If you choose HDR10, you can also correct inaccurate color space coefficients, using the HDR master display information controls. You must also set Color space usage (ColorSpaceUsage) to FORCE for the service to use these values.", "enum": [ "FOLLOW", "REC_601", @@ -1737,7 +2111,7 @@ }, "ColorSpaceUsage": { "type": "string", - "documentation": "There are two sources for color metadata, the input file and the job configuration. This enum controls which takes precedence. FORCE: System will use color metadata supplied by user, if any. If the user does not supply color metadata the system will use data from the source. FALLBACK: System will use color metadata from the source. If source has no color metadata, the system will use user-supplied color metadata values if available.", + "documentation": "There are two sources for color metadata, the input file and the job configuration (in the Color space and HDR master display informaiton settings). The Color space usage setting controls which takes precedence. FORCE: The system will use color metadata supplied by user, if any. If the user does not supply color metadata, the system will use data from the source. FALLBACK: The system will use color metadata from the source. If source has no color metadata, the system will use user-supplied color metadata values if available.", "enum": [ "FORCE", "FALLBACK" @@ -1785,7 +2159,10 @@ "locationName": "mp4Settings" } }, - "documentation": "Container specific settings." 
+ "documentation": "Container specific settings.", + "required": [ + "Container" + ] }, "ContainerType": { "type": "string", @@ -1795,6 +2172,7 @@ "ISMV", "M2TS", "M3U8", + "CMFC", "MOV", "MP4", "MPD", @@ -1831,11 +2209,15 @@ "locationName": "settings" }, "UserMetadata": { - "shape": "MapOf__string", + "shape": "__mapOf__string", "locationName": "userMetadata", "documentation": "User-defined metadata that you want to associate with an MediaConvert job. You specify metadata in key/value pairs." } - } + }, + "required": [ + "Role", + "Settings" + ] }, "CreateJobResponse": { "type": "structure", @@ -1872,8 +2254,17 @@ "Settings": { "shape": "JobTemplateSettings", "locationName": "settings" + }, + "Tags": { + "shape": "__mapOf__string", + "locationName": "tags", + "documentation": "The tags that you want to add to the resource. You can tag resources with a key-value pair or with only a key." } - } + }, + "required": [ + "Settings", + "Name" + ] }, "CreateJobTemplateResponse": { "type": "structure", @@ -1905,8 +2296,17 @@ "Settings": { "shape": "PresetSettings", "locationName": "settings" + }, + "Tags": { + "shape": "__mapOf__string", + "locationName": "tags", + "documentation": "The tags that you want to add to the resource. You can tag resources with a key-value pair or with only a key." } - } + }, + "required": [ + "Settings", + "Name" + ] }, "CreatePresetResponse": { "type": "structure", @@ -1929,8 +2329,16 @@ "shape": "__string", "locationName": "name", "documentation": "The name of the queue you are creating." + }, + "Tags": { + "shape": "__mapOf__string", + "locationName": "tags", + "documentation": "The tags that you want to add to the resource. You can tag resources with a key-value pair or with only a key." } - } + }, + "required": [ + "Name" + ] }, "CreateQueueResponse": { "type": "structure", @@ -1949,7 +2357,10 @@ "locationName": "spekeKeyProvider" } }, - "documentation": "Specifies DRM settings for DASH outputs." 
+ "documentation": "Specifies DRM settings for DASH outputs.", + "required": [ + "SpekeKeyProvider" + ] }, "DashIsoGroupSettings": { "type": "structure", @@ -1960,7 +2371,7 @@ "documentation": "A partial URI prefix that will be put in the manifest (.mpd) file at the top level BaseURL element. Can be used if streams are delivered from a different URL than the manifest file." }, "Destination": { - "shape": "__string", + "shape": "__stringPatternS3", "locationName": "destination", "documentation": "Use Destination (Destination) to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file." }, @@ -1970,7 +2381,7 @@ "documentation": "DRM settings." }, "FragmentLength": { - "shape": "__integer", + "shape": "__integerMin1Max2147483647", "locationName": "fragmentLength", "documentation": "Length of fragments to generate (in seconds). Fragment length must be compatible with GOP size and Framerate. Note that fragments will end on the next keyframe after this number of seconds, so actual fragment length may be longer. When Emit Single File is checked, the fragmentation is internal to a single output file and it does not cause the creation of many output files as in other output types." }, @@ -1979,7 +2390,7 @@ "locationName": "hbbtvCompliance" }, "MinBufferTime": { - "shape": "__integer", + "shape": "__integerMin0Max2147483647", "locationName": "minBufferTime", "documentation": "Minimum time of initially buffered media that is needed to ensure smooth playout." }, @@ -1988,12 +2399,16 @@ "locationName": "segmentControl" }, "SegmentLength": { - "shape": "__integer", + "shape": "__integerMin1Max2147483647", "locationName": "segmentLength", "documentation": "Length of mpd segments to create (in seconds). 
Note that segments will end on the next keyframe after this number of seconds, so actual segment length may be longer. When Emit Single File is checked, the segmentation is internal to a single output file and it does not cause the creation of many output files as in other output types." } }, - "documentation": "Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to DASH_ISO_GROUP_SETTINGS." + "documentation": "Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to DASH_ISO_GROUP_SETTINGS.", + "required": [ + "SegmentLength", + "FragmentLength" + ] }, "DashIsoHbbtvCompliance": { "type": "string", @@ -2133,7 +2548,7 @@ "type": "structure", "members": { "Endpoints": { - "shape": "ListOfEndpoint", + "shape": "__listOfEndpoint", "locationName": "endpoints", "documentation": "List of endpoints" }, @@ -2156,22 +2571,27 @@ "type": "structure", "members": { "NetworkId": { - "shape": "__integer", + "shape": "__integerMin0Max65535", "locationName": "networkId", "documentation": "The numeric value placed in the Network Information Table (NIT)." }, "NetworkName": { - "shape": "__string", + "shape": "__stringMin1Max256", "locationName": "networkName", "documentation": "The network name text placed in the network_name_descriptor inside the Network Information Table. Maximum length is 256 characters." }, "NitInterval": { - "shape": "__integer", + "shape": "__integerMin25Max10000", "locationName": "nitInterval", "documentation": "The number of milliseconds between instances of this table in the output transport stream." } }, - "documentation": "Inserts DVB Network Information Table (NIT) at the specified table repetition interval." 
+ "documentation": "Inserts DVB Network Information Table (NIT) at the specified table repetition interval.", + "required": [ + "NetworkName", + "NitInterval", + "NetworkId" + ] }, "DvbSdtSettings": { "type": "structure", @@ -2181,17 +2601,17 @@ "locationName": "outputSdt" }, "SdtInterval": { - "shape": "__integer", + "shape": "__integerMin25Max2000", "locationName": "sdtInterval", "documentation": "The number of milliseconds between instances of this table in the output transport stream." }, "ServiceName": { - "shape": "__string", + "shape": "__stringMin1Max256", "locationName": "serviceName", "documentation": "The service name placed in the service_descriptor in the Service Description Table. Maximum length is 256 characters." }, "ServiceProviderName": { - "shape": "__string", + "shape": "__stringMin1Max256", "locationName": "serviceProviderName", "documentation": "The service provider name placed in the service_descriptor in the Service Description Table. Maximum length is 256 characters." } @@ -2210,7 +2630,7 @@ "locationName": "backgroundColor" }, "BackgroundOpacity": { - "shape": "__integer", + "shape": "__integerMin0Max255", "locationName": "backgroundOpacity", "documentation": "Specifies the opacity of the background rectangle. 255 is opaque; 0 is transparent. Leaving this parameter blank is equivalent to setting it to 0 (transparent). All burn-in and DVB-Sub font settings must match." }, @@ -2219,17 +2639,17 @@ "locationName": "fontColor" }, "FontOpacity": { - "shape": "__integer", + "shape": "__integerMin0Max255", "locationName": "fontOpacity", "documentation": "Specifies the opacity of the burned-in captions. 255 is opaque; 0 is transparent.\nAll burn-in and DVB-Sub font settings must match." }, "FontResolution": { - "shape": "__integer", + "shape": "__integerMin96Max600", "locationName": "fontResolution", "documentation": "Font resolution in DPI (dots per inch); default is 96 dpi.\nAll burn-in and DVB-Sub font settings must match." 
}, "FontSize": { - "shape": "__integer", + "shape": "__integerMin0Max96", "locationName": "fontSize", "documentation": "A positive integer indicates the exact font size in points. Set to 0 for automatic font size selection. All burn-in and DVB-Sub font settings must match." }, @@ -2238,7 +2658,7 @@ "locationName": "outlineColor" }, "OutlineSize": { - "shape": "__integer", + "shape": "__integerMin0Max10", "locationName": "outlineSize", "documentation": "Specifies font outline size in pixels. This option is not valid for source captions that are either 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match." }, @@ -2247,17 +2667,17 @@ "locationName": "shadowColor" }, "ShadowOpacity": { - "shape": "__integer", + "shape": "__integerMin0Max255", "locationName": "shadowOpacity", "documentation": "Specifies the opacity of the shadow. 255 is opaque; 0 is transparent. Leaving this parameter blank is equivalent to setting it to 0 (transparent). All burn-in and DVB-Sub font settings must match." }, "ShadowXOffset": { - "shape": "__integer", + "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "shadowXOffset", "documentation": "Specifies the horizontal offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels to the left. All burn-in and DVB-Sub font settings must match." }, "ShadowYOffset": { - "shape": "__integer", + "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "shadowYOffset", "documentation": "Specifies the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. All burn-in and DVB-Sub font settings must match." 
}, @@ -2266,23 +2686,29 @@ "locationName": "teletextSpacing" }, "XPosition": { - "shape": "__integer", + "shape": "__integerMin0Max2147483647", "locationName": "xPosition", "documentation": "Specifies the horizontal position of the caption relative to the left side of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the left of the output. If no explicit x_position is provided, the horizontal caption position will be determined by the alignment parameter. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match." }, "YPosition": { - "shape": "__integer", + "shape": "__integerMin0Max2147483647", "locationName": "yPosition", "documentation": "Specifies the vertical position of the caption relative to the top of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit y_position is provided, the caption will be positioned towards the bottom of the output. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match." } }, - "documentation": "DVB-Sub Destination Settings" + "documentation": "DVB-Sub Destination Settings", + "required": [ + "OutlineColor", + "Alignment", + "OutlineSize", + "FontOpacity" + ] }, "DvbSubSourceSettings": { "type": "structure", "members": { "Pid": { - "shape": "__integer", + "shape": "__integerMin1Max2147483647", "locationName": "pid", "documentation": "When using DVB-Sub with Burn-In or SMPTE-TT, use this PID for the source content. Unused for DVB-Sub passthrough. All DVB-Sub content is passed through, regardless of selectors." 
} @@ -2341,7 +2767,7 @@ }, "DvbSubtitleTeletextSpacing": { "type": "string", - "documentation": "Controls whether a fixed grid size or proportional font spacing will be used to generate the output subtitles bitmap. Only applicable for Teletext inputs and DVB-Sub/Burn-in outputs.", + "documentation": "Only applies to jobs with input captions in Teletext or STL formats. Specify whether the spacing between letters in your captions is set by the captions grid or varies depending on letter width. Choose fixed grid to conform to the spacing specified in the captions file more accurately. Choose proportional to make the text easier to read if the captions are closed caption.", "enum": [ "FIXED_GRID", "PROPORTIONAL" @@ -2351,12 +2777,15 @@ "type": "structure", "members": { "TdtInterval": { - "shape": "__integer", + "shape": "__integerMin1000Max30000", "locationName": "tdtInterval", "documentation": "The number of milliseconds between instances of this table in the output transport stream." } }, - "documentation": "Inserts DVB Time and Date Table (TDT) at the specified table repetition interval." + "documentation": "Inserts DVB Time and Date Table (TDT) at the specified table repetition interval.", + "required": [ + "TdtInterval" + ] }, "Eac3AttenuationControl": { "type": "string", @@ -2466,7 +2895,7 @@ "locationName": "attenuationControl" }, "Bitrate": { - "shape": "__integer", + "shape": "__integerMin64000Max640000", "locationName": "bitrate", "documentation": "Average bitrate in bits/second. Valid bitrates depend on the coding mode." }, @@ -2483,7 +2912,7 @@ "locationName": "dcFilter" }, "Dialnorm": { - "shape": "__integer", + "shape": "__integerMin1Max31", "locationName": "dialnorm", "documentation": "Sets the dialnorm for the output. If blank and input audio is Dolby Digital Plus, dialnorm will be passed through." 
}, @@ -2504,22 +2933,22 @@ "locationName": "lfeFilter" }, "LoRoCenterMixLevel": { - "shape": "__double", + "shape": "__doubleMinNegative60Max3", "locationName": "loRoCenterMixLevel", "documentation": "Left only/Right only center mix level. Only used for 3/2 coding mode.\nValid values: 3.0, 1.5, 0.0, -1.5 -3.0 -4.5 -6.0 -60" }, "LoRoSurroundMixLevel": { - "shape": "__double", + "shape": "__doubleMinNegative60MaxNegative1", "locationName": "loRoSurroundMixLevel", "documentation": "Left only/Right only surround mix level. Only used for 3/2 coding mode.\nValid values: -1.5 -3.0 -4.5 -6.0 -60" }, "LtRtCenterMixLevel": { - "shape": "__double", + "shape": "__doubleMinNegative60Max3", "locationName": "ltRtCenterMixLevel", "documentation": "Left total/Right total center mix level. Only used for 3/2 coding mode.\nValid values: 3.0, 1.5, 0.0, -1.5 -3.0 -4.5 -6.0 -60" }, "LtRtSurroundMixLevel": { - "shape": "__double", + "shape": "__doubleMinNegative60MaxNegative1", "locationName": "ltRtSurroundMixLevel", "documentation": "Left total/Right total surround mix level. Only used for 3/2 coding mode.\nValid values: -1.5 -3.0 -4.5 -6.0 -60" }, @@ -2536,7 +2965,7 @@ "locationName": "phaseControl" }, "SampleRate": { - "shape": "__integer", + "shape": "__integerMin48000Max48000", "locationName": "sampleRate", "documentation": "Sample rate in hz. Sample rate is always 48000." }, @@ -2599,12 +3028,12 @@ "locationName": "convert608To708" }, "Source608ChannelNumber": { - "shape": "__integer", + "shape": "__integerMin1Max4", "locationName": "source608ChannelNumber", "documentation": "Specifies the 608/708 channel number within the video track from which to extract captions. Unused for passthrough." }, "Source608TrackNumber": { - "shape": "__integer", + "shape": "__integerMin1Max1", "locationName": "source608TrackNumber", "documentation": "Specifies the video track index used for extracting captions. The system only supports one input video track, so this should always be set to '1'." 
} @@ -2653,7 +3082,7 @@ "type": "structure", "members": { "Destination": { - "shape": "__string", + "shape": "__stringPatternS3", "locationName": "destination", "documentation": "Use Destination (Destination) to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file." } @@ -2676,17 +3105,20 @@ "locationName": "convert608To708" }, "SourceFile": { - "shape": "__string", + "shape": "__stringMin14PatternS3SccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTSmiSMI", "locationName": "sourceFile", "documentation": "External caption file used for loading captions. Accepted file extensions are 'scc', 'ttml', 'dfxp', 'stl', 'srt', and 'smi'." }, "TimeDelta": { - "shape": "__integer", + "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "timeDelta", "documentation": "Specifies a time delta in seconds to offset the captions from the source file." } }, - "documentation": "Settings for File-based Captions in Source" + "documentation": "Settings for File-based Captions in Source", + "required": [ + "SourceFile" + ] }, "ForbiddenException": { "type": "structure", @@ -2706,22 +3138,22 @@ "type": "structure", "members": { "FramerateDenominator": { - "shape": "__integer", + "shape": "__integerMin1Max2147483647", "locationName": "framerateDenominator", "documentation": "Frame capture will encode the first frame of the output stream, then one frame every framerateDenominator/framerateNumerator seconds. For example, settings of framerateNumerator = 1 and framerateDenominator = 3 (a rate of 1/3 frame per second) will capture the first frame, then 1 frame every 3s. Files will be named as filename.n.jpg where n is the 0-based sequence number of each Capture." 
}, "FramerateNumerator": { - "shape": "__integer", + "shape": "__integerMin1Max2147483647", "locationName": "framerateNumerator", "documentation": "Frame capture will encode the first frame of the output stream, then one frame every framerateDenominator/framerateNumerator seconds. For example, settings of framerateNumerator = 1 and framerateDenominator = 3 (a rate of 1/3 frame per second) will capture the first frame, then 1 frame every 3s. Files will be named as filename.NNNNNNN.jpg where N is the 0-based frame sequence number zero padded to 7 decimal places." }, "MaxCaptures": { - "shape": "__integer", + "shape": "__integerMin1Max10000000", "locationName": "maxCaptures", "documentation": "Maximum number of captures (encoded jpg output files)." }, "Quality": { - "shape": "__integer", + "shape": "__integerMin1Max100", "locationName": "quality", "documentation": "JPEG Quality - a higher value equals higher quality." } @@ -2893,7 +3325,7 @@ }, "H264FramerateControl": { "type": "string", - "documentation": "Using the API, set FramerateControl to INITIALIZE_FROM_SOURCE if you want the service to use the framerate from the input. Using the console, do this by choosing INITIALIZE_FROM_SOURCE for Framerate.", + "documentation": "If you are using the console, use the Framerate setting to specify the framerate for this output. If you want to keep the same framerate as the input video, choose Follow source. If you want to do framerate conversion, choose a framerate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your framerate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the framerate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the framerate from the input. 
Choose SPECIFIED if you want the service to use the framerate you specify in the settings FramerateNumerator and FramerateDenominator.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -2925,7 +3357,7 @@ }, "H264InterlaceMode": { "type": "string", - "documentation": "Use Interlace mode (InterlaceMode) to choose the scan line type for the output. * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce interlaced output with the entire output having the same field polarity (top or bottom first). * Follow, Default Top (FOLLOw_TOP_FIELD) and Follow, Default Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. Therefore, behavior depends on the input scan type. - If the source is interlaced, the output will be interlaced with the same polarity as the source (it will follow the source). The output could therefore be a mix of \"top field first\" and \"bottom field first\". - If the source is progressive, the output will be interlaced with \"top field first\" or \"bottom field first\" polarity, depending on which of the Follow options you chose.", + "documentation": "Use Interlace mode (InterlaceMode) to choose the scan line type for the output. * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce interlaced output with the entire output having the same field polarity (top or bottom first). * Follow, Default Top (FOLLOW_TOP_FIELD) and Follow, Default Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. Therefore, behavior depends on the input scan type, as follows.\n - If the source is interlaced, the output will be interlaced with the same polarity as the source (it will follow the source). 
The output could therefore be a mix of \"top field first\" and \"bottom field first\".\n - If the source is progressive, the output will be interlaced with \"top field first\" or \"bottom field first\" polarity, depending on which of the Follow options you chose.", "enum": [ "PROGRESSIVE", "TOP_FIELD", @@ -2953,7 +3385,7 @@ }, "H264RateControlMode": { "type": "string", - "documentation": "Rate control mode. CQ uses constant quantizer (qp), ABR (average bitrate) does not write HRD parameters.", + "documentation": "Use this setting to specify whether this output has a variable bitrate (VBR) or constant bitrate (CBR).", "enum": [ "VBR", "CBR" @@ -2983,9 +3415,9 @@ "locationName": "adaptiveQuantization" }, "Bitrate": { - "shape": "__integer", + "shape": "__integerMin1000Max1152000000", "locationName": "bitrate", - "documentation": "Average bitrate in bits/second. Required for VBR, CBR, and ABR. Five megabits can be entered as 5000000 or 5m. Five hundred kilobits can be entered as 500000 or 0.5m. For MS Smooth outputs, bitrates must be unique when rounded down to the nearest multiple of 1000." + "documentation": "Average bitrate in bits/second. Required for VBR and CBR. For MS Smooth outputs, bitrates must be unique when rounded down to the nearest multiple of 1000." }, "CodecLevel": { "shape": "H264CodecLevel", @@ -3016,12 +3448,12 @@ "locationName": "framerateConversionAlgorithm" }, "FramerateDenominator": { - "shape": "__integer", + "shape": "__integerMin1Max2147483647", "locationName": "framerateDenominator", "documentation": "When you use the API for transcode jobs that use framerate conversion, specify the framerate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateDenominator to specify the denominator of this fraction. In this example, use 1001 for the value of FramerateDenominator. When you use the console for transcode jobs that use framerate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976." 
}, "FramerateNumerator": { - "shape": "__integer", + "shape": "__integerMin1Max2147483647", "locationName": "framerateNumerator", "documentation": "Framerate numerator - framerate is a fraction, e.g. 24000 / 1001 = 23.976 fps." }, @@ -3030,12 +3462,12 @@ "locationName": "gopBReference" }, "GopClosedCadence": { - "shape": "__integer", + "shape": "__integerMin0Max2147483647", "locationName": "gopClosedCadence", "documentation": "Frequency of closed GOPs. In streaming applications, it is recommended that this be set to 1 so a decoder joining mid-stream will receive an IDR frame as quickly as possible. Setting this value to 0 will break output segmenting." }, "GopSize": { - "shape": "__double", + "shape": "__doubleMin0", "locationName": "gopSize", "documentation": "GOP Length (keyframe interval) in frames or seconds. Must be greater than zero." }, @@ -3044,36 +3476,36 @@ "locationName": "gopSizeUnits" }, "HrdBufferInitialFillPercentage": { - "shape": "__integer", + "shape": "__integerMin0Max100", "locationName": "hrdBufferInitialFillPercentage", "documentation": "Percentage of the buffer that should initially be filled (HRD buffer model)." }, "HrdBufferSize": { - "shape": "__integer", + "shape": "__integerMin0Max1152000000", "locationName": "hrdBufferSize", - "documentation": "Size of buffer (HRD buffer model). Five megabits can be entered as 5000000 or 5m. Five hundred kilobits can be entered as 500000 or 0.5m." + "documentation": "Size of buffer (HRD buffer model) in bits. For example, enter five megabits as 5000000." }, "InterlaceMode": { "shape": "H264InterlaceMode", "locationName": "interlaceMode" }, "MaxBitrate": { - "shape": "__integer", + "shape": "__integerMin1000Max1152000000", "locationName": "maxBitrate", - "documentation": "Maximum bitrate in bits/second (for VBR mode only). Five megabits can be entered as 5000000 or 5m. Five hundred kilobits can be entered as 500000 or 0.5m." + "documentation": "Maximum bitrate in bits/second. 
For example, enter five megabits per second as 5000000." }, "MinIInterval": { - "shape": "__integer", + "shape": "__integerMin0Max30", "locationName": "minIInterval", "documentation": "Enforces separation between repeated (cadence) I-frames and I-frames inserted by Scene Change Detection. If a scene change I-frame is within I-interval frames of a cadence I-frame, the GOP is shrunk and/or stretched to the scene change I-frame. GOP stretch requires enabling lookahead as well as setting I-interval. The normal cadence resumes for the next GOP. This setting is only used when Scene Change Detect is enabled. Note: Maximum GOP stretch = GOP size + Min-I-interval - 1" }, "NumberBFramesBetweenReferenceFrames": { - "shape": "__integer", + "shape": "__integerMin0Max7", "locationName": "numberBFramesBetweenReferenceFrames", "documentation": "Number of B-frames between reference frames." }, "NumberReferenceFrames": { - "shape": "__integer", + "shape": "__integerMin1Max6", "locationName": "numberReferenceFrames", "documentation": "Number of reference frames to use. The encoder may use more than requested if using B-frames and/or interlaced encoding." }, @@ -3082,12 +3514,12 @@ "locationName": "parControl" }, "ParDenominator": { - "shape": "__integer", + "shape": "__integerMin1Max2147483647", "locationName": "parDenominator", "documentation": "Pixel Aspect Ratio denominator." }, "ParNumerator": { - "shape": "__integer", + "shape": "__integerMin1Max2147483647", "locationName": "parNumerator", "documentation": "Pixel Aspect Ratio numerator." }, @@ -3108,7 +3540,7 @@ "locationName": "sceneChangeDetect" }, "Slices": { - "shape": "__integer", + "shape": "__integerMin1Max32", "locationName": "slices", "documentation": "Number of slices per picture. Must be less than or equal to the number of macroblock rows for progressive pictures, and less than or equal to half the number of macroblock rows for interlaced pictures." 
}, @@ -3117,7 +3549,7 @@ "locationName": "slowPal" }, "Softness": { - "shape": "__integer", + "shape": "__integerMin0Max128", "locationName": "softness", "documentation": "Softness. Selects quantizer matrix, larger values reduce high-frequency content in the encoded image." }, @@ -3257,7 +3689,7 @@ }, "H265FramerateControl": { "type": "string", - "documentation": "Using the API, set FramerateControl to INITIALIZE_FROM_SOURCE if you want the service to use the framerate from the input. Using the console, do this by choosing INITIALIZE_FROM_SOURCE for Framerate.", + "documentation": "If you are using the console, use the Framerate setting to specify the framerate for this output. If you want to keep the same framerate as the input video, choose Follow source. If you want to do framerate conversion, choose a framerate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your framerate as a fraction. If you are creating your transcoding job sepecification as a JSON file without the console, use FramerateControl to specify which value the service uses for the framerate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the framerate from the input. Choose SPECIFIED if you want the service to use the framerate you specify in the settings FramerateNumerator and FramerateDenominator.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -3289,7 +3721,7 @@ }, "H265InterlaceMode": { "type": "string", - "documentation": "Use Interlace mode (InterlaceMode) to choose the scan line type for the output. * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce interlaced output with the entire output having the same field polarity (top or bottom first). * Follow, Default Top (FOLLOw_TOP_FIELD) and Follow, Default Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. Therefore, behavior depends on the input scan type. 
- If the source is interlaced, the output will be interlaced with the same polarity as the source (it will follow the source). The output could therefore be a mix of \"top field first\" and \"bottom field first\". - If the source is progressive, the output will be interlaced with \"top field first\" or \"bottom field first\" polarity, depending on which of the Follow options you chose.", + "documentation": "Use Interlace mode (InterlaceMode) to choose the scan line type for the output. * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce interlaced output with the entire output having the same field polarity (top or bottom first). * Follow, Default Top (FOLLOW_TOP_FIELD) and Follow, Default Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. Therefore, behavior depends on the input scan type.\n - If the source is interlaced, the output will be interlaced with the same polarity as the source (it will follow the source). The output could therefore be a mix of \"top field first\" and \"bottom field first\".\n - If the source is progressive, the output will be interlaced with \"top field first\" or \"bottom field first\" polarity, depending on which of the Follow options you chose.", "enum": [ "PROGRESSIVE", "TOP_FIELD", @@ -3317,7 +3749,7 @@ }, "H265RateControlMode": { "type": "string", - "documentation": "Rate control mode. CQ uses constant quantizer (qp), ABR (average bitrate) does not write HRD parameters.", + "documentation": "Use this setting to specify whether this output has a variable bitrate (VBR) or constant bitrate (CBR).", "enum": [ "VBR", "CBR" @@ -3352,9 +3784,9 @@ "locationName": "alternateTransferFunctionSei" }, "Bitrate": { - "shape": "__integer", + "shape": "__integerMin1000Max1466400000", "locationName": "bitrate", - "documentation": "Average bitrate in bits/second. Required for VBR, CBR, and ABR. Five megabits can be entered as 5000000 or 5m. Five hundred kilobits can be entered as 500000 or 0.5m. 
For MS Smooth outputs, bitrates must be unique when rounded down to the nearest multiple of 1000." + "documentation": "Average bitrate in bits/second. Required for VBR and CBR. For MS Smooth outputs, bitrates must be unique when rounded down to the nearest multiple of 1000." }, "CodecLevel": { "shape": "H265CodecLevel", @@ -3377,12 +3809,12 @@ "locationName": "framerateConversionAlgorithm" }, "FramerateDenominator": { - "shape": "__integer", + "shape": "__integerMin1Max2147483647", "locationName": "framerateDenominator", "documentation": "Framerate denominator." }, "FramerateNumerator": { - "shape": "__integer", + "shape": "__integerMin1Max2147483647", "locationName": "framerateNumerator", "documentation": "Framerate numerator - framerate is a fraction, e.g. 24000 / 1001 = 23.976 fps." }, @@ -3391,12 +3823,12 @@ "locationName": "gopBReference" }, "GopClosedCadence": { - "shape": "__integer", + "shape": "__integerMin0Max2147483647", "locationName": "gopClosedCadence", "documentation": "Frequency of closed GOPs. In streaming applications, it is recommended that this be set to 1 so a decoder joining mid-stream will receive an IDR frame as quickly as possible. Setting this value to 0 will break output segmenting." }, "GopSize": { - "shape": "__double", + "shape": "__doubleMin0", "locationName": "gopSize", "documentation": "GOP Length (keyframe interval) in frames or seconds. Must be greater than zero." }, @@ -3405,36 +3837,36 @@ "locationName": "gopSizeUnits" }, "HrdBufferInitialFillPercentage": { - "shape": "__integer", + "shape": "__integerMin0Max100", "locationName": "hrdBufferInitialFillPercentage", "documentation": "Percentage of the buffer that should initially be filled (HRD buffer model)." }, "HrdBufferSize": { - "shape": "__integer", + "shape": "__integerMin0Max1466400000", "locationName": "hrdBufferSize", - "documentation": "Size of buffer (HRD buffer model). Five megabits can be entered as 5000000 or 5m. 
Five hundred kilobits can be entered as 500000 or 0.5m." + "documentation": "Size of buffer (HRD buffer model) in bits. For example, enter five megabits as 5000000." }, "InterlaceMode": { "shape": "H265InterlaceMode", "locationName": "interlaceMode" }, "MaxBitrate": { - "shape": "__integer", + "shape": "__integerMin1000Max1466400000", "locationName": "maxBitrate", - "documentation": "Maximum bitrate in bits/second (for VBR mode only). Five megabits can be entered as 5000000 or 5m. Five hundred kilobits can be entered as 500000 or 0.5m." + "documentation": "Maximum bitrate in bits/second." }, "MinIInterval": { - "shape": "__integer", + "shape": "__integerMin0Max30", "locationName": "minIInterval", "documentation": "Enforces separation between repeated (cadence) I-frames and I-frames inserted by Scene Change Detection. If a scene change I-frame is within I-interval frames of a cadence I-frame, the GOP is shrunk and/or stretched to the scene change I-frame. GOP stretch requires enabling lookahead as well as setting I-interval. The normal cadence resumes for the next GOP. This setting is only used when Scene Change Detect is enabled. Note: Maximum GOP stretch = GOP size + Min-I-interval - 1" }, "NumberBFramesBetweenReferenceFrames": { - "shape": "__integer", + "shape": "__integerMin0Max7", "locationName": "numberBFramesBetweenReferenceFrames", "documentation": "Number of B-frames between reference frames." }, "NumberReferenceFrames": { - "shape": "__integer", + "shape": "__integerMin1Max6", "locationName": "numberReferenceFrames", "documentation": "Number of reference frames to use. The encoder may use more than requested if using B-frames and/or interlaced encoding." }, @@ -3443,12 +3875,12 @@ "locationName": "parControl" }, "ParDenominator": { - "shape": "__integer", + "shape": "__integerMin1Max2147483647", "locationName": "parDenominator", "documentation": "Pixel Aspect Ratio denominator." 
}, "ParNumerator": { - "shape": "__integer", + "shape": "__integerMin1Max2147483647", "locationName": "parNumerator", "documentation": "Pixel Aspect Ratio numerator." }, @@ -3469,7 +3901,7 @@ "locationName": "sceneChangeDetect" }, "Slices": { - "shape": "__integer", + "shape": "__integerMin1Max32", "locationName": "slices", "documentation": "Number of slices per picture. Must be less than or equal to the number of macroblock rows for progressive pictures, and less than or equal to half the number of macroblock rows for interlaced pictures." }, @@ -3500,6 +3932,10 @@ "UnregisteredSeiTimecode": { "shape": "H265UnregisteredSeiTimecode", "locationName": "unregisteredSeiTimecode" + }, + "WriteMp4PackagingType": { + "shape": "H265WriteMp4PackagingType", + "locationName": "writeMp4PackagingType" } }, "documentation": "Settings for H265 codec" @@ -3561,71 +3997,83 @@ "ENABLED" ] }, + "H265WriteMp4PackagingType": { + "type": "string", + "documentation": "If HVC1, output that is H.265 will be marked as HVC1 and adhere to the ISO-IECJTC1-SC29_N13798_Text_ISOIEC_FDIS_14496-15_3rd_E spec which states that parameter set NAL units will be stored in the sample headers but not in the samples directly. If HEV1, then H.265 will be marked as HEV1 and parameter set NAL units will be written into the samples.", + "enum": [ + "HVC1", + "HEV1" + ] + }, "Hdr10Metadata": { "type": "structure", "members": { "BluePrimaryX": { - "shape": "__integer", + "shape": "__integerMin0Max50000", "locationName": "bluePrimaryX", - "documentation": "HDR Master Display Information comes from the color grader and the color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate." + "documentation": "HDR Master Display Information must be provided by a color grader, using color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate. Note that this setting is not for color correction." 
}, "BluePrimaryY": { - "shape": "__integer", + "shape": "__integerMin0Max50000", "locationName": "bluePrimaryY", - "documentation": "HDR Master Display Information comes from the color grader and the color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate." + "documentation": "HDR Master Display Information must be provided by a color grader, using color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate. Note that this setting is not for color correction." }, "GreenPrimaryX": { - "shape": "__integer", + "shape": "__integerMin0Max50000", "locationName": "greenPrimaryX", - "documentation": "HDR Master Display Information comes from the color grader and the color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate." + "documentation": "HDR Master Display Information must be provided by a color grader, using color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate. Note that this setting is not for color correction." }, "GreenPrimaryY": { - "shape": "__integer", + "shape": "__integerMin0Max50000", "locationName": "greenPrimaryY", - "documentation": "HDR Master Display Information comes from the color grader and the color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate." + "documentation": "HDR Master Display Information must be provided by a color grader, using color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate. Note that this setting is not for color correction." }, "MaxContentLightLevel": { - "shape": "__integer", + "shape": "__integerMin0Max65535", "locationName": "maxContentLightLevel", "documentation": "Maximum light level among all samples in the coded video sequence, in units of candelas per square meter." 
}, "MaxFrameAverageLightLevel": { - "shape": "__integer", + "shape": "__integerMin0Max65535", "locationName": "maxFrameAverageLightLevel", "documentation": "Maximum average light level of any frame in the coded video sequence, in units of candelas per square meter." }, "MaxLuminance": { - "shape": "__integer", + "shape": "__integerMin0Max2147483647", "locationName": "maxLuminance", "documentation": "Nominal maximum mastering display luminance in units of of 0.0001 candelas per square meter." }, "MinLuminance": { - "shape": "__integer", + "shape": "__integerMin0Max2147483647", "locationName": "minLuminance", "documentation": "Nominal minimum mastering display luminance in units of of 0.0001 candelas per square meter" }, "RedPrimaryX": { - "shape": "__integer", + "shape": "__integerMin0Max50000", "locationName": "redPrimaryX", - "documentation": "HDR Master Display Information comes from the color grader and the color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate." + "documentation": "HDR Master Display Information must be provided by a color grader, using color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate. Note that this setting is not for color correction." }, "RedPrimaryY": { - "shape": "__integer", + "shape": "__integerMin0Max50000", "locationName": "redPrimaryY", - "documentation": "HDR Master Display Information comes from the color grader and the color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate." + "documentation": "HDR Master Display Information must be provided by a color grader, using color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate. Note that this setting is not for color correction." 
}, "WhitePointX": { - "shape": "__integer", + "shape": "__integerMin0Max50000", "locationName": "whitePointX", - "documentation": "HDR Master Display Information comes from the color grader and the color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate." + "documentation": "HDR Master Display Information must be provided by a color grader, using color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate. Note that this setting is not for color correction." }, "WhitePointY": { - "shape": "__integer", + "shape": "__integerMin0Max50000", "locationName": "whitePointY", - "documentation": "HDR Master Display Information comes from the color grader and the color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate." + "documentation": "HDR Master Display Information must be provided by a color grader, using color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate. Note that this setting is not for color correction." } }, - "documentation": "Use the HDR master display (Hdr10Metadata) settings to provide values for HDR color. These values vary depending on the input video and must be provided by a color grader. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate." + "documentation": "Use the HDR master display (Hdr10Metadata) settings to correct HDR metadata or to provide missing metadata. These values vary depending on the input video and must be provided by a color grader. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate. Note that these settings are not color correction. 
Note that if you are creating HDR outputs inside of an HLS CMAF package, to comply with the Apple specification, you must use the HVC1 for H.265 setting.", + "required": [ + "MaxContentLightLevel", + "MaxFrameAverageLightLevel" + ] }, "HlsAdMarkers": { "type": "string", @@ -3648,10 +4096,15 @@ "type": "structure", "members": { "CaptionChannel": { - "shape": "__integer", + "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "captionChannel", "documentation": "Caption channel." }, + "CustomLanguageCode": { + "shape": "__stringMin3Max3PatternAZaZ3", + "locationName": "customLanguageCode", + "documentation": "Specify the language for this caption channel, using the ISO 639-2 or ISO 639-3 three-letter language code" + }, "LanguageCode": { "shape": "LanguageCode", "locationName": "languageCode" @@ -3701,7 +4154,7 @@ "type": "structure", "members": { "ConstantInitializationVector": { - "shape": "__string", + "shape": "__stringMin32Max32Pattern09aFAF32", "locationName": "constantInitializationVector", "documentation": "This is a 128-bit, 16-byte hex value represented by a 32-character text string. If this parameter is not set then the Initialization Vector will follow the segment number by default." }, @@ -3726,7 +4179,10 @@ "locationName": "type" } }, - "documentation": "Settings for HLS encryption" + "documentation": "Settings for HLS encryption", + "required": [ + "Type" + ] }, "HlsEncryptionType": { "type": "string", @@ -3740,7 +4196,7 @@ "type": "structure", "members": { "AdMarkers": { - "shape": "ListOfHlsAdMarkers", + "shape": "__listOfHlsAdMarkers", "locationName": "adMarkers", "documentation": "Choose one or more ad marker types to pass SCTE35 signals through to this group of Apple HLS outputs." }, @@ -3750,7 +4206,7 @@ "documentation": "A partial URI prefix that will be prepended to each output in the media .m3u8 file. Can be used if base manifest is delivered from a different URL than the main .m3u8 file." 
}, "CaptionLanguageMappings": { - "shape": "ListOfHlsCaptionLanguageMapping", + "shape": "__listOfHlsCaptionLanguageMapping", "locationName": "captionLanguageMappings", "documentation": "Language to be used on Caption outputs" }, @@ -3767,7 +4223,7 @@ "locationName": "codecSpecification" }, "Destination": { - "shape": "__string", + "shape": "__stringPatternS3", "locationName": "destination", "documentation": "Use Destination (Destination) to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file." }, @@ -3789,7 +4245,7 @@ "locationName": "manifestDurationFormat" }, "MinSegmentLength": { - "shape": "__integer", + "shape": "__integerMin0Max2147483647", "locationName": "minSegmentLength", "documentation": "When set, Minimum Segment Size is enforced by looking ahead and back within the specified range for a nearby avail and extending the segment size if needed." }, @@ -3802,7 +4258,7 @@ "locationName": "programDateTime" }, "ProgramDateTimePeriod": { - "shape": "__integer", + "shape": "__integerMin0Max3600", "locationName": "programDateTimePeriod", "documentation": "Period of insertion of EXT-X-PROGRAM-DATE-TIME entry, in seconds." }, @@ -3811,12 +4267,12 @@ "locationName": "segmentControl" }, "SegmentLength": { - "shape": "__integer", + "shape": "__integerMin1Max2147483647", "locationName": "segmentLength", "documentation": "Length of MPEG-2 Transport Stream segments to create (in seconds). Note that segments will end on the next keyframe after this number of seconds, so actual segment length may be longer." }, "SegmentsPerSubdirectory": { - "shape": "__integer", + "shape": "__integerMin1Max2147483647", "locationName": "segmentsPerSubdirectory", "documentation": "Number of segments to write to a subdirectory before starting a new one. 
directoryStructure must be SINGLE_DIRECTORY for this setting to have an effect." }, @@ -3829,17 +4285,21 @@ "locationName": "timedMetadataId3Frame" }, "TimedMetadataId3Period": { - "shape": "__integer", + "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "timedMetadataId3Period", "documentation": "Timed Metadata interval in seconds." }, "TimestampDeltaMilliseconds": { - "shape": "__integer", + "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "timestampDeltaMilliseconds", "documentation": "Provides an extra millisecond delta offset to fine tune the timestamps." } }, - "documentation": "Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to HLS_GROUP_SETTINGS." + "documentation": "Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to HLS_GROUP_SETTINGS.", + "required": [ + "MinSegmentLength", + "SegmentLength" + ] }, "HlsIFrameOnlyManifest": { "type": "string", @@ -3955,44 +4415,51 @@ "type": "structure", "members": { "Id3": { - "shape": "__string", + "shape": "__stringPatternAZaZ0902", "locationName": "id3", "documentation": "Use ID3 tag (Id3) to provide a tag value in base64-encode format." }, "Timecode": { - "shape": "__string", + "shape": "__stringPattern010920405090509092", "locationName": "timecode", "documentation": "Provide a Timecode (TimeCode) in HH:MM:SS:FF or HH:MM:SS;FF format." } }, - "documentation": "To insert ID3 tags in your output, specify two values. Use ID3 tag (Id3) to specify the base 64 encoded string and use Timecode (TimeCode) to specify the time when the tag should be inserted. To insert multiple ID3 tags in your output, create mulitple instances of ID3 insertion (Id3Insertion)." + "documentation": "To insert ID3 tags in your output, specify two values. Use ID3 tag (Id3) to specify the base 64 encoded string and use Timecode (TimeCode) to specify the time when the tag should be inserted. 
To insert multiple ID3 tags in your output, create multiple instances of ID3 insertion (Id3Insertion).", + "required": [ + "Timecode", + "Id3" + ] }, "ImageInserter": { "type": "structure", "members": { "InsertableImages": { - "shape": "ListOfInsertableImage", + "shape": "__listOfInsertableImage", "locationName": "insertableImages", "documentation": "Image to insert. Must be 32 bit windows BMP, PNG, or TGA file. Must not be larger than the output frames." } }, - "documentation": "Enable the Image inserter (ImageInserter) feature to include a graphic overlay on your video. Enable or disable this feature for each output individually. This setting is disabled by default." + "documentation": "Enable the Image inserter (ImageInserter) feature to include a graphic overlay on your video. Enable or disable this feature for each output individually. This setting is disabled by default.", + "required": [ + "InsertableImages" + ] }, "Input": { "type": "structure", "members": { "AudioSelectorGroups": { - "shape": "MapOfAudioSelectorGroup", + "shape": "__mapOfAudioSelectorGroup", "locationName": "audioSelectorGroups", "documentation": "Specifies set of audio selectors within an input to combine. An input may have multiple audio selector groups. See \"Audio Selector Group\":#inputs-audio_selector_group for more information." }, "AudioSelectors": { - "shape": "MapOfAudioSelector", + "shape": "__mapOfAudioSelector", "locationName": "audioSelectors", "documentation": "Use Audio selectors (AudioSelectors) to specify a track or set of tracks from the input that you will use in your outputs. You can use mutiple Audio selectors per input." }, "CaptionSelectors": { - "shape": "MapOfCaptionSelector", + "shape": "__mapOfCaptionSelector", "locationName": "captionSelectors", "documentation": "Use Captions selectors (CaptionSelectors) to specify the captions data from the input that you will use in your outputs. You can use mutiple captions selectors per input." 
}, @@ -4005,7 +4472,7 @@ "locationName": "denoiseFilter" }, "FileInput": { - "shape": "__string", + "shape": "__stringPatternS3MM2VVMMPPEEGGAAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MM", "locationName": "fileInput", "documentation": "Use Input (fileInput) to define the source file used in the transcode job. There can be multiple inputs in a job. These inputs are concantenated, in the order they are specified in the job, to create the output." }, @@ -4014,17 +4481,17 @@ "locationName": "filterEnable" }, "FilterStrength": { - "shape": "__integer", + "shape": "__integerMinNegative5Max5", "locationName": "filterStrength", "documentation": "Use Filter strength (FilterStrength) to adjust the magnitude the input filter settings (Deblock and Denoise). The range is -5 to 5. Default is 0." }, "InputClippings": { - "shape": "ListOfInputClipping", + "shape": "__listOfInputClipping", "locationName": "inputClippings", "documentation": "(InputClippings) contains sets of start and end times that together specify a portion of the input to be used in the outputs. If you provide only a start time, the clip will be the entire input from that point to the end. If you provide only an end time, it will be the entire input up to that point. When you specify more than one input clip, the transcoding service creates the job outputs by stringing the clips together in the order you specify them." }, "ProgramNumber": { - "shape": "__integer", + "shape": "__integerMin1Max2147483647", "locationName": "programNumber", "documentation": "Use Program (programNumber) to select a specific program from within a multi-program transport stream. Note that Quad 4K is not currently supported. Default is the first program within the transport stream. 
If the program you specify doesn't exist, the transcoding service will use this default." }, @@ -4041,23 +4508,26 @@ "locationName": "videoSelector" } }, - "documentation": "Specifies media input" + "documentation": "Specifies media input", + "required": [ + "FileInput" + ] }, "InputClipping": { "type": "structure", "members": { "EndTimecode": { - "shape": "__string", + "shape": "__stringPattern010920405090509092", "locationName": "endTimecode", - "documentation": "Set End timecode (EndTimecode) to the end of the portion of the input you are clipping. The frame corresponding to the End timecode value is included in the clip. Start timecode or End timecode may be left blank, but not both. When choosing this value, take into account your setting for Input timecode source. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your clip to begin five minutes into the video, use 01:00:05:00." + "documentation": "Set End timecode (EndTimecode) to the end of the portion of the input you are clipping. The frame corresponding to the End timecode value is included in the clip. Start timecode or End timecode may be left blank, but not both. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When choosing this value, take into account your setting for timecode source under input settings (InputTimecodeSource). For example, if you have embedded timecodes that start at 01:00:00:00 and you want your clip to end six minutes into the video, use 01:06:00:00." }, "StartTimecode": { - "shape": "__string", + "shape": "__stringPattern010920405090509092", "locationName": "startTimecode", - "documentation": "Set Start timecode (StartTimecode) to the beginning of the portion of the input you are clipping. The frame corresponding to the Start timecode value is included in the clip. Start timecode or End timecode may be left blank, but not both. 
When choosing this value, take into account your setting for Input timecode source. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your clip to begin five minutes into the video, use 01:00:05:00." + "documentation": "Set Start timecode (StartTimecode) to the beginning of the portion of the input you are clipping. The frame corresponding to the Start timecode value is included in the clip. Start timecode or End timecode may be left blank, but not both. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When choosing this value, take into account your setting for Input timecode source. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your clip to begin five minutes into the video, use 01:05:00:00." } }, - "documentation": "Include one instance of (InputClipping) for each input clip." + "documentation": "To transcode only portions of your input (clips), include one Input clipping (one instance of InputClipping in the JSON job file) for each input clip. All input clips you specify will be included in every output of the job." }, "InputDeblockFilter": { "type": "string", @@ -4096,17 +4566,17 @@ "type": "structure", "members": { "AudioSelectorGroups": { - "shape": "MapOfAudioSelectorGroup", + "shape": "__mapOfAudioSelectorGroup", "locationName": "audioSelectorGroups", "documentation": "Specifies set of audio selectors within an input to combine. An input may have multiple audio selector groups. See \"Audio Selector Group\":#inputs-audio_selector_group for more information." }, "AudioSelectors": { - "shape": "MapOfAudioSelector", + "shape": "__mapOfAudioSelector", "locationName": "audioSelectors", "documentation": "Use Audio selectors (AudioSelectors) to specify a track or set of tracks from the input that you will use in your outputs. You can use mutiple Audio selectors per input." 
}, "CaptionSelectors": { - "shape": "MapOfCaptionSelector", + "shape": "__mapOfCaptionSelector", "locationName": "captionSelectors", "documentation": "Use Captions selectors (CaptionSelectors) to specify the captions data from the input that you will use in your outputs. You can use mutiple captions selectors per input." }, @@ -4123,17 +4593,17 @@ "locationName": "filterEnable" }, "FilterStrength": { - "shape": "__integer", + "shape": "__integerMinNegative5Max5", "locationName": "filterStrength", "documentation": "Use Filter strength (FilterStrength) to adjust the magnitude the input filter settings (Deblock and Denoise). The range is -5 to 5. Default is 0." }, "InputClippings": { - "shape": "ListOfInputClipping", + "shape": "__listOfInputClipping", "locationName": "inputClippings", "documentation": "(InputClippings) contains sets of start and end times that together specify a portion of the input to be used in the outputs. If you provide only a start time, the clip will be the entire input from that point to the end. If you provide only an end time, it will be the entire input up to that point. When you specify more than one input clip, the transcoding service creates the job outputs by stringing the clips together in the order you specify them." }, "ProgramNumber": { - "shape": "__integer", + "shape": "__integerMin1Max2147483647", "locationName": "programNumber", "documentation": "Use Program (programNumber) to select a specific program from within a multi-program transport stream. Note that Quad 4K is not currently supported. Default is the first program within the transport stream. If the program you specify doesn't exist, the transcoding service will use this default." }, @@ -4154,7 +4624,7 @@ }, "InputTimecodeSource": { "type": "string", - "documentation": "Use Timecode source (InputTimecodeSource) to specify how timecode information from your input is adjusted and encoded in all outputs for the job. Default is embedded. 
Set to Embedded (EMBEDDED) to use the timecode that is in the input video. If no embedded timecode is in the source, will set the timecode for the first frame to 00:00:00:00. Set to Start at 0 (ZEROBASED) to set the timecode of the initial frame to 00:00:00:00. Set to Specified start (SPECIFIEDSTART) to provide the initial timecode yourself the setting (Start).", + "documentation": "Timecode source under input settings (InputTimecodeSource) only affects the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Use this setting to specify whether the service counts frames by timecodes embedded in the video (EMBEDDED) or by starting the first frame at zero (ZEROBASED). In both cases, the timecode format is HH:MM:SS:FF or HH:MM:SS;FF, where FF is the frame number. Only set this to EMBEDDED if your source video has embedded timecodes.", "enum": [ "EMBEDDED", "ZEROBASED", @@ -4165,62 +4635,69 @@ "type": "structure", "members": { "Duration": { - "shape": "__integer", + "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "duration", "documentation": "Use Duration (Duration) to set the time, in milliseconds, for the image to remain on the output video." }, "FadeIn": { - "shape": "__integer", + "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "fadeIn", "documentation": "Use Fade in (FadeIut) to set the length, in milliseconds, of the inserted image fade in. If you don't specify a value for Fade in, the image will appear abruptly at the Start time." }, "FadeOut": { - "shape": "__integer", + "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "fadeOut", "documentation": "Use Fade out (FadeOut) to set the length, in milliseconds, of the inserted image fade out. If you don't specify a value for Fade out, the image will disappear abruptly at the end of the inserted image duration." 
}, "Height": { - "shape": "__integer", + "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "height", "documentation": "Specify the Height (Height) of the inserted image. Use a value that is less than or equal to the video resolution height. Leave this setting blank to use the native height of the image." }, "ImageInserterInput": { - "shape": "__string", + "shape": "__stringMin14PatternS3BmpBMPPngPNGTgaTGA", "locationName": "imageInserterInput", "documentation": "Use Image location (imageInserterInput) to specify the Amazon S3 location of the image to be inserted into the output. Use a 32 bit BMP, PNG, or TGA file that fits inside the video frame." }, "ImageX": { - "shape": "__integer", + "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "imageX", "documentation": "Use Left (ImageX) to set the distance, in pixels, between the inserted image and the left edge of the frame. Required for BMP, PNG and TGA input." }, "ImageY": { - "shape": "__integer", + "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "imageY", "documentation": "Use Top (ImageY) to set the distance, in pixels, between the inserted image and the top edge of the video frame. Required for BMP, PNG and TGA input." }, "Layer": { - "shape": "__integer", + "shape": "__integerMin0Max99", "locationName": "layer", "documentation": "Use Layer (Layer) to specify how overlapping inserted images appear. Images with higher values of layer appear on top of images with lower values of layer." }, "Opacity": { - "shape": "__integer", + "shape": "__integerMin0Max100", "locationName": "opacity", "documentation": "Use Opacity (Opacity) to specify how much of the underlying video shows through the inserted image. 0 is transparent and 100 is fully opaque. Default is 50." 
}, "StartTime": { - "shape": "__string", + "shape": "__stringPattern01D20305D205D", "locationName": "startTime", - "documentation": "Use Start time (StartTime) to specify the video timecode when the image is inserted in the output. This must be in timecode format (HH:MM:SS:FF)" + "documentation": "Use Start time (StartTime) to specify the video timecode when the image is inserted in the output. This must be in timecode (HH:MM:SS:FF or HH:MM:SS;FF) format." }, "Width": { - "shape": "__integer", + "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "width", "documentation": "Specify the Width (Width) of the inserted image. Use a value that is less than or equal to the video resolution width. Leave this setting blank to use the native width of the image." } }, - "documentation": "Settings for Insertable Image" + "documentation": "Settings for Insertable Image", + "required": [ + "ImageY", + "ImageX", + "ImageInserterInput", + "Opacity", + "Layer" + ] }, "InternalServerErrorException": { "type": "structure", @@ -4245,7 +4722,7 @@ "documentation": "An identifier for this resource that is unique within all of AWS." }, "CreatedAt": { - "shape": "__timestamp", + "shape": "__timestampIso8601", "locationName": "createdAt", "documentation": "The time, in Unix epoch format in seconds, when the job got created." }, @@ -4270,7 +4747,7 @@ "documentation": "The job template that the job is created from, if it is created from a job template." }, "OutputGroupDetails": { - "shape": "ListOfOutputGroupDetail", + "shape": "__listOfOutputGroupDetail", "locationName": "outputGroupDetails", "documentation": "List of output group details" }, @@ -4297,18 +4774,22 @@ "locationName": "timing" }, "UserMetadata": { - "shape": "MapOf__string", + "shape": "__mapOf__string", "locationName": "userMetadata", "documentation": "User-defined metadata that you want to associate with an MediaConvert job. You specify metadata in key/value pairs." 
} }, - "documentation": "Each job converts an input file into an output file or files. For more information, see the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html" + "documentation": "Each job converts an input file into an output file or files. For more information, see the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html", + "required": [ + "Role", + "Settings" + ] }, "JobSettings": { "type": "structure", "members": { "AdAvailOffset": { - "shape": "__integer", + "shape": "__integerMinNegative1000Max1000", "locationName": "adAvailOffset", "documentation": "When specified, this offset (in milliseconds) is added to the input Ad Avail PTS time." }, @@ -4318,7 +4799,7 @@ "documentation": "Settings for ad avail blanking. Video can be blanked or overlaid with an image, and audio muted during SCTE-35 triggered ad avails." }, "Inputs": { - "shape": "ListOfInput", + "shape": "__listOfInput", "locationName": "inputs", "documentation": "Use Inputs (inputs) to define source file used in the transcode job. There can be multiple inputs add in a job. These inputs will be concantenated together to create the output." }, @@ -4327,9 +4808,9 @@ "locationName": "nielsenConfiguration" }, "OutputGroups": { - "shape": "ListOfOutputGroup", + "shape": "__listOfOutputGroup", "locationName": "outputGroups", - "documentation": "**!!**(OutputGroups) contains one group of settings for each set of outputs that share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, MXF, and no container) are grouped in a single output group as well. Required in (OutputGroups) is a group of settings that apply to the whole group. This required object depends on the value you set for (Type) under (OutputGroups)>(OutputGroupSettings). Type, settings object pairs are as follows. 
* FILE_GROUP_SETTINGS, FileGroupSettings * HLS_GROUP_SETTINGS, HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings * MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings" + "documentation": "(OutputGroups) contains one group of settings for each set of outputs that share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, MXF, and no container) are grouped in a single output group as well. Required in (OutputGroups) is a group of settings that apply to the whole group. This required object depends on the value you set for (Type) under (OutputGroups)>(OutputGroupSettings). Type, settings object pairs are as follows. * FILE_GROUP_SETTINGS, FileGroupSettings * HLS_GROUP_SETTINGS, HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings * MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings * CMAF_GROUP_SETTINGS, CmafGroupSettings" }, "TimecodeConfig": { "shape": "TimecodeConfig", @@ -4341,7 +4822,11 @@ "locationName": "timedMetadataInsertion" } }, - "documentation": "JobSettings contains all the transcode settings for a job." + "documentation": "JobSettings contains all the transcode settings for a job.", + "required": [ + "OutputGroups", + "Inputs" + ] }, "JobStatus": { "type": "string", @@ -4368,7 +4853,7 @@ "documentation": "An optional category you create to organize your job templates." }, "CreatedAt": { - "shape": "__timestamp", + "shape": "__timestampIso8601", "locationName": "createdAt", "documentation": "The timestamp in epoch seconds for Job template creation." }, @@ -4378,7 +4863,7 @@ "documentation": "An optional description you create for each job template." }, "LastUpdated": { - "shape": "__timestamp", + "shape": "__timestampIso8601", "locationName": "lastUpdated", "documentation": "The timestamp in epoch seconds when the Job template was last updated." }, @@ -4402,7 +4887,11 @@ "documentation": "A job template can be of two types: system or custom. 
System or built-in job templates can't be modified or deleted by the user." } }, - "documentation": "A job template is a pre-made set of encoding instructions that you can use to quickly create a job." + "documentation": "A job template is a pre-made set of encoding instructions that you can use to quickly create a job.", + "required": [ + "Settings", + "Name" + ] }, "JobTemplateListBy": { "type": "string", @@ -4417,7 +4906,7 @@ "type": "structure", "members": { "AdAvailOffset": { - "shape": "__integer", + "shape": "__integerMinNegative1000Max1000", "locationName": "adAvailOffset", "documentation": "When specified, this offset (in milliseconds) is added to the input Ad Avail PTS time." }, @@ -4427,7 +4916,7 @@ "documentation": "Settings for ad avail blanking. Video can be blanked or overlaid with an image, and audio muted during SCTE-35 triggered ad avails." }, "Inputs": { - "shape": "ListOfInputTemplate", + "shape": "__listOfInputTemplate", "locationName": "inputs", "documentation": "Use Inputs (inputs) to define the source file used in the transcode job. There can only be one input in a job template. Using the API, you can include multiple inputs when referencing a job template." }, @@ -4436,9 +4925,9 @@ "locationName": "nielsenConfiguration" }, "OutputGroups": { - "shape": "ListOfOutputGroup", + "shape": "__listOfOutputGroup", "locationName": "outputGroups", - "documentation": "**!!**(OutputGroups) contains one group of settings for each set of outputs that share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, MXF, and no container) are grouped in a single output group as well. Required in (OutputGroups) is a group of settings that apply to the whole group. This required object depends on the value you set for (Type) under (OutputGroups)>(OutputGroupSettings). Type, settings object pairs are as follows. 
* FILE_GROUP_SETTINGS, FileGroupSettings * HLS_GROUP_SETTINGS, HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings * MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings" + "documentation": "(OutputGroups) contains one group of settings for each set of outputs that share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, MXF, and no container) are grouped in a single output group as well. Required in (OutputGroups) is a group of settings that apply to the whole group. This required object depends on the value you set for (Type) under (OutputGroups)>(OutputGroupSettings). Type, settings object pairs are as follows. * FILE_GROUP_SETTINGS, FileGroupSettings * HLS_GROUP_SETTINGS, HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings * MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings * CMAF_GROUP_SETTINGS, CmafGroupSettings" }, "TimecodeConfig": { "shape": "TimecodeConfig", @@ -4450,11 +4939,14 @@ "locationName": "timedMetadataInsertion" } }, - "documentation": "JobTemplateSettings contains all the transcode settings saved in the template that will be applied to jobs created from it." + "documentation": "JobTemplateSettings contains all the transcode settings saved in the template that will be applied to jobs created from it.", + "required": [ + "OutputGroups" + ] }, "LanguageCode": { "type": "string", - "documentation": "Code to specify the language, following the specification \"ISO 639-2 three-digit code\":http://www.loc.gov/standards/iso639-2/", + "documentation": "Specify the language, using the ISO 639-2 three-letter code listed at https://www.loc.gov/standards/iso639-2/php/code_list.php.", "enum": [ "ENG", "SPA", @@ -4664,7 +5156,7 @@ "location": "querystring" }, "MaxResults": { - "shape": "__integer", + "shape": "__integerMin1Max20", "locationName": "maxResults", "documentation": "Optional. 
Number of job templates, up to twenty, that will be returned at one time.", "location": "querystring" @@ -4686,7 +5178,7 @@ "type": "structure", "members": { "JobTemplates": { - "shape": "ListOfJobTemplate", + "shape": "__listOfJobTemplate", "locationName": "jobTemplates", "documentation": "List of Job templates." }, @@ -4701,7 +5193,7 @@ "type": "structure", "members": { "MaxResults": { - "shape": "__integer", + "shape": "__integerMin1Max20", "locationName": "maxResults", "documentation": "Optional. Number of jobs, up to twenty, that will be returned at one time.", "location": "querystring" @@ -4734,7 +5226,7 @@ "type": "structure", "members": { "Jobs": { - "shape": "ListOfJob", + "shape": "__listOfJob", "locationName": "jobs", "documentation": "List of jobs" }, @@ -4745,138 +5237,6 @@ } } }, - "ListOfAudioDescription": { - "type": "list", - "member": { - "shape": "AudioDescription" - } - }, - "ListOfCaptionDescription": { - "type": "list", - "member": { - "shape": "CaptionDescription" - } - }, - "ListOfCaptionDescriptionPreset": { - "type": "list", - "member": { - "shape": "CaptionDescriptionPreset" - } - }, - "ListOfEndpoint": { - "type": "list", - "member": { - "shape": "Endpoint" - } - }, - "ListOfHlsAdMarkers": { - "type": "list", - "member": { - "shape": "HlsAdMarkers" - } - }, - "ListOfHlsCaptionLanguageMapping": { - "type": "list", - "member": { - "shape": "HlsCaptionLanguageMapping" - } - }, - "ListOfId3Insertion": { - "type": "list", - "member": { - "shape": "Id3Insertion" - } - }, - "ListOfInput": { - "type": "list", - "member": { - "shape": "Input" - } - }, - "ListOfInputClipping": { - "type": "list", - "member": { - "shape": "InputClipping" - } - }, - "ListOfInputTemplate": { - "type": "list", - "member": { - "shape": "InputTemplate" - } - }, - "ListOfInsertableImage": { - "type": "list", - "member": { - "shape": "InsertableImage" - } - }, - "ListOfJob": { - "type": "list", - "member": { - "shape": "Job" - } - }, - "ListOfJobTemplate": { - "type": 
"list", - "member": { - "shape": "JobTemplate" - } - }, - "ListOfOutput": { - "type": "list", - "member": { - "shape": "Output" - } - }, - "ListOfOutputChannelMapping": { - "type": "list", - "member": { - "shape": "OutputChannelMapping" - } - }, - "ListOfOutputDetail": { - "type": "list", - "member": { - "shape": "OutputDetail" - } - }, - "ListOfOutputGroup": { - "type": "list", - "member": { - "shape": "OutputGroup" - } - }, - "ListOfOutputGroupDetail": { - "type": "list", - "member": { - "shape": "OutputGroupDetail" - } - }, - "ListOfPreset": { - "type": "list", - "member": { - "shape": "Preset" - } - }, - "ListOfQueue": { - "type": "list", - "member": { - "shape": "Queue" - } - }, - "ListOf__integer": { - "type": "list", - "member": { - "shape": "__integer" - } - }, - "ListOf__string": { - "type": "list", - "member": { - "shape": "__string" - } - }, "ListPresetsRequest": { "type": "structure", "members": { @@ -4892,7 +5252,7 @@ "location": "querystring" }, "MaxResults": { - "shape": "__integer", + "shape": "__integerMin1Max20", "locationName": "maxResults", "documentation": "Optional. Number of presets, up to twenty, that will be returned at one time", "location": "querystring" @@ -4919,7 +5279,7 @@ "documentation": "Use this string to request the next batch of presets." }, "Presets": { - "shape": "ListOfPreset", + "shape": "__listOfPreset", "locationName": "presets", "documentation": "List of presets" } @@ -4934,7 +5294,7 @@ "location": "querystring" }, "MaxResults": { - "shape": "__integer", + "shape": "__integerMin1Max20", "locationName": "maxResults", "documentation": "Optional. Number of queues, up to twenty, that will be returned at one time.", "location": "querystring" @@ -4961,12 +5321,35 @@ "documentation": "Use this string to request the next batch of queues." 
}, "Queues": { - "shape": "ListOfQueue", + "shape": "__listOfQueue", "locationName": "queues", "documentation": "List of queues" } } }, + "ListTagsForResourceRequest": { + "type": "structure", + "members": { + "Arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "The Amazon Resource Name (ARN) of the resource that you want to list tags for. To get the ARN, send a GET request with the resource name.", + "location": "uri" + } + }, + "required": [ + "Arn" + ] + }, + "ListTagsForResourceResponse": { + "type": "structure", + "members": { + "ResourceTags": { + "shape": "ResourceTags", + "locationName": "resourceTags" + } + } + }, "M2tsAudioBufferModel": { "type": "string", "documentation": "Selects between the DVB and ATSC buffer models for Dolby Digital audio.", @@ -5067,17 +5450,17 @@ "locationName": "audioBufferModel" }, "AudioFramesPerPes": { - "shape": "__integer", + "shape": "__integerMin0Max2147483647", "locationName": "audioFramesPerPes", "documentation": "The number of audio frames to insert for each PES packet." }, "AudioPids": { - "shape": "ListOf__integer", + "shape": "__listOf__integerMin32Max8182", "locationName": "audioPids", "documentation": "Packet Identifier (PID) of the elementary audio stream(s) in the transport stream. Multiple values are accepted, and can be entered in ranges and/or by comma separation." }, "Bitrate": { - "shape": "__integer", + "shape": "__integerMin0Max2147483647", "locationName": "bitrate", "documentation": "The output bitrate of the transport stream in bits per second. Setting to 0 lets the muxer automatically determine the appropriate bitrate. Other common values are 3750000, 7500000, and 15000000." }, @@ -5094,7 +5477,7 @@ "locationName": "dvbSdtSettings" }, "DvbSubPids": { - "shape": "ListOf__integer", + "shape": "__listOf__integerMin32Max8182", "locationName": "dvbSubPids", "documentation": "Packet Identifier (PID) for input source DVB Subtitle data to this output. 
Multiple values are accepted, and can be entered in ranges and/or by comma separation." }, @@ -5103,7 +5486,7 @@ "locationName": "dvbTdtSettings" }, "DvbTeletextPid": { - "shape": "__integer", + "shape": "__integerMin32Max8182", "locationName": "dvbTeletextPid", "documentation": "Packet Identifier (PID) for input source DVB Teletext data to this output." }, @@ -5120,17 +5503,17 @@ "locationName": "esRateInPes" }, "FragmentTime": { - "shape": "__double", + "shape": "__doubleMin0", "locationName": "fragmentTime", "documentation": "The length in seconds of each fragment. Only used with EBP markers." }, "MaxPcrInterval": { - "shape": "__integer", + "shape": "__integerMin0Max500", "locationName": "maxPcrInterval", "documentation": "Maximum time in milliseconds between Program Clock References (PCRs) inserted into the transport stream." }, "MinEbpInterval": { - "shape": "__integer", + "shape": "__integerMin0Max10000", "locationName": "minEbpInterval", "documentation": "When set, enforces that Encoder Boundary Points do not come within the specified time interval of each other by looking ahead at input video. If another EBP is going to come in within the specified time interval, the current EBP is not emitted, and the segment is \"stretched\" to the next marker. The lookahead value does not add latency to the system. The Live Event must be configured elsewhere to create sufficient latency to make the lookahead accurate." }, @@ -5139,12 +5522,12 @@ "locationName": "nielsenId3" }, "NullPacketBitrate": { - "shape": "__double", + "shape": "__doubleMin0", "locationName": "nullPacketBitrate", "documentation": "Value in bits per second of extra null packets to insert into the transport stream. This can be used if a downstream encryption system requires periodic null packets." 
}, "PatInterval": { - "shape": "__integer", + "shape": "__integerMin0Max1000", "locationName": "patInterval", "documentation": "The number of milliseconds between instances of this table in the output transport stream." }, @@ -5153,27 +5536,27 @@ "locationName": "pcrControl" }, "PcrPid": { - "shape": "__integer", + "shape": "__integerMin32Max8182", "locationName": "pcrPid", "documentation": "Packet Identifier (PID) of the Program Clock Reference (PCR) in the transport stream. When no value is given, the encoder will assign the same value as the Video PID." }, "PmtInterval": { - "shape": "__integer", + "shape": "__integerMin0Max1000", "locationName": "pmtInterval", "documentation": "The number of milliseconds between instances of this table in the output transport stream." }, "PmtPid": { - "shape": "__integer", + "shape": "__integerMin32Max8182", "locationName": "pmtPid", "documentation": "Packet Identifier (PID) for the Program Map Table (PMT) in the transport stream." }, "PrivateMetadataPid": { - "shape": "__integer", + "shape": "__integerMin32Max8182", "locationName": "privateMetadataPid", "documentation": "Packet Identifier (PID) of the private metadata stream in the transport stream." }, "ProgramNumber": { - "shape": "__integer", + "shape": "__integerMin0Max65535", "locationName": "programNumber", "documentation": "The value of the program number field in the Program Map Table." }, @@ -5182,7 +5565,7 @@ "locationName": "rateMode" }, "Scte35Pid": { - "shape": "__integer", + "shape": "__integerMin32Max8182", "locationName": "scte35Pid", "documentation": "Packet Identifier (PID) of the SCTE-35 stream in the transport stream." }, @@ -5199,22 +5582,22 @@ "locationName": "segmentationStyle" }, "SegmentationTime": { - "shape": "__double", + "shape": "__doubleMin0", "locationName": "segmentationTime", "documentation": "The length in seconds of each segment. Required unless markers is set to _none_." 
}, "TimedMetadataPid": { - "shape": "__integer", + "shape": "__integerMin32Max8182", "locationName": "timedMetadataPid", "documentation": "Packet Identifier (PID) of the timed metadata stream in the transport stream." }, "TransportStreamId": { - "shape": "__integer", + "shape": "__integerMin0Max65535", "locationName": "transportStreamId", "documentation": "The value of the transport stream ID field in the Program Map Table." }, "VideoPid": { - "shape": "__integer", + "shape": "__integerMin32Max8182", "locationName": "videoPid", "documentation": "Packet Identifier (PID) of the elementary video stream in the transport stream." } @@ -5249,12 +5632,12 @@ "type": "structure", "members": { "AudioFramesPerPes": { - "shape": "__integer", + "shape": "__integerMin0Max2147483647", "locationName": "audioFramesPerPes", "documentation": "The number of audio frames to insert for each PES packet." }, "AudioPids": { - "shape": "ListOf__integer", + "shape": "__listOf__integerMin32Max8182", "locationName": "audioPids", "documentation": "Packet Identifier (PID) of the elementary audio stream(s) in the transport stream. Multiple values are accepted, and can be entered in ranges and/or by comma separation." }, @@ -5263,7 +5646,7 @@ "locationName": "nielsenId3" }, "PatInterval": { - "shape": "__integer", + "shape": "__integerMin0Max1000", "locationName": "patInterval", "documentation": "The number of milliseconds between instances of this table in the output transport stream." }, @@ -5272,32 +5655,32 @@ "locationName": "pcrControl" }, "PcrPid": { - "shape": "__integer", + "shape": "__integerMin32Max8182", "locationName": "pcrPid", "documentation": "Packet Identifier (PID) of the Program Clock Reference (PCR) in the transport stream. When no value is given, the encoder will assign the same value as the Video PID." 
}, "PmtInterval": { - "shape": "__integer", + "shape": "__integerMin0Max1000", "locationName": "pmtInterval", "documentation": "The number of milliseconds between instances of this table in the output transport stream." }, "PmtPid": { - "shape": "__integer", + "shape": "__integerMin32Max8182", "locationName": "pmtPid", "documentation": "Packet Identifier (PID) for the Program Map Table (PMT) in the transport stream." }, "PrivateMetadataPid": { - "shape": "__integer", + "shape": "__integerMin32Max8182", "locationName": "privateMetadataPid", "documentation": "Packet Identifier (PID) of the private metadata stream in the transport stream." }, "ProgramNumber": { - "shape": "__integer", + "shape": "__integerMin0Max65535", "locationName": "programNumber", "documentation": "The value of the program number field in the Program Map Table." }, "Scte35Pid": { - "shape": "__integer", + "shape": "__integerMin32Max8182", "locationName": "scte35Pid", "documentation": "Packet Identifier (PID) of the SCTE-35 stream in the transport stream." }, @@ -5310,59 +5693,23 @@ "locationName": "timedMetadata" }, "TimedMetadataPid": { - "shape": "__integer", + "shape": "__integerMin32Max8182", "locationName": "timedMetadataPid", "documentation": "Packet Identifier (PID) of the timed metadata stream in the transport stream." }, "TransportStreamId": { - "shape": "__integer", + "shape": "__integerMin0Max65535", "locationName": "transportStreamId", "documentation": "The value of the transport stream ID field in the Program Map Table." }, "VideoPid": { - "shape": "__integer", + "shape": "__integerMin32Max8182", "locationName": "videoPid", "documentation": "Packet Identifier (PID) of the elementary video stream in the transport stream." 
} }, "documentation": "Settings for TS segments in HLS" }, - "MapOfAudioSelector": { - "type": "map", - "key": { - "shape": "__string" - }, - "value": { - "shape": "AudioSelector" - } - }, - "MapOfAudioSelectorGroup": { - "type": "map", - "key": { - "shape": "__string" - }, - "value": { - "shape": "AudioSelectorGroup" - } - }, - "MapOfCaptionSelector": { - "type": "map", - "key": { - "shape": "__string" - }, - "value": { - "shape": "CaptionSelector" - } - }, - "MapOf__string": { - "type": "map", - "key": { - "shape": "__string" - }, - "value": { - "shape": "__string" - } - }, "MovClapAtom": { "type": "string", "documentation": "When enabled, include 'clap' atom if appropriate for the video output settings.", @@ -5433,17 +5780,17 @@ "type": "structure", "members": { "Bitrate": { - "shape": "__integer", + "shape": "__integerMin32000Max384000", "locationName": "bitrate", "documentation": "Average bitrate in bits/second." }, "Channels": { - "shape": "__integer", + "shape": "__integerMin1Max2", "locationName": "channels", "documentation": "Set Channels to specify the number of channels in this output audio track. Choosing Mono in the console will give you 1 output channel; choosing Stereo will give you 2. In the API, valid values are 1 and 2." }, "SampleRate": { - "shape": "__integer", + "shape": "__integerMin32000Max48000", "locationName": "sampleRate", "documentation": "Sample rate in hz." } @@ -5528,7 +5875,7 @@ }, "Mpeg2FramerateControl": { "type": "string", - "documentation": "Using the API, set FramerateControl to INITIALIZE_FROM_SOURCE if you want the service to use the framerate from the input. Using the console, do this by choosing INITIALIZE_FROM_SOURCE for Framerate.", + "documentation": "If you are using the console, use the Framerate setting to specify the framerate for this output. If you want to keep the same framerate as the input video, choose Follow source. 
If you want to do framerate conversion, choose a framerate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your framerate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the framerate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the framerate from the input. Choose SPECIFIED if you want the service to use the framerate you specify in the settings FramerateNumerator and FramerateDenominator.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -5552,7 +5899,7 @@ }, "Mpeg2InterlaceMode": { "type": "string", - "documentation": "Use Interlace mode (InterlaceMode) to choose the scan line type for the output. * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce interlaced output with the entire output having the same field polarity (top or bottom first). * Follow, Default Top (FOLLOw_TOP_FIELD) and Follow, Default Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. Therefore, behavior depends on the input scan type. - If the source is interlaced, the output will be interlaced with the same polarity as the source (it will follow the source). The output could therefore be a mix of \"top field first\" and \"bottom field first\". - If the source is progressive, the output will be interlaced with \"top field first\" or \"bottom field first\" polarity, depending on which of the Follow options you chose.", + "documentation": "Use Interlace mode (InterlaceMode) to choose the scan line type for the output. * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce interlaced output with the entire output having the same field polarity (top or bottom first). 
* Follow, Default Top (FOLLOW_TOP_FIELD) and Follow, Default Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. Therefore, behavior depends on the input scan type.\n - If the source is interlaced, the output will be interlaced with the same polarity as the source (it will follow the source). The output could therefore be a mix of \"top field first\" and \"bottom field first\".\n - If the source is progressive, the output will be interlaced with \"top field first\" or \"bottom field first\" polarity, depending on which of the Follow options you chose.", "enum": [ "PROGRESSIVE", "TOP_FIELD", @@ -5612,9 +5959,9 @@ "locationName": "adaptiveQuantization" }, "Bitrate": { - "shape": "__integer", + "shape": "__integerMin1000Max288000000", "locationName": "bitrate", - "documentation": "Average bitrate in bits/second. Required for VBR, CBR, and ABR. Five megabits can be entered as 5000000 or 5m. Five hundred kilobits can be entered as 500000 or 0.5m. For MS Smooth outputs, bitrates must be unique when rounded down to the nearest multiple of 1000." + "documentation": "Average bitrate in bits/second. Required for VBR and CBR. For MS Smooth outputs, bitrates must be unique when rounded down to the nearest multiple of 1000." }, "CodecLevel": { "shape": "Mpeg2CodecLevel", @@ -5633,22 +5980,22 @@ "locationName": "framerateConversionAlgorithm" }, "FramerateDenominator": { - "shape": "__integer", + "shape": "__integerMin1Max1001", "locationName": "framerateDenominator", "documentation": "Framerate denominator." }, "FramerateNumerator": { - "shape": "__integer", + "shape": "__integerMin24Max60000", "locationName": "framerateNumerator", "documentation": "Framerate numerator - framerate is a fraction, e.g. 24000 / 1001 = 23.976 fps." }, "GopClosedCadence": { - "shape": "__integer", + "shape": "__integerMin0Max2147483647", "locationName": "gopClosedCadence", "documentation": "Frequency of closed GOPs. 
In streaming applications, it is recommended that this be set to 1 so a decoder joining mid-stream will receive an IDR frame as quickly as possible. Setting this value to 0 will break output segmenting." }, "GopSize": { - "shape": "__double", + "shape": "__doubleMin0", "locationName": "gopSize", "documentation": "GOP Length (keyframe interval) in frames or seconds. Must be greater than zero." }, @@ -5657,14 +6004,14 @@ "locationName": "gopSizeUnits" }, "HrdBufferInitialFillPercentage": { - "shape": "__integer", + "shape": "__integerMin0Max100", "locationName": "hrdBufferInitialFillPercentage", "documentation": "Percentage of the buffer that should initially be filled (HRD buffer model)." }, "HrdBufferSize": { - "shape": "__integer", + "shape": "__integerMin0Max47185920", "locationName": "hrdBufferSize", - "documentation": "Size of buffer (HRD buffer model). Five megabits can be entered as 5000000 or 5m. Five hundred kilobits can be entered as 500000 or 0.5m." + "documentation": "Size of buffer (HRD buffer model) in bits. For example, enter five megabits as 5000000." }, "InterlaceMode": { "shape": "Mpeg2InterlaceMode", @@ -5675,17 +6022,17 @@ "locationName": "intraDcPrecision" }, "MaxBitrate": { - "shape": "__integer", + "shape": "__integerMin1000Max300000000", "locationName": "maxBitrate", - "documentation": "Maximum bitrate in bits/second (for VBR mode only). Five megabits can be entered as 5000000 or 5m. Five hundred kilobits can be entered as 500000 or 0.5m." + "documentation": "Maximum bitrate in bits/second. For example, enter five megabits per second as 5000000." }, "MinIInterval": { - "shape": "__integer", + "shape": "__integerMin0Max30", "locationName": "minIInterval", "documentation": "Enforces separation between repeated (cadence) I-frames and I-frames inserted by Scene Change Detection. If a scene change I-frame is within I-interval frames of a cadence I-frame, the GOP is shrunk and/or stretched to the scene change I-frame. 
GOP stretch requires enabling lookahead as well as setting I-interval. The normal cadence resumes for the next GOP. This setting is only used when Scene Change Detect is enabled. Note: Maximum GOP stretch = GOP size + Min-I-interval - 1" }, "NumberBFramesBetweenReferenceFrames": { - "shape": "__integer", + "shape": "__integerMin0Max7", "locationName": "numberBFramesBetweenReferenceFrames", "documentation": "Number of B-frames between reference frames." }, @@ -5694,12 +6041,12 @@ "locationName": "parControl" }, "ParDenominator": { - "shape": "__integer", + "shape": "__integerMin1Max2147483647", "locationName": "parDenominator", "documentation": "Pixel Aspect Ratio denominator." }, "ParNumerator": { - "shape": "__integer", + "shape": "__integerMin1Max2147483647", "locationName": "parNumerator", "documentation": "Pixel Aspect Ratio numerator." }, @@ -5720,7 +6067,7 @@ "locationName": "slowPal" }, "Softness": { - "shape": "__integer", + "shape": "__integerMin0Max128", "locationName": "softness", "documentation": "Softness. Selects quantizer matrix, larger values reduce high-frequency content in the encoded image." }, @@ -5800,7 +6147,10 @@ "locationName": "spekeKeyProvider" } }, - "documentation": "If you are using DRM, set DRM System (MsSmoothEncryptionSettings) to specify the value SpekeKeyProvider." + "documentation": "If you are using DRM, set DRM System (MsSmoothEncryptionSettings) to specify the value SpekeKeyProvider.", + "required": [ + "SpekeKeyProvider" + ] }, "MsSmoothGroupSettings": { "type": "structure", @@ -5810,7 +6160,7 @@ "locationName": "audioDeduplication" }, "Destination": { - "shape": "__string", + "shape": "__stringPatternS3", "locationName": "destination", "documentation": "Use Destination (Destination) to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. 
If your job has multiple inputs, the service uses the filename of the first input file." }, @@ -5819,7 +6169,7 @@ "locationName": "encryption" }, "FragmentLength": { - "shape": "__integer", + "shape": "__integerMin1Max2147483647", "locationName": "fragmentLength", "documentation": "Use Fragment length (FragmentLength) to specify the mp4 fragment sizes in seconds. Fragment length must be compatible with GOP size and framerate." }, @@ -5828,7 +6178,10 @@ "locationName": "manifestEncoding" } }, - "documentation": "Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to MS_SMOOTH_GROUP_SETTINGS." + "documentation": "Required when you set (Type) under (OutputGroups)>(OutputGroupSettings) to MS_SMOOTH_GROUP_SETTINGS.", + "required": [ + "FragmentLength" + ] }, "MsSmoothManifestEncoding": { "type": "string", @@ -5842,7 +6195,7 @@ "type": "structure", "members": { "BreakoutCode": { - "shape": "__integer", + "shape": "__integerMin0Max9", "locationName": "breakoutCode", "documentation": "Use Nielsen Configuration (NielsenConfiguration) to set the Nielsen measurement system breakout code. Supported values are 0, 3, 7, and 9." }, @@ -5870,11 +6223,14 @@ "locationName": "spatialFilterSettings" } }, - "documentation": "Enable the Noise reducer (NoiseReducer) feature to remove noise from your video output if necessary. Enable or disable this feature for each output individually. This setting is disabled by default. When you enable Noise reducer (NoiseReducer), you must also select a value for Noise reducer filter (NoiseReducerFilter)." + "documentation": "Enable the Noise reducer (NoiseReducer) feature to remove noise from your video output if necessary. Enable or disable this feature for each output individually. This setting is disabled by default. 
When you enable Noise reducer (NoiseReducer), you must also select a value for Noise reducer filter (NoiseReducerFilter).", + "required": [ + "Filter" + ] }, "NoiseReducerFilter": { "type": "string", - "documentation": "Use Noise reducer filter (NoiseReducerFilter) to select one of the following spatial image filtering functions. To use this setting, you must also enable Noise reducer (NoiseReducer). * Bilateral is an edge preserving noise reduction filter * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) are convolution filters * Conserve is a min/max noise reduction filter * Spatial is frequency-domain filter based on JND principles.", + "documentation": "Use Noise reducer filter (NoiseReducerFilter) to select one of the following spatial image filtering functions. To use this setting, you must also enable Noise reducer (NoiseReducer). * Bilateral is an edge preserving noise reduction filter. * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) are convolution filters. * Conserve is a min/max noise reduction filter. * Spatial is a frequency-domain filter based on JND principles.", "enum": [ "BILATERAL", "MEAN", @@ -5889,7 +6245,7 @@ "type": "structure", "members": { "Strength": { - "shape": "__integer", + "shape": "__integerMin0Max3", "locationName": "strength", "documentation": "Relative strength of noise reducing filter. Higher values produce stronger filtering." } @@ -5900,17 +6256,17 @@ "type": "structure", "members": { "PostFilterSharpenStrength": { - "shape": "__integer", + "shape": "__integerMin0Max3", "locationName": "postFilterSharpenStrength", "documentation": "Specify strength of post noise reduction sharpening filter, with 0 disabling the filter and 3 enabling it at maximum strength." }, "Speed": { - "shape": "__integer", + "shape": "__integerMinNegative2Max3", "locationName": "speed", "documentation": "The speed of the filter, from -2 (lower speed) to 3 (higher speed), with 0 being the nominal value." 
}, "Strength": { - "shape": "__integer", + "shape": "__integerMin0Max16", "locationName": "strength", "documentation": "Relative strength of noise reducing filter. Higher values produce stronger filtering." } @@ -5943,12 +6299,12 @@ "type": "structure", "members": { "AudioDescriptions": { - "shape": "ListOfAudioDescription", + "shape": "__listOfAudioDescription", "locationName": "audioDescriptions", "documentation": "(AudioDescriptions) contains groups of audio encoding settings organized by audio codec. Include one instance of (AudioDescriptions) per output. (AudioDescriptions) can contain multiple groups of encoding settings." }, "CaptionDescriptions": { - "shape": "ListOfCaptionDescription", + "shape": "__listOfCaptionDescription", "locationName": "captionDescriptions", "documentation": "(CaptionDescriptions) contains groups of captions settings. For each output that has captions, include one instance of (CaptionDescriptions). (CaptionDescriptions) can contain multiple groups of captions settings." }, @@ -5962,7 +6318,7 @@ "documentation": "Use Extension (Extension) to specify the file extension for outputs in File output groups. If you do not specify a value, the service will use default extensions by container type as follows * MPEG-2 transport stream, m2ts * Quicktime, mov * MXF container, mxf * MPEG-4 container, mp4 * No Container, the service will use codec extensions (e.g. AAC, H265, H265, AC3)" }, "NameModifier": { - "shape": "__string", + "shape": "__stringMin1", "locationName": "nameModifier", "documentation": "Use Name modifier (NameModifier) to have the service add a string to the end of each output filename. You specify the base filename as part of your destination URI. When you create multiple outputs in the same output group, Name modifier (NameModifier) is required. Name modifier also accepts format identifiers. 
For DASH ISO outputs, if you use the format identifiers $Number$ or $Time$ in one output, you must use them in the same way in all outputs of the output group." }, @@ -5971,7 +6327,7 @@ "locationName": "outputSettings" }, "Preset": { - "shape": "__string", + "shape": "__stringMin0", "locationName": "preset", "documentation": "Use Preset (Preset) to specifiy a preset for your transcoding settings. Provide the system or custom preset name. You can specify either Preset (Preset) or Container settings (ContainerSettings), but not both." }, @@ -5987,12 +6343,15 @@ "type": "structure", "members": { "InputChannels": { - "shape": "ListOf__integer", + "shape": "__listOf__integerMinNegative60Max6", "locationName": "inputChannels", "documentation": "List of input channels" } }, - "documentation": "OutputChannel mapping settings." + "documentation": "OutputChannel mapping settings.", + "required": [ + "InputChannels" + ] }, "OutputDetail": { "type": "structure", @@ -6027,18 +6386,22 @@ "locationName": "outputGroupSettings" }, "Outputs": { - "shape": "ListOfOutput", + "shape": "__listOfOutput", "locationName": "outputs", "documentation": "This object holds groups of encoding settings, one group of settings per output." 
} }, - "documentation": "Group of outputs" + "documentation": "Group of outputs", + "required": [ + "Outputs", + "OutputGroupSettings" + ] }, "OutputGroupDetail": { "type": "structure", "members": { "OutputDetails": { - "shape": "ListOfOutputDetail", + "shape": "__listOfOutputDetail", "locationName": "outputDetails", "documentation": "Details about the output" } @@ -6048,6 +6411,10 @@ "OutputGroupSettings": { "type": "structure", "members": { + "CmafGroupSettings": { + "shape": "CmafGroupSettings", + "locationName": "cmafGroupSettings" + }, "DashIsoGroupSettings": { "shape": "DashIsoGroupSettings", "locationName": "dashIsoGroupSettings" @@ -6069,16 +6436,20 @@ "locationName": "type" } }, - "documentation": "Output Group settings, including type" + "documentation": "Output Group settings, including type", + "required": [ + "Type" + ] }, "OutputGroupType": { "type": "string", - "documentation": "Type of output group (File group, Apple HLS, DASH ISO, Microsoft Smooth Streaming)", + "documentation": "Type of output group (File group, Apple HLS, DASH ISO, Microsoft Smooth Streaming, CMAF)", "enum": [ "HLS_GROUP_SETTINGS", "DASH_ISO_GROUP_SETTINGS", "FILE_GROUP_SETTINGS", - "MS_SMOOTH_GROUP_SETTINGS" + "MS_SMOOTH_GROUP_SETTINGS", + "CMAF_GROUP_SETTINGS" ] }, "OutputSdt": { @@ -6115,7 +6486,7 @@ "documentation": "An optional category you create to organize your presets." }, "CreatedAt": { - "shape": "__timestamp", + "shape": "__timestampIso8601", "locationName": "createdAt", "documentation": "The timestamp in epoch seconds for preset creation." }, @@ -6125,7 +6496,7 @@ "documentation": "An optional description you create for each preset." }, "LastUpdated": { - "shape": "__timestamp", + "shape": "__timestampIso8601", "locationName": "lastUpdated", "documentation": "The timestamp in epoch seconds when the preset was last updated." }, @@ -6144,7 +6515,11 @@ "documentation": "A preset can be of two types: system or custom. 
System or built-in preset can't be modified or deleted by the user." } }, - "documentation": "A preset is a collection of preconfigured media conversion settings that you want MediaConvert to apply to the output during the conversion process." + "documentation": "A preset is a collection of preconfigured media conversion settings that you want MediaConvert to apply to the output during the conversion process.", + "required": [ + "Settings", + "Name" + ] }, "PresetListBy": { "type": "string", @@ -6159,12 +6534,12 @@ "type": "structure", "members": { "AudioDescriptions": { - "shape": "ListOfAudioDescription", + "shape": "__listOfAudioDescription", "locationName": "audioDescriptions", "documentation": "(AudioDescriptions) contains groups of audio encoding settings organized by audio codec. Include one instance of (AudioDescriptions) per output. (AudioDescriptions) can contain multiple groups of encoding settings." }, "CaptionDescriptions": { - "shape": "ListOfCaptionDescriptionPreset", + "shape": "__listOfCaptionDescriptionPreset", "locationName": "captionDescriptions", "documentation": "Caption settings for this preset. There can be multiple caption settings in a single output." }, @@ -6192,7 +6567,7 @@ }, "ProresFramerateControl": { "type": "string", - "documentation": "Using the API, set FramerateControl to INITIALIZE_FROM_SOURCE if you want the service to use the framerate from the input. Using the console, do this by choosing INITIALIZE_FROM_SOURCE for Framerate.", + "documentation": "If you are using the console, use the Framerate setting to specify the framerate for this output. If you want to keep the same framerate as the input video, choose Follow source. If you want to do framerate conversion, choose a framerate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your framerate as a fraction. 
If you are creating your transcoding job sepecification as a JSON file without the console, use FramerateControl to specify which value the service uses for the framerate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the framerate from the input. Choose SPECIFIED if you want the service to use the framerate you specify in the settings FramerateNumerator and FramerateDenominator.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -6208,7 +6583,7 @@ }, "ProresInterlaceMode": { "type": "string", - "documentation": "Use Interlace mode (InterlaceMode) to choose the scan line type for the output. * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce interlaced output with the entire output having the same field polarity (top or bottom first). * Follow, Default Top (FOLLOw_TOP_FIELD) and Follow, Default Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. Therefore, behavior depends on the input scan type. - If the source is interlaced, the output will be interlaced with the same polarity as the source (it will follow the source). The output could therefore be a mix of \"top field first\" and \"bottom field first\". - If the source is progressive, the output will be interlaced with \"top field first\" or \"bottom field first\" polarity, depending on which of the Follow options you chose.", + "documentation": "Use Interlace mode (InterlaceMode) to choose the scan line type for the output. * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce interlaced output with the entire output having the same field polarity (top or bottom first). * Follow, Default Top (FOLLOW_TOP_FIELD) and Follow, Default Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. Therefore, behavior depends on the input scan type.\n - If the source is interlaced, the output will be interlaced with the same polarity as the source (it will follow the source). 
The output could therefore be a mix of \"top field first\" and \"bottom field first\".\n - If the source is progressive, the output will be interlaced with \"top field first\" or \"bottom field first\" polarity, depending on which of the Follow options you chose.", "enum": [ "PROGRESSIVE", "TOP_FIELD", @@ -6241,12 +6616,12 @@ "locationName": "framerateConversionAlgorithm" }, "FramerateDenominator": { - "shape": "__integer", + "shape": "__integerMin1Max2147483647", "locationName": "framerateDenominator", "documentation": "Framerate denominator." }, "FramerateNumerator": { - "shape": "__integer", + "shape": "__integerMin1Max2147483647", "locationName": "framerateNumerator", "documentation": "When you use the API for transcode jobs that use framerate conversion, specify the framerate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateNumerator to specify the numerator of this fraction. In this example, use 24000 for the value of FramerateNumerator." }, @@ -6259,12 +6634,12 @@ "locationName": "parControl" }, "ParDenominator": { - "shape": "__integer", + "shape": "__integerMin1Max2147483647", "locationName": "parDenominator", "documentation": "Pixel Aspect Ratio denominator." }, "ParNumerator": { - "shape": "__integer", + "shape": "__integerMin1Max2147483647", "locationName": "parNumerator", "documentation": "Pixel Aspect Ratio numerator." }, @@ -6304,7 +6679,7 @@ "documentation": "An identifier for this resource that is unique within all of AWS." }, "CreatedAt": { - "shape": "__timestamp", + "shape": "__timestampIso8601", "locationName": "createdAt", "documentation": "The timestamp in epoch seconds for queue creation." }, @@ -6314,7 +6689,7 @@ "documentation": "An optional description you create for each queue." }, "LastUpdated": { - "shape": "__timestamp", + "shape": "__timestampIso8601", "locationName": "lastUpdated", "documentation": "The timestamp in epoch seconds when the queue was last updated." 
}, @@ -6323,17 +6698,30 @@ "locationName": "name", "documentation": "A name you create for each queue. Each name must be unique within your account." }, + "ProgressingJobsCount": { + "shape": "__integer", + "locationName": "progressingJobsCount", + "documentation": "Estimated number of jobs in PROGRESSING status." + }, "Status": { "shape": "QueueStatus", "locationName": "status" }, + "SubmittedJobsCount": { + "shape": "__integer", + "locationName": "submittedJobsCount", + "documentation": "Estimated number of jobs in SUBMITTED status." + }, "Type": { "shape": "Type", "locationName": "type", "documentation": "A queue can be of two types: system or custom. System or built-in queues can't be modified or deleted by the user." } }, - "documentation": "MediaConvert jobs are submitted to a queue. Unless specified otherwise jobs are submitted to a built-in default queue. User can create additional queues to separate the jobs of different categories or priority." + "documentation": "MediaConvert jobs are submitted to a queue. Unless specified otherwise jobs are submitted to a built-in default queue. User can create additional queues to separate the jobs of different categories or priority.", + "required": [ + "Name" + ] }, "QueueListBy": { "type": "string", @@ -6355,27 +6743,33 @@ "type": "structure", "members": { "Height": { - "shape": "__integer", + "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "height", "documentation": "Height of rectangle in pixels." }, "Width": { - "shape": "__integer", + "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "width", "documentation": "Width of rectangle in pixels." }, "X": { - "shape": "__integer", + "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "x", "documentation": "The distance, in pixels, between the rectangle and the left edge of the video frame." 
}, "Y": { - "shape": "__integer", + "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "y", "documentation": "The distance, in pixels, between the rectangle and the top edge of the video frame." } }, - "documentation": "Use Rectangle to identify a specific area of the video frame." + "documentation": "Use Rectangle to identify a specific area of the video frame.", + "required": [ + "X", + "Y", + "Height", + "Width" + ] }, "RemixSettings": { "type": "structure", @@ -6385,17 +6779,38 @@ "locationName": "channelMapping" }, "ChannelsIn": { - "shape": "__integer", + "shape": "__integerMin1Max16", "locationName": "channelsIn", "documentation": "Specify the number of audio channels from your input that you want to use in your output. With remixing, you might combine or split the data in these channels, so the number of channels in your final output might be different." }, "ChannelsOut": { - "shape": "__integer", + "shape": "__integerMin1Max8", "locationName": "channelsOut", "documentation": "Specify the number of channels in this output after remixing. Valid values: 1, 2, 4, 6, 8" } }, - "documentation": "Use Manual audio remixing (RemixSettings) to adjust audio levels for each output channel. With audio remixing, you can output more or fewer audio channels than your input audio source provides." + "documentation": "Use Manual audio remixing (RemixSettings) to adjust audio levels for each audio channel in each output of your job. With audio remixing, you can output more or fewer audio channels than your input audio source provides.", + "required": [ + "ChannelsOut", + "ChannelMapping", + "ChannelsIn" + ] + }, + "ResourceTags": { + "type": "structure", + "members": { + "Arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "The Amazon Resource Name (ARN) of the resource." + }, + "Tags": { + "shape": "__mapOf__string", + "locationName": "tags", + "documentation": "The tags for the resource." 
+ } + }, + "documentation": "The Amazon Resource Name (ARN) and tags for an AWS Elemental MediaConvert resource." }, "RespondToAfd": { "type": "string", @@ -6443,33 +6858,38 @@ "documentation": "The SPEKE-compliant server uses Resource ID (ResourceId) to identify content." }, "SystemIds": { - "shape": "ListOf__string", + "shape": "__listOf__stringPattern09aFAF809aFAF409aFAF409aFAF409aFAF12", "locationName": "systemIds", "documentation": "Relates to SPEKE implementation. DRM system identifiers. DASH output groups support a max of two system ids. Other group types support one system id." }, "Url": { - "shape": "__string", + "shape": "__stringPatternHttps", "locationName": "url", "documentation": "Use URL (Url) to specify the SPEKE-compliant server that will provide keys for content." } }, - "documentation": "Settings for use with a SPEKE key provider" + "documentation": "Settings for use with a SPEKE key provider", + "required": [ + "ResourceId", + "SystemIds", + "Url" + ] }, "StaticKeyProvider": { "type": "structure", "members": { "KeyFormat": { - "shape": "__string", + "shape": "__stringPatternIdentityAZaZ26AZaZ09163", "locationName": "keyFormat", "documentation": "Relates to DRM implementation. Sets the value of the KEYFORMAT attribute. Must be 'identity' or a reverse DNS string. May be omitted to indicate an implicit value of 'identity'." }, "KeyFormatVersions": { - "shape": "__string", + "shape": "__stringPatternDD", "locationName": "keyFormatVersions", "documentation": "Relates to DRM implementation. Either a single positive integer version value or a slash delimited list of version values (1/2/3)." }, "StaticKeyValue": { - "shape": "__string", + "shape": "__stringPatternAZaZ0932", "locationName": "staticKeyValue", "documentation": "Relates to DRM implementation. Use a 32-character hexidecimal string to specify Key Value (StaticKeyValue)." }, @@ -6479,13 +6899,41 @@ "documentation": "Relates to DRM implementation. 
The location of the license server used for protecting content." } }, - "documentation": "Settings for use with a SPEKE key provider." + "documentation": "Settings for use with a SPEKE key provider.", + "required": [ + "Url", + "StaticKeyValue" + ] + }, + "TagResourceRequest": { + "type": "structure", + "members": { + "Arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "The Amazon Resource Name (ARN) of the resource that you want to tag. To get the ARN, send a GET request with the resource name." + }, + "Tags": { + "shape": "__mapOf__string", + "locationName": "tags", + "documentation": "The tags that you want to add to the resource. You can tag resources with a key-value pair or with only a key." + } + }, + "required": [ + "Arn", + "Tags" + ] + }, + "TagResourceResponse": { + "type": "structure", + "members": { + } }, "TeletextDestinationSettings": { "type": "structure", "members": { "PageNumber": { - "shape": "__string", + "shape": "__stringMin3Max3Pattern1809aFAF09aEAE", "locationName": "pageNumber", "documentation": "Set pageNumber to the Teletext page number for the destination captions for this output. This value must be a three-digit hexadecimal string; strings ending in -FF are invalid. If you are passing through the entire set of Teletext data, do not use this field." } @@ -6496,7 +6944,7 @@ "type": "structure", "members": { "PageNumber": { - "shape": "__string", + "shape": "__stringMin3Max3Pattern1809aFAF09aEAE", "locationName": "pageNumber", "documentation": "Use Page Number (PageNumber) to specify the three-digit hexadecimal page number that will be used for Teletext captions. Do not use this setting if you are passing through teletext from the input source to output." } @@ -6507,7 +6955,7 @@ "type": "structure", "members": { "FontSize": { - "shape": "__integer", + "shape": "__integerMin10Max48", "locationName": "fontSize", "documentation": "Use Font Size (FontSize) to set the font size of any burned-in timecode. 
Valid values are 10, 16, 32, 48." }, @@ -6516,7 +6964,7 @@ "locationName": "position" }, "Prefix": { - "shape": "__string", + "shape": "__stringPattern", "locationName": "prefix", "documentation": "Use Prefix (Prefix) to place ASCII characters before any burned-in timecode. For example, a prefix of \"EZ-\" will result in the timecode \"EZ-00:00:00:00\". Provide either the characters themselves or the ASCII code equivalents. The supported range of characters is 0x20 through 0x7e. This includes letters, numbers, and all special characters represented on a standard English keyboard." } @@ -6542,30 +6990,30 @@ "type": "structure", "members": { "Anchor": { - "shape": "__string", + "shape": "__stringPattern010920405090509092", "locationName": "anchor", - "documentation": "If you use an editing platform that relies on an anchor timecode, use Anchor Timecode (Anchor) to specify a timecode that will match the input video frame to the output video frame. Use 24-hour format with frame number, (HH:MM:SS:FF) or (HH:MM:SS;FF). This setting ignores framerate conversion. System behavior for Anchor Timecode varies depending on your setting for Timecode source (TimecodeSource). * If Timecode source (TimecodeSource) is set to Specified Start (specifiedstart), the first input frame is the specified value in Start Timecode (Start). Anchor Timecode (Anchor) and Start Timecode (Start) are used calculate output timecode. * If Timecode source (TimecodeSource) is set to Start at 0 (zerobased) the first frame is 00:00:00:00. * If Timecode source (TimecodeSource) is set to Embedded (embedded), the first frame is the timecode value on the first input frame of the input." + "documentation": "If you use an editing platform that relies on an anchor timecode, use Anchor Timecode (Anchor) to specify a timecode that will match the input video frame to the output video frame. Use 24-hour format with frame number, (HH:MM:SS:FF) or (HH:MM:SS;FF). This setting ignores framerate conversion. 
System behavior for Anchor Timecode varies depending on your setting for Source (TimecodeSource). * If Source (TimecodeSource) is set to Specified Start (SPECIFIEDSTART), the first input frame is the specified value in Start Timecode (Start). Anchor Timecode (Anchor) and Start Timecode (Start) are used calculate output timecode. * If Source (TimecodeSource) is set to Start at 0 (ZEROBASED) the first frame is 00:00:00:00. * If Source (TimecodeSource) is set to Embedded (EMBEDDED), the first frame is the timecode value on the first input frame of the input." }, "Source": { "shape": "TimecodeSource", "locationName": "source" }, "Start": { - "shape": "__string", + "shape": "__stringPattern010920405090509092", "locationName": "start", - "documentation": "Only use when you set Timecode Source (TimecodeSource) to Specified Start (SPECIFIEDSTART). Use Start timecode (Start) to specify the timecode for the initial frame. Use 24-hour format with frame number, (HH:MM:SS:FF) or (HH:MM:SS;FF)." + "documentation": "Only use when you set Source (TimecodeSource) to Specified start (SPECIFIEDSTART). Use Start timecode (Start) to specify the timecode for the initial frame. Use 24-hour format with frame number, (HH:MM:SS:FF) or (HH:MM:SS;FF)." }, "TimestampOffset": { - "shape": "__string", + "shape": "__stringPattern0940191020191209301", "locationName": "timestampOffset", - "documentation": "Only applies to outputs that support program-date-time stamp. Use Time stamp offset (TimestampOffset) to overwrite the timecode date without affecting the time and frame number. Provide the new date as a string in the format \"yyyy-mm-dd\". To use Time stamp offset, you must also enable Insert program-date-time (InsertProgramDateTime) in the output settings." + "documentation": "Only applies to outputs that support program-date-time stamp. Use Timestamp offset (TimestampOffset) to overwrite the timecode date without affecting the time and frame number. 
Provide the new date as a string in the format \"yyyy-mm-dd\". To use Time stamp offset, you must also enable Insert program-date-time (InsertProgramDateTime) in the output settings. For example, if the date part of your timecodes is 2002-1-25 and you want to change it to one year later, set Timestamp offset (TimestampOffset) to 2003-1-25." } }, - "documentation": "Contains settings used to acquire and adjust timecode information from inputs." + "documentation": "These settings control how the service handles timecodes throughout the job. These settings don't affect input clipping." }, "TimecodeSource": { "type": "string", - "documentation": "Use Timecode source (TimecodeSource) to set how timecodes are handled within this input. To make sure that your video, audio, captions, and markers are synchronized and that time-based features, such as image inserter, work correctly, choose the Timecode source option that matches your assets. All timecodes are in a 24-hour format with frame number (HH:MM:SS:FF). * Embedded (EMBEDDED) - Use the timecode that is in the input video. If no embedded timecode is in the source, the service will use Start at 0 (ZEROBASED) instead. * Start at 0 (ZEROBASED) - Set the timecode of the initial frame to 00:00:00:00. * Specified Start (SPECIFIEDSTART) - Set the timecode of the initial frame to a value other than zero. You use Start timecode (Start) to provide this value.", + "documentation": "Use Source (TimecodeSource) to set how timecodes are handled within this job. To make sure that your video, audio, captions, and markers are synchronized and that time-based features, such as image inserter, work correctly, choose the Timecode source option that matches your assets. All timecodes are in a 24-hour format with frame number (HH:MM:SS:FF). * Embedded (EMBEDDED) - Use the timecode that is in the input video. If no embedded timecode is in the source, the service will use Start at 0 (ZEROBASED) instead. 
* Start at 0 (ZEROBASED) - Set the timecode of the initial frame to 00:00:00:00. * Specified Start (SPECIFIEDSTART) - Set the timecode of the initial frame to a value other than zero. You use Start timecode (Start) to provide this value.", "enum": [ "EMBEDDED", "ZEROBASED", @@ -6574,7 +7022,7 @@ }, "TimedMetadata": { "type": "string", - "documentation": "If PASSTHROUGH, inserts ID3 timed metadata from the timed_metadata REST command into this output.", + "documentation": "Applies only to HLS outputs. Use this setting to specify whether the service inserts the ID3 timed metadata from the input in this output.", "enum": [ "PASSTHROUGH", "NONE" @@ -6584,28 +7032,31 @@ "type": "structure", "members": { "Id3Insertions": { - "shape": "ListOfId3Insertion", + "shape": "__listOfId3Insertion", "locationName": "id3Insertions", "documentation": "Id3Insertions contains the array of Id3Insertion instances." } }, - "documentation": "Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags in your job. To include timed metadata, you must enable it here, enable it in each output container, and specify tags and timecodes in ID3 insertion (Id3Insertion) objects." + "documentation": "Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags in your job. To include timed metadata, you must enable it here, enable it in each output container, and specify tags and timecodes in ID3 insertion (Id3Insertion) objects.", + "required": [ + "Id3Insertions" + ] }, "Timing": { "type": "structure", "members": { "FinishTime": { - "shape": "__timestamp", + "shape": "__timestampIso8601", "locationName": "finishTime", "documentation": "The time, in Unix epoch format, that the transcoding job finished" }, "StartTime": { - "shape": "__timestamp", + "shape": "__timestampIso8601", "locationName": "startTime", "documentation": "The time, in Unix epoch format, that transcoding for the job began." 
}, "SubmitTime": { - "shape": "__timestamp", + "shape": "__timestampIso8601", "locationName": "submitTime", "documentation": "The time, in Unix epoch format, that you submitted the job." } @@ -6651,6 +7102,26 @@ "CUSTOM" ] }, + "UntagResourceRequest": { + "type": "structure", + "members": { + "Arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "The Amazon Resource Name (ARN) of the resource that you want to remove tags from. To get the ARN, send a GET request with the resource name." + }, + "TagKeys": { + "shape": "__listOf__string", + "locationName": "tagKeys", + "documentation": "The keys of the tags that you want to remove from the resource." + } + } + }, + "UntagResourceResponse": { + "type": "structure", + "members": { + } + }, "UpdateJobTemplateRequest": { "type": "structure", "members": { @@ -6801,7 +7272,10 @@ "locationName": "proresSettings" } }, - "documentation": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value you choose for Video codec (Codec). For each codec enum you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * FRAME_CAPTURE, FrameCaptureSettings" + "documentation": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value you choose for Video codec (Codec). For each codec enum you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. 
* H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * FRAME_CAPTURE, FrameCaptureSettings", + "required": [ + "Codec" + ] }, "VideoDescription": { "type": "structure", @@ -6832,12 +7306,12 @@ "locationName": "dropFrameTimecode" }, "FixedAfd": { - "shape": "__integer", + "shape": "__integerMin0Max15", "locationName": "fixedAfd", "documentation": "Applies only if you set AFD Signaling(AfdSignaling) to Fixed (FIXED). Use Fixed (FixedAfd) to specify a four-bit AFD value which the service will write on all frames of this video output." }, "Height": { - "shape": "__integer", + "shape": "__integerMin32Max2160", "locationName": "height", "documentation": "Use the Height (Height) setting to define the video resolution height for this output. Specify in pixels. If you don't provide a value here, the service will use the input height." }, @@ -6855,7 +7329,7 @@ "locationName": "scalingBehavior" }, "Sharpness": { - "shape": "__integer", + "shape": "__integerMin0Max100", "locationName": "sharpness", "documentation": "Use Sharpness (Sharpness)setting to specify the strength of anti-aliasing. This setting changes the width of the anti-alias filter kernel used for scaling. Sharpness only applies if your output resolution is different from your input resolution, and if you set Anti-alias (AntiAlias) to ENABLED. 0 is the softest setting, 100 the sharpest, and 50 recommended for most content." }, @@ -6869,12 +7343,15 @@ "documentation": "Find additional transcoding features under Preprocessors (VideoPreprocessors). Enable the features at each output individually. These features are disabled by default." }, "Width": { - "shape": "__integer", + "shape": "__integerMin32Max4096", "locationName": "width", "documentation": "Use Width (Width) to define the video resolution width, in pixels, for this output. If you don't provide a value here, the service will use the input width." 
} }, - "documentation": "Settings for video outputs" + "documentation": "Settings for video outputs", + "required": [ + "CodecSettings" + ] }, "VideoDetail": { "type": "structure", @@ -6939,12 +7416,12 @@ "locationName": "hdr10Metadata" }, "Pid": { - "shape": "__integer", + "shape": "__integerMin1Max2147483647", "locationName": "pid", "documentation": "Use PID (Pid) to select specific video data from an input file. Specify this value as an integer; the system automatically converts it to the hexidecimal value. For example, 257 selects PID 0x101. A PID, or packet identifier, is an identifier for a set of data in an MPEG-2 transport stream container." }, "ProgramNumber": { - "shape": "__integer", + "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "programNumber", "documentation": "Selects a specific program from within a multi-program transport stream. Note that Quad 4K is not currently supported." } @@ -6953,27 +7430,39 @@ }, "VideoTimecodeInsertion": { "type": "string", - "documentation": "Enable Timecode insertion to include timecode information in this output. Do this in the API by setting (VideoTimecodeInsertion) to (PIC_TIMING_SEI). To get timecodes to appear correctly in your output, also set up the timecode configuration for your job in the input settings. Only enable Timecode insertion when the input framerate is identical to output framerate. Disable this setting to remove the timecode from the output. Default is disabled.", + "documentation": "Applies only to H.264, H.265, MPEG2, and ProRes outputs. Only enable Timecode insertion when the input framerate is identical to the output framerate. To include timecodes in this output, set Timecode insertion (VideoTimecodeInsertion) to PIC_TIMING_SEI. To leave them out, set it to DISABLED. Default is DISABLED. When the service inserts timecodes in an output, by default, it uses any embedded timecodes from the input. 
If none are present, the service will set the timecode for the first output frame to zero. To change this default behavior, adjust the settings under Timecode configuration (TimecodeConfig). In the console, these settings are located under Job > Job settings > Timecode configuration. Note - Timecode source under input settings (InputTimecodeSource) does not affect the timecodes that are inserted in the output. Source under Job settings > Timecode configuration (TimecodeSource) does.", "enum": [ "DISABLED", "PIC_TIMING_SEI" ] }, + "WavFormat": { + "type": "string", + "documentation": "The service defaults to using RIFF for WAV outputs. If your output audio is likely to exceed 4 GB in file size, or if you otherwise need the extended support of the RF64 format, set your output WAV file format to RF64.", + "enum": [ + "RIFF", + "RF64" + ] + }, "WavSettings": { "type": "structure", "members": { "BitDepth": { - "shape": "__integer", + "shape": "__integerMin16Max24", "locationName": "bitDepth", "documentation": "Specify Bit depth (BitDepth), in bits per sample, to choose the encoding quality for this audio track." }, "Channels": { - "shape": "__integer", + "shape": "__integerMin1Max8", "locationName": "channels", "documentation": "Set Channels to specify the number of channels in this output audio track. With WAV, valid values 1, 2, 4, and 8. In the console, these values are Mono, Stereo, 4-Channel, and 8-Channel, respectively." }, + "Format": { + "shape": "WavFormat", + "locationName": "format" + }, "SampleRate": { - "shape": "__integer", + "shape": "__integerMin8000Max192000", "locationName": "sampleRate", "documentation": "Sample rate in Hz." 
} @@ -6986,14 +7475,648 @@ "__double": { "type": "double" }, + "__doubleMin0": { + "type": "double" + }, + "__doubleMinNegative59Max0": { + "type": "double" + }, + "__doubleMinNegative60Max3": { + "type": "double" + }, + "__doubleMinNegative60MaxNegative1": { + "type": "double" + }, "__integer": { "type": "integer" }, + "__integerMin0Max10": { + "type": "integer", + "min": 0, + "max": 10 + }, + "__integerMin0Max100": { + "type": "integer", + "min": 0, + "max": 100 + }, + "__integerMin0Max1000": { + "type": "integer", + "min": 0, + "max": 1000 + }, + "__integerMin0Max10000": { + "type": "integer", + "min": 0, + "max": 10000 + }, + "__integerMin0Max1152000000": { + "type": "integer", + "min": 0, + "max": 1152000000 + }, + "__integerMin0Max128": { + "type": "integer", + "min": 0, + "max": 128 + }, + "__integerMin0Max1466400000": { + "type": "integer", + "min": 0, + "max": 1466400000 + }, + "__integerMin0Max15": { + "type": "integer", + "min": 0, + "max": 15 + }, + "__integerMin0Max16": { + "type": "integer", + "min": 0, + "max": 16 + }, + "__integerMin0Max2147483647": { + "type": "integer", + "min": 0, + "max": 2147483647 + }, + "__integerMin0Max255": { + "type": "integer", + "min": 0, + "max": 255 + }, + "__integerMin0Max3": { + "type": "integer", + "min": 0, + "max": 3 + }, + "__integerMin0Max30": { + "type": "integer", + "min": 0, + "max": 30 + }, + "__integerMin0Max3600": { + "type": "integer", + "min": 0, + "max": 3600 + }, + "__integerMin0Max47185920": { + "type": "integer", + "min": 0, + "max": 47185920 + }, + "__integerMin0Max500": { + "type": "integer", + "min": 0, + "max": 500 + }, + "__integerMin0Max50000": { + "type": "integer", + "min": 0, + "max": 50000 + }, + "__integerMin0Max65535": { + "type": "integer", + "min": 0, + "max": 65535 + }, + "__integerMin0Max7": { + "type": "integer", + "min": 0, + "max": 7 + }, + "__integerMin0Max8": { + "type": "integer", + "min": 0, + "max": 8 + }, + "__integerMin0Max9": { + "type": "integer", + "min": 0, + "max": 9 + 
}, + "__integerMin0Max96": { + "type": "integer", + "min": 0, + "max": 96 + }, + "__integerMin0Max99": { + "type": "integer", + "min": 0, + "max": 99 + }, + "__integerMin1000Max1152000000": { + "type": "integer", + "min": 1000, + "max": 1152000000 + }, + "__integerMin1000Max1466400000": { + "type": "integer", + "min": 1000, + "max": 1466400000 + }, + "__integerMin1000Max288000000": { + "type": "integer", + "min": 1000, + "max": 288000000 + }, + "__integerMin1000Max30000": { + "type": "integer", + "min": 1000, + "max": 30000 + }, + "__integerMin1000Max300000000": { + "type": "integer", + "min": 1000, + "max": 300000000 + }, + "__integerMin10Max48": { + "type": "integer", + "min": 10, + "max": 48 + }, + "__integerMin16Max24": { + "type": "integer", + "min": 16, + "max": 24 + }, + "__integerMin1Max1": { + "type": "integer", + "min": 1, + "max": 1 + }, + "__integerMin1Max100": { + "type": "integer", + "min": 1, + "max": 100 + }, + "__integerMin1Max10000000": { + "type": "integer", + "min": 1, + "max": 10000000 + }, + "__integerMin1Max1001": { + "type": "integer", + "min": 1, + "max": 1001 + }, + "__integerMin1Max16": { + "type": "integer", + "min": 1, + "max": 16 + }, + "__integerMin1Max2": { + "type": "integer", + "min": 1, + "max": 2 + }, + "__integerMin1Max20": { + "type": "integer", + "min": 1, + "max": 20 + }, + "__integerMin1Max2147483647": { + "type": "integer", + "min": 1, + "max": 2147483647 + }, + "__integerMin1Max31": { + "type": "integer", + "min": 1, + "max": 31 + }, + "__integerMin1Max32": { + "type": "integer", + "min": 1, + "max": 32 + }, + "__integerMin1Max4": { + "type": "integer", + "min": 1, + "max": 4 + }, + "__integerMin1Max6": { + "type": "integer", + "min": 1, + "max": 6 + }, + "__integerMin1Max8": { + "type": "integer", + "min": 1, + "max": 8 + }, + "__integerMin24Max60000": { + "type": "integer", + "min": 24, + "max": 60000 + }, + "__integerMin25Max10000": { + "type": "integer", + "min": 25, + "max": 10000 + }, + "__integerMin25Max2000": { + 
"type": "integer", + "min": 25, + "max": 2000 + }, + "__integerMin32000Max384000": { + "type": "integer", + "min": 32000, + "max": 384000 + }, + "__integerMin32000Max48000": { + "type": "integer", + "min": 32000, + "max": 48000 + }, + "__integerMin32Max2160": { + "type": "integer", + "min": 32, + "max": 2160 + }, + "__integerMin32Max4096": { + "type": "integer", + "min": 32, + "max": 4096 + }, + "__integerMin32Max8182": { + "type": "integer", + "min": 32, + "max": 8182 + }, + "__integerMin48000Max48000": { + "type": "integer", + "min": 48000, + "max": 48000 + }, + "__integerMin6000Max1024000": { + "type": "integer", + "min": 6000, + "max": 1024000 + }, + "__integerMin64000Max640000": { + "type": "integer", + "min": 64000, + "max": 640000 + }, + "__integerMin8000Max192000": { + "type": "integer", + "min": 8000, + "max": 192000 + }, + "__integerMin8000Max96000": { + "type": "integer", + "min": 8000, + "max": 96000 + }, + "__integerMin96Max600": { + "type": "integer", + "min": 96, + "max": 600 + }, + "__integerMinNegative1000Max1000": { + "type": "integer", + "min": -1000, + "max": 1000 + }, + "__integerMinNegative180Max180": { + "type": "integer", + "min": -180, + "max": 180 + }, + "__integerMinNegative2147483648Max2147483647": { + "type": "integer", + "min": -2147483648, + "max": 2147483647 + }, + "__integerMinNegative2Max3": { + "type": "integer", + "min": -2, + "max": 3 + }, + "__integerMinNegative5Max5": { + "type": "integer", + "min": -5, + "max": 5 + }, + "__integerMinNegative60Max6": { + "type": "integer", + "min": -60, + "max": 6 + }, + "__integerMinNegative70Max0": { + "type": "integer", + "min": -70, + "max": 0 + }, + "__listOfAudioDescription": { + "type": "list", + "member": { + "shape": "AudioDescription" + } + }, + "__listOfCaptionDescription": { + "type": "list", + "member": { + "shape": "CaptionDescription" + } + }, + "__listOfCaptionDescriptionPreset": { + "type": "list", + "member": { + "shape": "CaptionDescriptionPreset" + } + }, + 
"__listOfEndpoint": { + "type": "list", + "member": { + "shape": "Endpoint" + } + }, + "__listOfHlsAdMarkers": { + "type": "list", + "member": { + "shape": "HlsAdMarkers" + } + }, + "__listOfHlsCaptionLanguageMapping": { + "type": "list", + "member": { + "shape": "HlsCaptionLanguageMapping" + } + }, + "__listOfId3Insertion": { + "type": "list", + "member": { + "shape": "Id3Insertion" + } + }, + "__listOfInput": { + "type": "list", + "member": { + "shape": "Input" + } + }, + "__listOfInputClipping": { + "type": "list", + "member": { + "shape": "InputClipping" + } + }, + "__listOfInputTemplate": { + "type": "list", + "member": { + "shape": "InputTemplate" + } + }, + "__listOfInsertableImage": { + "type": "list", + "member": { + "shape": "InsertableImage" + } + }, + "__listOfJob": { + "type": "list", + "member": { + "shape": "Job" + } + }, + "__listOfJobTemplate": { + "type": "list", + "member": { + "shape": "JobTemplate" + } + }, + "__listOfOutput": { + "type": "list", + "member": { + "shape": "Output" + } + }, + "__listOfOutputChannelMapping": { + "type": "list", + "member": { + "shape": "OutputChannelMapping" + } + }, + "__listOfOutputDetail": { + "type": "list", + "member": { + "shape": "OutputDetail" + } + }, + "__listOfOutputGroup": { + "type": "list", + "member": { + "shape": "OutputGroup" + } + }, + "__listOfOutputGroupDetail": { + "type": "list", + "member": { + "shape": "OutputGroupDetail" + } + }, + "__listOfPreset": { + "type": "list", + "member": { + "shape": "Preset" + } + }, + "__listOfQueue": { + "type": "list", + "member": { + "shape": "Queue" + } + }, + "__listOf__integerMin1Max2147483647": { + "type": "list", + "member": { + "shape": "__integerMin1Max2147483647" + } + }, + "__listOf__integerMin32Max8182": { + "type": "list", + "member": { + "shape": "__integerMin32Max8182" + } + }, + "__listOf__integerMinNegative60Max6": { + "type": "list", + "member": { + "shape": "__integerMinNegative60Max6" + } + }, + "__listOf__string": { + "type": "list", + 
"member": { + "shape": "__string" + } + }, + "__listOf__stringMin1": { + "type": "list", + "member": { + "shape": "__stringMin1" + } + }, + "__listOf__stringPattern09aFAF809aFAF409aFAF409aFAF409aFAF12": { + "type": "list", + "member": { + "shape": "__stringPattern09aFAF809aFAF409aFAF409aFAF409aFAF12" + } + }, + "__long": { + "type": "long" + }, + "__mapOfAudioSelector": { + "type": "map", + "key": { + "shape": "__string" + }, + "value": { + "shape": "AudioSelector" + } + }, + "__mapOfAudioSelectorGroup": { + "type": "map", + "key": { + "shape": "__string" + }, + "value": { + "shape": "AudioSelectorGroup" + } + }, + "__mapOfCaptionSelector": { + "type": "map", + "key": { + "shape": "__string" + }, + "value": { + "shape": "CaptionSelector" + } + }, + "__mapOf__string": { + "type": "map", + "key": { + "shape": "__string" + }, + "value": { + "shape": "__string" + } + }, "__string": { "type": "string" }, - "__timestamp": { - "type": "timestamp" + "__stringMin0": { + "type": "string", + "min": 0 + }, + "__stringMin1": { + "type": "string", + "min": 1 + }, + "__stringMin14PatternS3BmpBMPPngPNG": { + "type": "string", + "min": 14, + "pattern": "^(s3:\\/\\/)(.*?)\\.(bmp|BMP|png|PNG)$" + }, + "__stringMin14PatternS3BmpBMPPngPNGTgaTGA": { + "type": "string", + "min": 14, + "pattern": "^(s3:\\/\\/)(.*?)\\.(bmp|BMP|png|PNG|tga|TGA)$" + }, + "__stringMin14PatternS3SccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTSmiSMI": { + "type": "string", + "min": 14, + "pattern": "^(s3:\\/\\/)(.*?)\\.(scc|SCC|ttml|TTML|dfxp|DFXP|stl|STL|srt|SRT|smi|SMI)$" + }, + "__stringMin1Max256": { + "type": "string", + "min": 1, + "max": 256 + }, + "__stringMin32Max32Pattern09aFAF32": { + "type": "string", + "min": 32, + "max": 32, + "pattern": "^[0-9a-fA-F]{32}$" + }, + "__stringMin3Max3Pattern1809aFAF09aEAE": { + "type": "string", + "min": 3, + "max": 3, + "pattern": "^[1-8][0-9a-fA-F][0-9a-eA-E]$" + }, + "__stringMin3Max3PatternAZaZ3": { + "type": "string", + "min": 3, + "max": 3, + "pattern": "^[A-Za-z]{3}$" + }, 
+ "__stringPattern": { + "type": "string", + "pattern": "^[ -~]+$" + }, + "__stringPattern010920405090509092": { + "type": "string", + "pattern": "^([01][0-9]|2[0-4]):[0-5][0-9]:[0-5][0-9][:;][0-9]{2}$" + }, + "__stringPattern01D20305D205D": { + "type": "string", + "pattern": "^((([0-1]\\d)|(2[0-3]))(:[0-5]\\d){2}([:;][0-5]\\d))$" + }, + "__stringPattern0940191020191209301": { + "type": "string", + "pattern": "^([0-9]{4})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])$" + }, + "__stringPattern09aFAF809aFAF409aFAF409aFAF409aFAF12": { + "type": "string", + "pattern": "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$" + }, + "__stringPatternAZaZ0902": { + "type": "string", + "pattern": "^[A-Za-z0-9+\\/]+={0,2}$" + }, + "__stringPatternAZaZ0932": { + "type": "string", + "pattern": "^[A-Za-z0-9]{32}$" + }, + "__stringPatternDD": { + "type": "string", + "pattern": "^(\\d+(\\/\\d+)*)$" + }, + "__stringPatternHttps": { + "type": "string", + "pattern": "^https:\\/\\/" + }, + "__stringPatternIdentityAZaZ26AZaZ09163": { + "type": "string", + "pattern": "^(identity|[A-Za-z]{2,6}(\\.[A-Za-z0-9-]{1,63})+)$" + }, + "__stringPatternS3": { + "type": "string", + "pattern": "^s3:\\/\\/" + }, + "__stringPatternS3MM2VVMMPPEEGGAAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MM": { + "type": "string", + "pattern": 
"^(s3:\\/\\/)([^\\/]+\\/)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM])))$" + }, + "__stringPatternS3MM2VVMMPPEEGGAAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE": { + "type": "string", + "pattern": "^(s3:\\/\\/)([^\\/]+\\/)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[aA][aA][cC]|[aA][iI][fF][fF]|[mM][pP]2|[aA][cC]3|[eE][cC]3|[dD][tT][sS][eE])))$" + }, + "__stringPatternWS": { + "type": "string", + "pattern": "^[\\w\\s]*$" + }, + "__timestampIso8601": { + "type": "timestamp", + "timestampFormat": "iso8601" + }, + "__timestampUnix": { + "type": "timestamp", + "timestampFormat": "unixTimestamp" } }, "documentation": "AWS Elemental MediaConvert" diff --git a/botocore/data/medialive/2017-10-14/paginators-1.json b/botocore/data/medialive/2017-10-14/paginators-1.json index 
1335d966..c9bc58df 100644 --- a/botocore/data/medialive/2017-10-14/paginators-1.json +++ b/botocore/data/medialive/2017-10-14/paginators-1.json @@ -17,6 +17,18 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "InputSecurityGroups" + }, + "ListOfferings": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Offerings" + }, + "ListReservations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Reservations" } } } diff --git a/botocore/data/medialive/2017-10-14/service-2.json b/botocore/data/medialive/2017-10-14/service-2.json index df3ba860..a7a31279 100644 --- a/botocore/data/medialive/2017-10-14/service-2.json +++ b/botocore/data/medialive/2017-10-14/service-2.json @@ -27,14 +27,14 @@ "documentation": "Creation of channel is started." }, "errors": [ - { - "shape": "UnprocessableEntityException", - "documentation": "The Channel failed validation and could not be created." - }, { "shape": "BadRequestException", "documentation": "This request was invalid." }, + { + "shape": "UnprocessableEntityException", + "documentation": "The Channel failed validation and could not be created." + }, { "shape": "InternalServerErrorException", "documentation": "Unexpected internal service error." 
@@ -292,6 +292,56 @@ ], "documentation": "Deletes an Input Security Group" }, + "DeleteReservation": { + "name": "DeleteReservation", + "http": { + "method": "DELETE", + "requestUri": "/prod/reservations/{reservationId}", + "responseCode": 200 + }, + "input": { + "shape": "DeleteReservationRequest" + }, + "output": { + "shape": "DeleteReservationResponse", + "documentation": "Deleted reservation" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "This request was invalid" + }, + { + "shape": "InternalServerErrorException", + "documentation": "Internal service error" + }, + { + "shape": "ForbiddenException", + "documentation": "You do not have permission to delete reservation" + }, + { + "shape": "BadGatewayException", + "documentation": "Bad gateway error" + }, + { + "shape": "NotFoundException", + "documentation": "Reservation you're attempting to delete does not exist" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "Gateway timeout error" + }, + { + "shape": "TooManyRequestsException", + "documentation": "Request limit exceeded on delete reservation request" + }, + { + "shape": "ConflictException", + "documentation": "The reservation could not be deleted because it is currently active." + } + ], + "documentation": "Delete an expired reservation." 
+ }, "DescribeChannel": { "name": "DescribeChannel", "http": { @@ -430,6 +480,98 @@ ], "documentation": "Produces a summary of an Input Security Group" }, + "DescribeOffering": { + "name": "DescribeOffering", + "http": { + "method": "GET", + "requestUri": "/prod/offerings/{offeringId}", + "responseCode": 200 + }, + "input": { + "shape": "DescribeOfferingRequest" + }, + "output": { + "shape": "DescribeOfferingResponse", + "documentation": "Offering details" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "This request was invalid" + }, + { + "shape": "InternalServerErrorException", + "documentation": "Internal service error" + }, + { + "shape": "ForbiddenException", + "documentation": "You do not have permission to describe offering" + }, + { + "shape": "BadGatewayException", + "documentation": "Bad gateway error" + }, + { + "shape": "NotFoundException", + "documentation": "Offering you're attempting to describe does not exist" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "Gateway timeout error" + }, + { + "shape": "TooManyRequestsException", + "documentation": "Request limit exceeded on describe offering request" + } + ], + "documentation": "Get details for an offering." 
+ }, + "DescribeReservation": { + "name": "DescribeReservation", + "http": { + "method": "GET", + "requestUri": "/prod/reservations/{reservationId}", + "responseCode": 200 + }, + "input": { + "shape": "DescribeReservationRequest" + }, + "output": { + "shape": "DescribeReservationResponse", + "documentation": "Reservation details" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "This request was invalid" + }, + { + "shape": "InternalServerErrorException", + "documentation": "Internal service error" + }, + { + "shape": "ForbiddenException", + "documentation": "You do not have permission to describe reservation" + }, + { + "shape": "BadGatewayException", + "documentation": "Bad gateway error" + }, + { + "shape": "NotFoundException", + "documentation": "Reservation you're attempting to describe does not exist" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "Gateway timeout error" + }, + { + "shape": "TooManyRequestsException", + "documentation": "Request limit exceeded on describe reservation request" + } + ], + "documentation": "Get details for a reservation." 
+ }, "ListChannels": { "name": "ListChannels", "http": { @@ -556,6 +698,140 @@ ], "documentation": "Produces list of inputs that have been created" }, + "ListOfferings": { + "name": "ListOfferings", + "http": { + "method": "GET", + "requestUri": "/prod/offerings", + "responseCode": 200 + }, + "input": { + "shape": "ListOfferingsRequest" + }, + "output": { + "shape": "ListOfferingsResponse", + "documentation": "List of offerings" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "This request was invalid" + }, + { + "shape": "InternalServerErrorException", + "documentation": "Internal service error" + }, + { + "shape": "ForbiddenException", + "documentation": "You do not have permission to list offerings" + }, + { + "shape": "BadGatewayException", + "documentation": "Bad gateway error" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "Gateway timeout error" + }, + { + "shape": "TooManyRequestsException", + "documentation": "Request limit exceeded on list offerings request" + } + ], + "documentation": "List offerings available for purchase." 
+ }, + "ListReservations": { + "name": "ListReservations", + "http": { + "method": "GET", + "requestUri": "/prod/reservations", + "responseCode": 200 + }, + "input": { + "shape": "ListReservationsRequest" + }, + "output": { + "shape": "ListReservationsResponse", + "documentation": "List of reservations" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "This request was invalid" + }, + { + "shape": "InternalServerErrorException", + "documentation": "Internal service error" + }, + { + "shape": "ForbiddenException", + "documentation": "You do not have permission to list reservations" + }, + { + "shape": "BadGatewayException", + "documentation": "Bad gateway error" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "Gateway timeout error" + }, + { + "shape": "TooManyRequestsException", + "documentation": "Request limit exceeded on list reservations request" + } + ], + "documentation": "List purchased reservations." + }, + "PurchaseOffering": { + "name": "PurchaseOffering", + "http": { + "method": "POST", + "requestUri": "/prod/offerings/{offeringId}/purchase", + "responseCode": 201 + }, + "input": { + "shape": "PurchaseOfferingRequest" + }, + "output": { + "shape": "PurchaseOfferingResponse", + "documentation": "Purchased reservation" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "This request was invalid" + }, + { + "shape": "InternalServerErrorException", + "documentation": "Internal service error" + }, + { + "shape": "ForbiddenException", + "documentation": "You do not have permission to purchase the offering" + }, + { + "shape": "BadGatewayException", + "documentation": "Bad gateway error" + }, + { + "shape": "NotFoundException", + "documentation": "Offering you're attempting to purchase does not exist" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "Gateway timeout error" + }, + { + "shape": "TooManyRequestsException", + "documentation": "Request limit exceeded on purchase 
offering request" + }, + { + "shape": "ConflictException", + "documentation": "Offering purchase prevented by service resource issue" + } + ], + "documentation": "Purchase an offering and create a reservation." + }, "StartChannel": { "name": "StartChannel", "http": { @@ -671,14 +947,14 @@ "documentation": "Channel is successfully updated." }, "errors": [ - { - "shape": "UnprocessableEntityException", - "documentation": "The channel configuration failed validation and could not be updated." - }, { "shape": "BadRequestException", "documentation": "This request was invalid." }, + { + "shape": "UnprocessableEntityException", + "documentation": "The channel configuration failed validation and could not be updated." + }, { "shape": "InternalServerErrorException", "documentation": "Unexpected internal service error." @@ -1842,6 +2118,11 @@ "shape": "InputSpecification", "locationName": "inputSpecification" }, + "LogLevel": { + "shape": "LogLevel", + "locationName": "logLevel", + "documentation": "The log level being written to CloudWatch Logs." + }, "Name": { "shape": "__string", "locationName": "name", @@ -1937,6 +2218,11 @@ "shape": "InputSpecification", "locationName": "inputSpecification" }, + "LogLevel": { + "shape": "LogLevel", + "locationName": "logLevel", + "documentation": "The log level being written to CloudWatch Logs." + }, "Name": { "shape": "__string", "locationName": "name", @@ -1994,6 +2280,11 @@ "locationName": "inputSpecification", "documentation": "Specification of input for this channel (max. bitrate, resolution, codec, etc.)" }, + "LogLevel": { + "shape": "LogLevel", + "locationName": "logLevel", + "documentation": "The log level to write to CloudWatch Logs." + }, "Name": { "shape": "__string", "locationName": "name", @@ -2040,6 +2331,11 @@ "locationName": "inputSpecification", "documentation": "Specification of input for this channel (max. 
bitrate, resolution, codec, etc.)" }, + "LogLevel": { + "shape": "LogLevel", + "locationName": "logLevel", + "documentation": "The log level to write to CloudWatch Logs." + }, "Name": { "shape": "__string", "locationName": "name", @@ -2259,6 +2555,11 @@ "shape": "InputSpecification", "locationName": "inputSpecification" }, + "LogLevel": { + "shape": "LogLevel", + "locationName": "logLevel", + "documentation": "The log level being written to CloudWatch Logs." + }, "Name": { "shape": "__string", "locationName": "name", @@ -2323,6 +2624,112 @@ }, "documentation": "Placeholder documentation for DeleteInputSecurityGroupResponse" }, + "DeleteReservationRequest": { + "type": "structure", + "members": { + "ReservationId": { + "shape": "__string", + "location": "uri", + "locationName": "reservationId", + "documentation": "Unique reservation ID, e.g. '1234567'" + } + }, + "required": [ + "ReservationId" + ], + "documentation": "Placeholder documentation for DeleteReservationRequest" + }, + "DeleteReservationResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "Unique reservation ARN, e.g. 'arn:aws:medialive:us-west-2:123456789012:reservation:1234567'" + }, + "Count": { + "shape": "__integer", + "locationName": "count", + "documentation": "Number of reserved resources" + }, + "CurrencyCode": { + "shape": "__string", + "locationName": "currencyCode", + "documentation": "Currency code for usagePrice and fixedPrice in ISO-4217 format, e.g. 'USD'" + }, + "Duration": { + "shape": "__integer", + "locationName": "duration", + "documentation": "Lease duration, e.g. '12'" + }, + "DurationUnits": { + "shape": "OfferingDurationUnits", + "locationName": "durationUnits", + "documentation": "Units for duration, e.g. 'MONTHS'" + }, + "End": { + "shape": "__string", + "locationName": "end", + "documentation": "Reservation UTC end date and time in ISO-8601 format, e.g. 
'2019-03-01T00:00:00'" + }, + "FixedPrice": { + "shape": "__double", + "locationName": "fixedPrice", + "documentation": "One-time charge for each reserved resource, e.g. '0.0' for a NO_UPFRONT offering" + }, + "Name": { + "shape": "__string", + "locationName": "name", + "documentation": "User specified reservation name" + }, + "OfferingDescription": { + "shape": "__string", + "locationName": "offeringDescription", + "documentation": "Offering description, e.g. 'HD AVC output at 10-20 Mbps, 30 fps, and standard VQ in US West (Oregon)'" + }, + "OfferingId": { + "shape": "__string", + "locationName": "offeringId", + "documentation": "Unique offering ID, e.g. '87654321'" + }, + "OfferingType": { + "shape": "OfferingType", + "locationName": "offeringType", + "documentation": "Offering type, e.g. 'NO_UPFRONT'" + }, + "Region": { + "shape": "__string", + "locationName": "region", + "documentation": "AWS region, e.g. 'us-west-2'" + }, + "ReservationId": { + "shape": "__string", + "locationName": "reservationId", + "documentation": "Unique reservation ID, e.g. '1234567'" + }, + "ResourceSpecification": { + "shape": "ReservationResourceSpecification", + "locationName": "resourceSpecification", + "documentation": "Resource configuration details" + }, + "Start": { + "shape": "__string", + "locationName": "start", + "documentation": "Reservation UTC start date and time in ISO-8601 format, e.g. '2018-03-01T00:00:00'" + }, + "State": { + "shape": "ReservationState", + "locationName": "state", + "documentation": "Current state of reservation, e.g. 'ACTIVE'" + }, + "UsagePrice": { + "shape": "__double", + "locationName": "usagePrice", + "documentation": "Recurring usage charge for each reserved resource, e.g. 
'157.0'" + } + }, + "documentation": "Placeholder documentation for DeleteReservationResponse" + }, "DescribeChannelRequest": { "type": "structure", "members": { @@ -2374,6 +2781,11 @@ "shape": "InputSpecification", "locationName": "inputSpecification" }, + "LogLevel": { + "shape": "LogLevel", + "locationName": "logLevel", + "documentation": "The log level being written to CloudWatch Logs." + }, "Name": { "shape": "__string", "locationName": "name", @@ -2506,6 +2918,188 @@ }, "documentation": "Placeholder documentation for DescribeInputSecurityGroupResponse" }, + "DescribeOfferingRequest": { + "type": "structure", + "members": { + "OfferingId": { + "shape": "__string", + "location": "uri", + "locationName": "offeringId", + "documentation": "Unique offering ID, e.g. '87654321'" + } + }, + "required": [ + "OfferingId" + ], + "documentation": "Placeholder documentation for DescribeOfferingRequest" + }, + "DescribeOfferingResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "Unique offering ARN, e.g. 'arn:aws:medialive:us-west-2:123456789012:offering:87654321'" + }, + "CurrencyCode": { + "shape": "__string", + "locationName": "currencyCode", + "documentation": "Currency code for usagePrice and fixedPrice in ISO-4217 format, e.g. 'USD'" + }, + "Duration": { + "shape": "__integer", + "locationName": "duration", + "documentation": "Lease duration, e.g. '12'" + }, + "DurationUnits": { + "shape": "OfferingDurationUnits", + "locationName": "durationUnits", + "documentation": "Units for duration, e.g. 'MONTHS'" + }, + "FixedPrice": { + "shape": "__double", + "locationName": "fixedPrice", + "documentation": "One-time charge for each reserved resource, e.g. '0.0' for a NO_UPFRONT offering" + }, + "OfferingDescription": { + "shape": "__string", + "locationName": "offeringDescription", + "documentation": "Offering description, e.g. 
'HD AVC output at 10-20 Mbps, 30 fps, and standard VQ in US West (Oregon)'" + }, + "OfferingId": { + "shape": "__string", + "locationName": "offeringId", + "documentation": "Unique offering ID, e.g. '87654321'" + }, + "OfferingType": { + "shape": "OfferingType", + "locationName": "offeringType", + "documentation": "Offering type, e.g. 'NO_UPFRONT'" + }, + "Region": { + "shape": "__string", + "locationName": "region", + "documentation": "AWS region, e.g. 'us-west-2'" + }, + "ResourceSpecification": { + "shape": "ReservationResourceSpecification", + "locationName": "resourceSpecification", + "documentation": "Resource configuration details" + }, + "UsagePrice": { + "shape": "__double", + "locationName": "usagePrice", + "documentation": "Recurring usage charge for each reserved resource, e.g. '157.0'" + } + }, + "documentation": "Placeholder documentation for DescribeOfferingResponse" + }, + "DescribeReservationRequest": { + "type": "structure", + "members": { + "ReservationId": { + "shape": "__string", + "location": "uri", + "locationName": "reservationId", + "documentation": "Unique reservation ID, e.g. '1234567'" + } + }, + "required": [ + "ReservationId" + ], + "documentation": "Placeholder documentation for DescribeReservationRequest" + }, + "DescribeReservationResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "Unique reservation ARN, e.g. 'arn:aws:medialive:us-west-2:123456789012:reservation:1234567'" + }, + "Count": { + "shape": "__integer", + "locationName": "count", + "documentation": "Number of reserved resources" + }, + "CurrencyCode": { + "shape": "__string", + "locationName": "currencyCode", + "documentation": "Currency code for usagePrice and fixedPrice in ISO-4217 format, e.g. 'USD'" + }, + "Duration": { + "shape": "__integer", + "locationName": "duration", + "documentation": "Lease duration, e.g. 
'12'" + }, + "DurationUnits": { + "shape": "OfferingDurationUnits", + "locationName": "durationUnits", + "documentation": "Units for duration, e.g. 'MONTHS'" + }, + "End": { + "shape": "__string", + "locationName": "end", + "documentation": "Reservation UTC end date and time in ISO-8601 format, e.g. '2019-03-01T00:00:00'" + }, + "FixedPrice": { + "shape": "__double", + "locationName": "fixedPrice", + "documentation": "One-time charge for each reserved resource, e.g. '0.0' for a NO_UPFRONT offering" + }, + "Name": { + "shape": "__string", + "locationName": "name", + "documentation": "User specified reservation name" + }, + "OfferingDescription": { + "shape": "__string", + "locationName": "offeringDescription", + "documentation": "Offering description, e.g. 'HD AVC output at 10-20 Mbps, 30 fps, and standard VQ in US West (Oregon)'" + }, + "OfferingId": { + "shape": "__string", + "locationName": "offeringId", + "documentation": "Unique offering ID, e.g. '87654321'" + }, + "OfferingType": { + "shape": "OfferingType", + "locationName": "offeringType", + "documentation": "Offering type, e.g. 'NO_UPFRONT'" + }, + "Region": { + "shape": "__string", + "locationName": "region", + "documentation": "AWS region, e.g. 'us-west-2'" + }, + "ReservationId": { + "shape": "__string", + "locationName": "reservationId", + "documentation": "Unique reservation ID, e.g. '1234567'" + }, + "ResourceSpecification": { + "shape": "ReservationResourceSpecification", + "locationName": "resourceSpecification", + "documentation": "Resource configuration details" + }, + "Start": { + "shape": "__string", + "locationName": "start", + "documentation": "Reservation UTC start date and time in ISO-8601 format, e.g. '2018-03-01T00:00:00'" + }, + "State": { + "shape": "ReservationState", + "locationName": "state", + "documentation": "Current state of reservation, e.g. 
'ACTIVE'" + }, + "UsagePrice": { + "shape": "__double", + "locationName": "usagePrice", + "documentation": "Recurring usage charge for each reserved resource, e.g. '157.0'" + } + }, + "documentation": "Placeholder documentation for DescribeReservationResponse" + }, "DvbNitSettings": { "type": "structure", "members": { @@ -4789,6 +5383,203 @@ }, "documentation": "Placeholder documentation for ListInputsResultModel" }, + "ListOfferingsRequest": { + "type": "structure", + "members": { + "ChannelConfiguration": { + "shape": "__string", + "location": "querystring", + "locationName": "channelConfiguration", + "documentation": "Filter to offerings that match the configuration of an existing channel, e.g. '2345678' (a channel ID)\n" + }, + "Codec": { + "shape": "__string", + "location": "querystring", + "locationName": "codec", + "documentation": "Filter by codec, 'AVC', 'HEVC', 'MPEG2', or 'AUDIO'" + }, + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults" + }, + "MaximumBitrate": { + "shape": "__string", + "location": "querystring", + "locationName": "maximumBitrate", + "documentation": "Filter by bitrate, 'MAX_10_MBPS', 'MAX_20_MBPS', or 'MAX_50_MBPS'\n" + }, + "MaximumFramerate": { + "shape": "__string", + "location": "querystring", + "locationName": "maximumFramerate", + "documentation": "Filter by framerate, 'MAX_30_FPS' or 'MAX_60_FPS'" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken" + }, + "Resolution": { + "shape": "__string", + "location": "querystring", + "locationName": "resolution", + "documentation": "Filter by resolution, 'SD', 'HD', or 'UHD'" + }, + "ResourceType": { + "shape": "__string", + "location": "querystring", + "locationName": "resourceType", + "documentation": "Filter by resource type, 'INPUT', 'OUTPUT', or 'CHANNEL'" + }, + "SpecialFeature": { + "shape": "__string", + "location": "querystring", + "locationName": "specialFeature", + 
"documentation": "Filter by special feature, 'ADVANCED_AUDIO' or 'AUDIO_NORMALIZATION'\n" + }, + "VideoQuality": { + "shape": "__string", + "location": "querystring", + "locationName": "videoQuality", + "documentation": "Filter by video quality, 'STANDARD', 'ENHANCED', or 'PREMIUM'\n" + } + }, + "documentation": "Placeholder documentation for ListOfferingsRequest" + }, + "ListOfferingsResponse": { + "type": "structure", + "members": { + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "Token to retrieve the next page of results" + }, + "Offerings": { + "shape": "__listOfOffering", + "locationName": "offerings", + "documentation": "List of offerings" + } + }, + "documentation": "Placeholder documentation for ListOfferingsResponse" + }, + "ListOfferingsResultModel": { + "type": "structure", + "members": { + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "Token to retrieve the next page of results" + }, + "Offerings": { + "shape": "__listOfOffering", + "locationName": "offerings", + "documentation": "List of offerings" + } + }, + "documentation": "ListOfferings response" + }, + "ListReservationsRequest": { + "type": "structure", + "members": { + "Codec": { + "shape": "__string", + "location": "querystring", + "locationName": "codec", + "documentation": "Filter by codec, 'AVC', 'HEVC', 'MPEG2', or 'AUDIO'" + }, + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults" + }, + "MaximumBitrate": { + "shape": "__string", + "location": "querystring", + "locationName": "maximumBitrate", + "documentation": "Filter by bitrate, 'MAX_10_MBPS', 'MAX_20_MBPS', or 'MAX_50_MBPS'\n" + }, + "MaximumFramerate": { + "shape": "__string", + "location": "querystring", + "locationName": "maximumFramerate", + "documentation": "Filter by framerate, 'MAX_30_FPS' or 'MAX_60_FPS'" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": 
"nextToken" + }, + "Resolution": { + "shape": "__string", + "location": "querystring", + "locationName": "resolution", + "documentation": "Filter by resolution, 'SD', 'HD', or 'UHD'" + }, + "ResourceType": { + "shape": "__string", + "location": "querystring", + "locationName": "resourceType", + "documentation": "Filter by resource type, 'INPUT', 'OUTPUT', or 'CHANNEL'" + }, + "SpecialFeature": { + "shape": "__string", + "location": "querystring", + "locationName": "specialFeature", + "documentation": "Filter by special feature, 'ADVANCED_AUDIO' or 'AUDIO_NORMALIZATION'\n" + }, + "VideoQuality": { + "shape": "__string", + "location": "querystring", + "locationName": "videoQuality", + "documentation": "Filter by video quality, 'STANDARD', 'ENHANCED', or 'PREMIUM'\n" + } + }, + "documentation": "Placeholder documentation for ListReservationsRequest" + }, + "ListReservationsResponse": { + "type": "structure", + "members": { + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "Token to retrieve the next page of results" + }, + "Reservations": { + "shape": "__listOfReservation", + "locationName": "reservations", + "documentation": "List of reservations" + } + }, + "documentation": "Placeholder documentation for ListReservationsResponse" + }, + "ListReservationsResultModel": { + "type": "structure", + "members": { + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "Token to retrieve the next page of results" + }, + "Reservations": { + "shape": "__listOfReservation", + "locationName": "reservations", + "documentation": "List of reservations" + } + }, + "documentation": "ListReservations response" + }, + "LogLevel": { + "type": "string", + "enum": [ + "ERROR", + "WARNING", + "INFO", + "DEBUG", + "DISABLED" + ], + "documentation": "The log level the user wants for their channel." 
+ }, "M2tsAbsentInputAudioBehavior": { "type": "string", "enum": [ @@ -5471,6 +6262,81 @@ }, "documentation": "Placeholder documentation for NotFoundException" }, + "Offering": { + "type": "structure", + "members": { + "Arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "Unique offering ARN, e.g. 'arn:aws:medialive:us-west-2:123456789012:offering:87654321'" + }, + "CurrencyCode": { + "shape": "__string", + "locationName": "currencyCode", + "documentation": "Currency code for usagePrice and fixedPrice in ISO-4217 format, e.g. 'USD'" + }, + "Duration": { + "shape": "__integer", + "locationName": "duration", + "documentation": "Lease duration, e.g. '12'" + }, + "DurationUnits": { + "shape": "OfferingDurationUnits", + "locationName": "durationUnits", + "documentation": "Units for duration, e.g. 'MONTHS'" + }, + "FixedPrice": { + "shape": "__double", + "locationName": "fixedPrice", + "documentation": "One-time charge for each reserved resource, e.g. '0.0' for a NO_UPFRONT offering" + }, + "OfferingDescription": { + "shape": "__string", + "locationName": "offeringDescription", + "documentation": "Offering description, e.g. 'HD AVC output at 10-20 Mbps, 30 fps, and standard VQ in US West (Oregon)'" + }, + "OfferingId": { + "shape": "__string", + "locationName": "offeringId", + "documentation": "Unique offering ID, e.g. '87654321'" + }, + "OfferingType": { + "shape": "OfferingType", + "locationName": "offeringType", + "documentation": "Offering type, e.g. 'NO_UPFRONT'" + }, + "Region": { + "shape": "__string", + "locationName": "region", + "documentation": "AWS region, e.g. 'us-west-2'" + }, + "ResourceSpecification": { + "shape": "ReservationResourceSpecification", + "locationName": "resourceSpecification", + "documentation": "Resource configuration details" + }, + "UsagePrice": { + "shape": "__double", + "locationName": "usagePrice", + "documentation": "Recurring usage charge for each reserved resource, e.g. 
'157.0'" + } + }, + "documentation": "Reserved resources available for purchase" + }, + "OfferingDurationUnits": { + "type": "string", + "documentation": "Units for duration, e.g. 'MONTHS'", + "enum": [ + "MONTHS" + ] + }, + "OfferingType": { + "type": "string", + "documentation": "Offering type, e.g. 'NO_UPFRONT'", + "enum": [ + "NO_UPFRONT" + ] + }, "Output": { "type": "structure", "members": { @@ -5639,6 +6505,79 @@ }, "documentation": "Placeholder documentation for PassThroughSettings" }, + "PurchaseOffering": { + "type": "structure", + "members": { + "Count": { + "shape": "__integerMin1", + "locationName": "count", + "documentation": "Number of resources" + }, + "Name": { + "shape": "__string", + "locationName": "name", + "documentation": "Name for the new reservation" + }, + "RequestId": { + "shape": "__string", + "locationName": "requestId", + "documentation": "Unique request ID to be specified. This is needed to prevent retries from creating multiple resources.", + "idempotencyToken": true + } + }, + "documentation": "PurchaseOffering request" + }, + "PurchaseOfferingRequest": { + "type": "structure", + "members": { + "Count": { + "shape": "__integerMin1", + "locationName": "count", + "documentation": "Number of resources" + }, + "Name": { + "shape": "__string", + "locationName": "name", + "documentation": "Name for the new reservation" + }, + "OfferingId": { + "shape": "__string", + "location": "uri", + "locationName": "offeringId", + "documentation": "Offering to purchase, e.g. '87654321'" + }, + "RequestId": { + "shape": "__string", + "locationName": "requestId", + "documentation": "Unique request ID to be specified. 
This is needed to prevent retries from creating multiple resources.", + "idempotencyToken": true + } + }, + "required": [ + "OfferingId" + ], + "documentation": "Placeholder documentation for PurchaseOfferingRequest" + }, + "PurchaseOfferingResponse": { + "type": "structure", + "members": { + "Reservation": { + "shape": "Reservation", + "locationName": "reservation" + } + }, + "documentation": "Placeholder documentation for PurchaseOfferingResponse" + }, + "PurchaseOfferingResultModel": { + "type": "structure", + "members": { + "Reservation": { + "shape": "Reservation", + "locationName": "reservation" + } + }, + "documentation": "PurchaseOffering response" + }, "RemixSettings": { "type": "structure", "members": { @@ -5663,6 +6602,210 @@ ], "documentation": "Placeholder documentation for RemixSettings" }, + "Reservation": { + "type": "structure", + "members": { + "Arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "Unique reservation ARN, e.g. 'arn:aws:medialive:us-west-2:123456789012:reservation:1234567'" + }, + "Count": { + "shape": "__integer", + "locationName": "count", + "documentation": "Number of reserved resources" + }, + "CurrencyCode": { + "shape": "__string", + "locationName": "currencyCode", + "documentation": "Currency code for usagePrice and fixedPrice in ISO-4217 format, e.g. 'USD'" + }, + "Duration": { + "shape": "__integer", + "locationName": "duration", + "documentation": "Lease duration, e.g. '12'" + }, + "DurationUnits": { + "shape": "OfferingDurationUnits", + "locationName": "durationUnits", + "documentation": "Units for duration, e.g. 'MONTHS'" + }, + "End": { + "shape": "__string", + "locationName": "end", + "documentation": "Reservation UTC end date and time in ISO-8601 format, e.g. '2019-03-01T00:00:00'" + }, + "FixedPrice": { + "shape": "__double", + "locationName": "fixedPrice", + "documentation": "One-time charge for each reserved resource, e.g. 
'0.0' for a NO_UPFRONT offering" + }, + "Name": { + "shape": "__string", + "locationName": "name", + "documentation": "User specified reservation name" + }, + "OfferingDescription": { + "shape": "__string", + "locationName": "offeringDescription", + "documentation": "Offering description, e.g. 'HD AVC output at 10-20 Mbps, 30 fps, and standard VQ in US West (Oregon)'" + }, + "OfferingId": { + "shape": "__string", + "locationName": "offeringId", + "documentation": "Unique offering ID, e.g. '87654321'" + }, + "OfferingType": { + "shape": "OfferingType", + "locationName": "offeringType", + "documentation": "Offering type, e.g. 'NO_UPFRONT'" + }, + "Region": { + "shape": "__string", + "locationName": "region", + "documentation": "AWS region, e.g. 'us-west-2'" + }, + "ReservationId": { + "shape": "__string", + "locationName": "reservationId", + "documentation": "Unique reservation ID, e.g. '1234567'" + }, + "ResourceSpecification": { + "shape": "ReservationResourceSpecification", + "locationName": "resourceSpecification", + "documentation": "Resource configuration details" + }, + "Start": { + "shape": "__string", + "locationName": "start", + "documentation": "Reservation UTC start date and time in ISO-8601 format, e.g. '2018-03-01T00:00:00'" + }, + "State": { + "shape": "ReservationState", + "locationName": "state", + "documentation": "Current state of reservation, e.g. 'ACTIVE'" + }, + "UsagePrice": { + "shape": "__double", + "locationName": "usagePrice", + "documentation": "Recurring usage charge for each reserved resource, e.g. 
'157.0'" + } + }, + "documentation": "Reserved resources available to use" + }, + "ReservationCodec": { + "type": "string", + "documentation": "Codec, 'MPEG2', 'AVC', 'HEVC', or 'AUDIO'", + "enum": [ + "MPEG2", + "AVC", + "HEVC", + "AUDIO" + ] + }, + "ReservationMaximumBitrate": { + "type": "string", + "documentation": "Maximum bitrate in megabits per second", + "enum": [ + "MAX_10_MBPS", + "MAX_20_MBPS", + "MAX_50_MBPS" + ] + }, + "ReservationMaximumFramerate": { + "type": "string", + "documentation": "Maximum framerate in frames per second (Outputs only)", + "enum": [ + "MAX_30_FPS", + "MAX_60_FPS" + ] + }, + "ReservationResolution": { + "type": "string", + "documentation": "Resolution based on lines of vertical resolution; SD is less than 720 lines, HD is 720 to 1080 lines, UHD is greater than 1080 lines\n", + "enum": [ + "SD", + "HD", + "UHD" + ] + }, + "ReservationResourceSpecification": { + "type": "structure", + "members": { + "Codec": { + "shape": "ReservationCodec", + "locationName": "codec", + "documentation": "Codec, e.g. 'AVC'" + }, + "MaximumBitrate": { + "shape": "ReservationMaximumBitrate", + "locationName": "maximumBitrate", + "documentation": "Maximum bitrate, e.g. 'MAX_20_MBPS'" + }, + "MaximumFramerate": { + "shape": "ReservationMaximumFramerate", + "locationName": "maximumFramerate", + "documentation": "Maximum framerate, e.g. 'MAX_30_FPS' (Outputs only)" + }, + "Resolution": { + "shape": "ReservationResolution", + "locationName": "resolution", + "documentation": "Resolution, e.g. 'HD'" + }, + "ResourceType": { + "shape": "ReservationResourceType", + "locationName": "resourceType", + "documentation": "Resource type, 'INPUT', 'OUTPUT', or 'CHANNEL'" + }, + "SpecialFeature": { + "shape": "ReservationSpecialFeature", + "locationName": "specialFeature", + "documentation": "Special feature, e.g. 
'AUDIO_NORMALIZATION' (Channels only)" + }, + "VideoQuality": { + "shape": "ReservationVideoQuality", + "locationName": "videoQuality", + "documentation": "Video quality, e.g. 'STANDARD' (Outputs only)" + } + }, + "documentation": "Resource configuration (codec, resolution, bitrate, ...)" + }, + "ReservationResourceType": { + "type": "string", + "documentation": "Resource type, 'INPUT', 'OUTPUT', or 'CHANNEL'", + "enum": [ + "INPUT", + "OUTPUT", + "CHANNEL" + ] + }, + "ReservationSpecialFeature": { + "type": "string", + "documentation": "Special features, 'ADVANCED_AUDIO' or 'AUDIO_NORMALIZATION'", + "enum": [ + "ADVANCED_AUDIO", + "AUDIO_NORMALIZATION" + ] + }, + "ReservationState": { + "type": "string", + "documentation": "Current reservation state", + "enum": [ + "ACTIVE", + "EXPIRED", + "CANCELED", + "DELETED" + ] + }, + "ReservationVideoQuality": { + "type": "string", + "documentation": "Video quality, e.g. 'STANDARD' (Outputs only)", + "enum": [ + "STANDARD", + "ENHANCED", + "PREMIUM" + ] + }, "ResourceConflict": { "type": "structure", "members": { @@ -6035,6 +7178,11 @@ "shape": "InputSpecification", "locationName": "inputSpecification" }, + "LogLevel": { + "shape": "LogLevel", + "locationName": "logLevel", + "documentation": "The log level being written to CloudWatch Logs." + }, "Name": { "shape": "__string", "locationName": "name", @@ -6127,6 +7275,11 @@ "shape": "InputSpecification", "locationName": "inputSpecification" }, + "LogLevel": { + "shape": "LogLevel", + "locationName": "logLevel", + "documentation": "The log level being written to CloudWatch Logs." + }, "Name": { "shape": "__string", "locationName": "name", @@ -6337,6 +7490,11 @@ "locationName": "inputSpecification", "documentation": "Specification of input for this channel (max. bitrate, resolution, codec, etc.)" }, + "LogLevel": { + "shape": "LogLevel", + "locationName": "logLevel", + "documentation": "The log level to write to CloudWatch Logs." 
+ }, "Name": { "shape": "__string", "locationName": "name", @@ -6378,6 +7536,11 @@ "locationName": "inputSpecification", "documentation": "Specification of input for this channel (max. bitrate, resolution, codec, etc.)" }, + "LogLevel": { + "shape": "LogLevel", + "locationName": "logLevel", + "documentation": "The log level to write to CloudWatch Logs." + }, "Name": { "shape": "__string", "locationName": "name", @@ -7080,6 +8243,13 @@ }, "documentation": "Placeholder documentation for __listOfInputWhitelistRuleCidr" }, + "__listOfOffering": { + "type": "list", + "member": { + "shape": "Offering" + }, + "documentation": "Placeholder documentation for __listOfOffering" + }, "__listOfOutput": { "type": "list", "member": { @@ -7108,6 +8278,13 @@ }, "documentation": "Placeholder documentation for __listOfOutputGroup" }, + "__listOfReservation": { + "type": "list", + "member": { + "shape": "Reservation" + }, + "documentation": "Placeholder documentation for __listOfReservation" + }, "__listOfValidationError": { "type": "list", "member": { diff --git a/botocore/data/mediatailor/2018-04-23/paginators-1.json b/botocore/data/mediatailor/2018-04-23/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/mediatailor/2018-04-23/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/mediatailor/2018-04-23/service-2.json b/botocore/data/mediatailor/2018-04-23/service-2.json new file mode 100644 index 00000000..5f3e6707 --- /dev/null +++ b/botocore/data/mediatailor/2018-04-23/service-2.json @@ -0,0 +1,320 @@ +{ + "metadata" : { + "apiVersion" : "2018-04-23", + "endpointPrefix" : "api.mediatailor", + "signingName" : "mediatailor", + "serviceFullName" : "AWS MediaTailor", + "serviceId" : "MediaTailor", + "protocol" : "rest-json", + "jsonVersion" : "1.1", + "uid" : "mediatailor-2018-04-23", + "signatureVersion" : "v4", + "serviceAbbreviation": "MediaTailor" + }, + "operations" : { + "DeletePlaybackConfiguration" 
: { + "name" : "DeletePlaybackConfiguration", + "http" : { + "method" : "DELETE", + "requestUri" : "/playbackConfiguration/{Name}", + "responseCode" : 204 + }, + "input" : { + "shape" : "DeletePlaybackConfigurationRequest" + }, + "output" : { + "shape" : "DeletePlaybackConfigurationResponse" + }, + "errors" : [ ], + "documentation" : "

Deletes the configuration for the specified name.

" + }, + "GetPlaybackConfiguration" : { + "name" : "GetPlaybackConfiguration", + "http" : { + "method" : "GET", + "requestUri" : "/playbackConfiguration/{Name}", + "responseCode" : 200 + }, + "input" : { + "shape" : "GetPlaybackConfigurationRequest" + }, + "output" : { + "shape" : "GetPlaybackConfigurationResponse" + }, + "errors" : [ ], + "documentation" : "

Returns the configuration for the specified name.

" + }, + "ListPlaybackConfigurations" : { + "name" : "ListPlaybackConfigurations", + "http" : { + "method" : "GET", + "requestUri" : "/playbackConfigurations", + "responseCode" : 200 + }, + "input" : { + "shape" : "ListPlaybackConfigurationsRequest" + }, + "output" : { + "shape" : "ListPlaybackConfigurationsResponse" + }, + "errors" : [ ], + "documentation" : "

Returns a list of the configurations defined in AWS Elemental MediaTailor. You can specify a max number of configurations to return at a time. The default max is 50. Results are returned in pagefuls. If AWS Elemental MediaTailor has more configurations than the specified max, it provides parameters in the response that you can use to retrieve the next pageful.

" + }, + "PutPlaybackConfiguration" : { + "name" : "PutPlaybackConfiguration", + "http" : { + "method" : "PUT", + "requestUri" : "/playbackConfiguration", + "responseCode" : 200 + }, + "input" : { + "shape" : "PutPlaybackConfigurationRequest" + }, + "output" : { + "shape" : "PutPlaybackConfigurationResponse" + }, + "errors" : [ ], + "documentation" : "

Adds a new configuration to AWS Elemental MediaTailor.

" + } + }, + "shapes" : { + "CdnConfiguration" : { + "type" : "structure", + "members" : { + "AdSegmentUrlPrefix" : { + "shape" : "__string", + "documentation" : "

A non-default content delivery network (CDN) to serve ad segments. By default, AWS Elemental MediaTailor uses Amazon CloudFront with default cache settings as its CDN for ad segments. To set up an alternate CDN, create a rule in your CDN for the following origin: ads.mediatailor.<region>.amazonaws.com. Then specify the rule's name in this AdSegmentUrlPrefix. When AWS Elemental MediaTailor serves a manifest, it reports your CDN as the source for ad segments.

" + }, + "ContentSegmentUrlPrefix" : { + "shape" : "__string", + "documentation" : "

A content delivery network (CDN) to cache content segments, so that content requests don’t always have to go to the origin server. First, create a rule in your CDN for the content segment origin server. Then specify the rule's name in this ContentSegmentUrlPrefix. When AWS Elemental MediaTailor serves a manifest, it reports your CDN as the source for content segments.

" + } + }, + "documentation" : "

The configuration for using a content delivery network (CDN), like Amazon CloudFront, for content and ad segment management.

" + }, + "HlsConfiguration": { + "type" : "structure", + "members" : { + "ManifestEndpointPrefix" : { + "shape" : "__string", + "documentation" : "

The URL that is used to initiate a playback session for devices that support Apple HLS. The session uses server-side reporting.

" + } + }, + "documentation" : "

The configuration for HLS content.

" + }, + "DeletePlaybackConfigurationRequest" : { + "type" : "structure", + "members" : { + "Name" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "Name", + "documentation" : "

The identifier for the configuration.

" + } + }, + "required" : [ "Name" ] + }, + "DeletePlaybackConfigurationResponse" : { + "type" : "structure", + "members" : { } + }, + "Empty" : { + "type" : "structure", + "members" : { } + }, + "GetPlaybackConfigurationRequest" : { + "type" : "structure", + "members" : { + "Name" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "Name", + "documentation" : "

The identifier for the configuration.

" + } + }, + "required" : [ "Name" ] + }, + "GetPlaybackConfigurationResponse" : { + "type" : "structure", + "members" : { + "AdDecisionServerUrl" : { + "shape" : "__string", + "documentation" : "

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing, you can provide a static VAST URL. The maximum length is 25000 characters.

" + }, + "CdnConfiguration" : { + "shape" : "CdnConfiguration", + "documentation" : "

The configuration for using a content delivery network (CDN), like Amazon CloudFront, for content and ad segment management.

" + }, + "HlsConfiguration" : { + "shape" : "HlsConfiguration", + "documentation" : "

The configuration for HLS content.

" + }, + "Name" : { + "shape" : "__string", + "documentation" : "

The identifier for the configuration.

" + }, + "PlaybackEndpointPrefix" : { + "shape" : "__string", + "documentation" : "

The URL that the player accesses to get a manifest from AWS Elemental MediaTailor. This session will use server-side reporting.

" + }, + "SessionInitializationEndpointPrefix" : { + "shape" : "__string", + "documentation" : "

The URL that the player uses to initialize a session that uses client-side reporting.

" + }, + "SlateAdUrl" : { + "shape" : "__string", + "documentation" : "

URL for a high-quality video asset to transcode and use to fill in time that's not used by ads. AWS Elemental MediaTailor shows the slate to fill in gaps in media content. Configuring the slate is optional for non-VPAID configurations. For VPAID, the slate is required because AWS Elemental MediaTailor provides it in the slots designated for dynamic ad content. The slate must be a high-quality asset that contains both audio and video.

" + }, + "VideoContentSourceUrl" : { + "shape" : "__string", + "documentation" : "

The URL prefix for the master playlist for the stream, minus the asset ID. The maximum length is 512 characters.

" + } + } + }, + "PlaybackConfiguration" : { + "type" : "structure", + "documentation": "

The AWS MediaTailor configuration.

", + "members" : { + "AdDecisionServerUrl" : { + "shape" : "__string", + "documentation" : "

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing, you can provide a static VAST URL. The maximum length is 25000 characters.

" + }, + "CdnConfiguration" : { + "shape" : "CdnConfiguration", + "documentation" : "

The configuration for using a content delivery network (CDN), like Amazon CloudFront, for content and ad segment management.

" + }, + "Name" : { + "shape" : "__string", + "documentation" : "

The identifier for the configuration.

" + }, + "SlateAdUrl" : { + "shape" : "__string", + "documentation" : "

URL for a high-quality video asset to transcode and use to fill in time that's not used by ads. AWS Elemental MediaTailor shows the slate to fill in gaps in media content. Configuring the slate is optional for non-VPAID configurations. For VPAID, the slate is required because AWS Elemental MediaTailor provides it in the slots designated for dynamic ad content. The slate must be a high-quality asset that contains both audio and video.

" + }, + "VideoContentSourceUrl" : { + "shape" : "__string", + "documentation" : "

The URL prefix for the master playlist for the stream, minus the asset ID. The maximum length is 512 characters.

" + } + } + }, + "ListPlaybackConfigurationsRequest" : { + "type" : "structure", + "members" : { + "MaxResults" : { + "shape" : "__integerMin1Max100", + "location" : "querystring", + "locationName" : "MaxResults", + "documentation" : "

Maximum number of records to return.

" + }, + "NextToken" : { + "shape" : "__string", + "location" : "querystring", + "locationName" : "NextToken", + "documentation" : "

Pagination token returned by the GET list request when results overrun the maximum allowed. Use the token to fetch the next page of results.

" + } + } + }, + "ListPlaybackConfigurationsResponse" : { + "type" : "structure", + "members" : { + "Items" : { + "shape" : "__listOfPlaybackConfigurations", + "documentation" : "

Array of playback configurations. This may be all of the available configurations or a subset, depending on the settings you provide and on the total number of configurations stored.

" + }, + "NextToken" : { + "shape" : "__string", + "documentation" : "

Pagination token returned by the GET list request when results overrun the maximum allowed. Use the token to fetch the next page of results.

" + } + } + }, + "PutPlaybackConfigurationRequest" : { + "type" : "structure", + "members" : { + "AdDecisionServerUrl" : { + "shape" : "__string", + "documentation" : "

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing you can provide a static VAST URL. The maximum length is 25000 characters.

" + }, + "CdnConfiguration" : { + "shape" : "CdnConfiguration", + "documentation" : "

The configuration for using a content delivery network (CDN), like Amazon CloudFront, for content and ad segment management.

" + }, + "Name" : { + "shape" : "__string", + "documentation" : "

The identifier for the configuration.

" + }, + "SlateAdUrl" : { + "shape" : "__string", + "documentation" : "

The URL for a high-quality video asset to transcode and use to fill in time that's not used by ads. AWS Elemental MediaTailor shows the slate to fill in gaps in media content. Configuring the slate is optional for non-VPAID configurations. For VPAID, the slate is required because AWS Elemental MediaTailor provides it in the slots that are designated for dynamic ad content. The slate must be a high-quality asset that contains both audio and video.

" + }, + "VideoContentSourceUrl" : { + "shape" : "__string", + "documentation" : "

The URL prefix for the master playlist for the stream, minus the asset ID. The maximum length is 512 characters.

" + } + } + }, + "PutPlaybackConfigurationResponse" : { + "type" : "structure", + "members" : { + "AdDecisionServerUrl" : { + "shape" : "__string", + "documentation" : "

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing, you can provide a static VAST URL. The maximum length is 25000 characters.

" + }, + "CdnConfiguration" : { + "shape" : "CdnConfiguration", + "documentation" : "

The configuration for using a content delivery network (CDN), like Amazon CloudFront, for content and ad segment management.

" + }, + "HlsConfiguration" : { + "shape" : "HlsConfiguration", + "documentation" : "

The configuration for HLS content.

" + }, + "Name" : { + "shape" : "__string", + "documentation" : "

The identifier for the configuration.

" + }, + "PlaybackEndpointPrefix" : { + "shape" : "__string", + "documentation" : "

The URL that the player accesses to get a manifest from AWS Elemental MediaTailor. This session will use server-side reporting.

" + }, + "SessionInitializationEndpointPrefix" : { + "shape" : "__string", + "documentation" : "

The URL that the player uses to initialize a session that uses client-side reporting.

" + }, + "SlateAdUrl" : { + "shape" : "__string", + "documentation" : "

URL for a high-quality video asset to transcode and use to fill in time that's not used by ads. AWS Elemental MediaTailor shows the slate to fill in gaps in media content. Configuring the slate is optional for non-VPAID configurations. For VPAID, the slate is required because AWS Elemental MediaTailor provides it in the slots designated for dynamic ad content. The slate must be a high-quality asset that contains both audio and video.

" + }, + "VideoContentSourceUrl" : { + "shape" : "__string", + "documentation" : "

The URL prefix for the master playlist for the stream, minus the asset ID. The maximum length is 512 characters.

" + } + } + }, + "__boolean" : { + "type" : "boolean" + }, + "__double" : { + "type" : "double" + }, + "__integer" : { + "type" : "integer" + }, + "__listOfPlaybackConfigurations" : { + "type" : "list", + "member": { + "shape" : "PlaybackConfiguration" + } + }, + "__long" : { + "type" : "long" + }, + "__string" : { + "type" : "string" + }, + "__integerMin1Max100" : { + "type": "integer", + "min": 1, + "max": 100 + } + }, + "documentation" : "

Use the AWS Elemental MediaTailor SDK to configure scalable ad insertion for your live and VOD content. With AWS Elemental MediaTailor, you can serve targeted ads to viewers while maintaining broadcast quality in over-the-top (OTT) video applications. For information about using the service, including detailed information about the settings covered in this guide, see the AWS Elemental MediaTailor User Guide.

Through the SDK, you manage AWS Elemental MediaTailor configurations the same as you do through the console. For example, you specify ad insertion behavior and mapping information for the origin server and the ad decision server (ADS).

" +} diff --git a/botocore/data/meteringmarketplace/2016-01-14/service-2.json b/botocore/data/meteringmarketplace/2016-01-14/service-2.json index b6f34128..87ad4a2a 100644 --- a/botocore/data/meteringmarketplace/2016-01-14/service-2.json +++ b/botocore/data/meteringmarketplace/2016-01-14/service-2.json @@ -7,6 +7,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceFullName":"AWSMarketplace Metering", + "serviceId":"Marketplace Metering", "signatureVersion":"v4", "signingName":"aws-marketplace", "targetPrefix":"AWSMPMeteringService" diff --git a/botocore/data/mgh/2017-05-31/service-2.json b/botocore/data/mgh/2017-05-31/service-2.json index 74eb2aa8..68e2b5a2 100644 --- a/botocore/data/mgh/2017-05-31/service-2.json +++ b/botocore/data/mgh/2017-05-31/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceFullName":"AWS Migration Hub", + "serviceId":"Migration Hub", "signatureVersion":"v4", "targetPrefix":"AWSMigrationHub", "uid":"AWSMigrationHub-2017-05-31" @@ -303,7 +304,7 @@ {"shape":"InvalidInputException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Provides identifying details of the resource being migrated so that it can be associated in the Application Discovery Service (ADS)'s repository. This association occurs asynchronously after PutResourceAttributes returns.

  • Keep in mind that subsequent calls to PutResourceAttributes will override previously stored attributes. For example, if it is first called with a MAC address, but later, it is desired to add an IP address, it will then be required to call it with both the IP and MAC addresses to prevent overiding the MAC address.

  • Note the instructions regarding the special use case of the ResourceAttributeList parameter when specifying any \"VM\" related value.

Because this is an asynchronous call, it will always return 200, whether an association occurs or not. To confirm if an association was found based on the provided details, call ListDiscoveredResources.

" + "documentation":"

Provides identifying details of the resource being migrated so that it can be associated in the Application Discovery Service (ADS)'s repository. This association occurs asynchronously after PutResourceAttributes returns.

  • Keep in mind that subsequent calls to PutResourceAttributes will override previously stored attributes. For example, if it is first called with a MAC address, but later, it is desired to add an IP address, it will then be required to call it with both the IP and MAC addresses to prevent overriding the MAC address.

  • Note the instructions regarding the special use case of the ResourceAttributeList parameter when specifying any \"VM\" related value.

Because this is an asynchronous call, it will always return 200, whether an association occurs or not. To confirm if an association was found based on the provided details, call ListDiscoveredResources.

" } }, "shapes":{ @@ -995,7 +996,7 @@ }, "ResourceAttributeList":{ "shape":"ResourceAttributeList", - "documentation":"

Information about the resource that is being migrated. This data will be used to map the task to a resource in the Application Discovery Service (ADS)'s repository.

In the ResourceAttribute object array, the Type field is reserved for the following values: IPV4_ADDRESS | IPV6_ADDRESS | MAC_ADDRESS | FQDN | VM_MANAGER_ID | VM_MANAGED_OBJECT_REFERENCE | VM_NAME | VM_PATH | BIOS_ID | MOTHERBOARD_SERIAL_NUMBER, and the identifying value can be a string up to 256 characters.

If any \"VM\" related value is used for a ResourceAttribute object, it is required that VM_MANAGER_ID, as a minimum, is always used. If it is not used, the server will not be associated in the Application Discovery Service (ADS)'s repository using any of the other \"VM\" related values, and you will experience data loss. See the Example section below for a use case of specifying \"VM\" related values.

" + "documentation":"

Information about the resource that is being migrated. This data will be used to map the task to a resource in the Application Discovery Service (ADS)'s repository.

Takes the object array of ResourceAttribute where the Type field is reserved for the following values: IPV4_ADDRESS | IPV6_ADDRESS | MAC_ADDRESS | FQDN | VM_MANAGER_ID | VM_MANAGED_OBJECT_REFERENCE | VM_NAME | VM_PATH | BIOS_ID | MOTHERBOARD_SERIAL_NUMBER where the identifying value can be a string up to 256 characters.

  • If any \"VM\" related value is set for a ResourceAttribute object, it is required that VM_MANAGER_ID, as a minimum, is always set. If VM_MANAGER_ID is not set, then all \"VM\" fields will be discarded and \"VM\" fields will not be used for matching the migration task to a server in Application Discovery Service (ADS)'s repository. See the Example section below for a use case of specifying \"VM\" related values.

  • If a server you are trying to match has multiple IP or MAC addresses, you should provide as many as you know in separate type/value pairs passed to the ResourceAttributeList parameter to maximize the chances of matching.

" }, "DryRun":{ "shape":"DryRun", diff --git a/botocore/data/mobile/2017-07-01/service-2.json b/botocore/data/mobile/2017-07-01/service-2.json index 8eee00d5..b588798e 100644 --- a/botocore/data/mobile/2017-07-01/service-2.json +++ b/botocore/data/mobile/2017-07-01/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"rest-json", "serviceFullName":"AWS Mobile", + "serviceId":"Mobile", "signatureVersion":"v4", "signingName":"AWSMobileHubService", "uid":"mobile-2017-07-01" diff --git a/botocore/data/neptune/2014-10-31/paginators-1.json b/botocore/data/neptune/2014-10-31/paginators-1.json new file mode 100644 index 00000000..f1a247f7 --- /dev/null +++ b/botocore/data/neptune/2014-10-31/paginators-1.json @@ -0,0 +1,58 @@ +{ + "pagination": { + "DescribeDBEngineVersions": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "DBEngineVersions" + }, + "DescribeDBInstances": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "DBInstances" + }, + "DescribeDBParameterGroups": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "DBParameterGroups" + }, + "DescribeDBParameters": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "Parameters" + }, + "DescribeDBSubnetGroups": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "DBSubnetGroups" + }, + "DescribeEngineDefaultParameters": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "EngineDefaults.Marker", + "result_key": "EngineDefaults.Parameters" + }, + "DescribeEventSubscriptions": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "EventSubscriptionsList" + }, + "DescribeEvents": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "Events" + 
}, + "DescribeOrderableDBInstanceOptions": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "OrderableDBInstanceOptions" + } + } +} diff --git a/botocore/data/neptune/2014-10-31/service-2.json b/botocore/data/neptune/2014-10-31/service-2.json new file mode 100644 index 00000000..bb7d1563 --- /dev/null +++ b/botocore/data/neptune/2014-10-31/service-2.json @@ -0,0 +1,5513 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-10-31", + "endpointPrefix":"rds", + "protocol":"query", + "serviceAbbreviation":"Amazon Neptune", + "serviceFullName":"Amazon Neptune", + "serviceId":"Neptune", + "signatureVersion":"v4", + "signingName":"rds", + "uid":"neptune-2014-10-31", + "xmlNamespace":"http://rds.amazonaws.com/doc/2014-10-31/" + }, + "operations":{ + "AddRoleToDBCluster":{ + "name":"AddRoleToDBCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddRoleToDBClusterMessage"}, + "errors":[ + {"shape":"DBClusterNotFoundFault"}, + {"shape":"DBClusterRoleAlreadyExistsFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"DBClusterRoleQuotaExceededFault"} + ], + "documentation":"

Associates an Identity and Access Management (IAM) role with a Neptune DB cluster.

" + }, + "AddSourceIdentifierToSubscription":{ + "name":"AddSourceIdentifierToSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddSourceIdentifierToSubscriptionMessage"}, + "output":{ + "shape":"AddSourceIdentifierToSubscriptionResult", + "resultWrapper":"AddSourceIdentifierToSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ], + "documentation":"

Adds a source identifier to an existing event notification subscription.

" + }, + "AddTagsToResource":{ + "name":"AddTagsToResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddTagsToResourceMessage"}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"DBClusterNotFoundFault"} + ], + "documentation":"

Adds metadata tags to an Amazon Neptune resource. These tags can also be used with cost allocation reporting to track cost associated with Amazon Neptune resources, or used in a Condition statement in an IAM policy for Amazon Neptune.

" + }, + "ApplyPendingMaintenanceAction":{ + "name":"ApplyPendingMaintenanceAction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ApplyPendingMaintenanceActionMessage"}, + "output":{ + "shape":"ApplyPendingMaintenanceActionResult", + "resultWrapper":"ApplyPendingMaintenanceActionResult" + }, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ], + "documentation":"

Applies a pending maintenance action to a resource (for example, to a DB instance).

" + }, + "CopyDBClusterParameterGroup":{ + "name":"CopyDBClusterParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyDBClusterParameterGroupMessage"}, + "output":{ + "shape":"CopyDBClusterParameterGroupResult", + "resultWrapper":"CopyDBClusterParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBParameterGroupQuotaExceededFault"}, + {"shape":"DBParameterGroupAlreadyExistsFault"} + ], + "documentation":"

Copies the specified DB cluster parameter group.

" + }, + "CopyDBClusterSnapshot":{ + "name":"CopyDBClusterSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyDBClusterSnapshotMessage"}, + "output":{ + "shape":"CopyDBClusterSnapshotResult", + "resultWrapper":"CopyDBClusterSnapshotResult" + }, + "errors":[ + {"shape":"DBClusterSnapshotAlreadyExistsFault"}, + {"shape":"DBClusterSnapshotNotFoundFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"InvalidDBClusterSnapshotStateFault"}, + {"shape":"SnapshotQuotaExceededFault"}, + {"shape":"KMSKeyNotAccessibleFault"} + ], + "documentation":"

Copies a snapshot of a DB cluster.

To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.

You can copy an encrypted DB cluster snapshot from another AWS Region. In that case, the AWS Region where you call the CopyDBClusterSnapshot action is the destination AWS Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another AWS Region, you must provide the following values:

  • KmsKeyId - The AWS Key Management System (AWS KMS) key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region.

  • PreSignedUrl - A URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot action to be called in the source AWS Region where the DB cluster snapshot is copied from. The pre-signed URL must be a valid request for the CopyDBClusterSnapshot API action that can be executed in the source AWS Region that contains the encrypted DB cluster snapshot to be copied.

    The pre-signed URL request must contain the following parameter values:

    • KmsKeyId - The KMS key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region. This is the same identifier for both the CopyDBClusterSnapshot action that is called in the destination AWS Region, and the action contained in the pre-signed URL.

    • DestinationRegion - The name of the AWS Region that the DB cluster snapshot will be created in.

    • SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region. For example, if you are copying an encrypted DB cluster snapshot from the us-west-2 AWS Region, then your SourceDBClusterSnapshotIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:neptune-cluster1-snapshot-20161115.

    To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.

  • TargetDBClusterSnapshotIdentifier - The identifier for the new copy of the DB cluster snapshot in the destination AWS Region.

  • SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the ARN format for the source AWS Region and is the same value as the SourceDBClusterSnapshotIdentifier in the pre-signed URL.

To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified by TargetDBClusterSnapshotIdentifier while that DB cluster snapshot is in \"copying\" status.

" + }, + "CopyDBParameterGroup":{ + "name":"CopyDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CopyDBParameterGroupMessage"}, + "output":{ + "shape":"CopyDBParameterGroupResult", + "resultWrapper":"CopyDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBParameterGroupAlreadyExistsFault"}, + {"shape":"DBParameterGroupQuotaExceededFault"} + ], + "documentation":"

Copies the specified DB parameter group.

" + }, + "CreateDBCluster":{ + "name":"CreateDBCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBClusterMessage"}, + "output":{ + "shape":"CreateDBClusterResult", + "resultWrapper":"CreateDBClusterResult" + }, + "errors":[ + {"shape":"DBClusterAlreadyExistsFault"}, + {"shape":"InsufficientStorageClusterCapacityFault"}, + {"shape":"DBClusterQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"InvalidDBSubnetGroupStateFault"}, + {"shape":"InvalidSubnet"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBClusterParameterGroupNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"DBClusterNotFoundFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"} + ], + "documentation":"

Creates a new Amazon Neptune DB cluster.

You can use the ReplicationSourceIdentifier parameter to create the DB cluster as a Read Replica of another DB cluster or Amazon Neptune DB instance. For cross-region replication where the DB cluster identified by ReplicationSourceIdentifier is encrypted, you must also specify the PreSignedUrl parameter.

" + }, + "CreateDBClusterParameterGroup":{ + "name":"CreateDBClusterParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBClusterParameterGroupMessage"}, + "output":{ + "shape":"CreateDBClusterParameterGroupResult", + "resultWrapper":"CreateDBClusterParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupQuotaExceededFault"}, + {"shape":"DBParameterGroupAlreadyExistsFault"} + ], + "documentation":"

Creates a new DB cluster parameter group.

Parameters in a DB cluster parameter group apply to all of the instances in a DB cluster.

A DB cluster parameter group is initially created with the default parameters for the database engine used by instances in the DB cluster. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBClusterParameterGroup. Once you've created a DB cluster parameter group, you need to associate it with your DB cluster using ModifyDBCluster. When you associate a new DB cluster parameter group with a running DB cluster, you need to reboot the DB instances in the DB cluster without failover for the new DB cluster parameter group and associated settings to take effect.

After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon Neptune to fully complete the create action before the DB cluster parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon Neptune console or the DescribeDBClusterParameters command to verify that your DB cluster parameter group has been created or modified.

" + }, + "CreateDBClusterSnapshot":{ + "name":"CreateDBClusterSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBClusterSnapshotMessage"}, + "output":{ + "shape":"CreateDBClusterSnapshotResult", + "resultWrapper":"CreateDBClusterSnapshotResult" + }, + "errors":[ + {"shape":"DBClusterSnapshotAlreadyExistsFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"DBClusterNotFoundFault"}, + {"shape":"SnapshotQuotaExceededFault"}, + {"shape":"InvalidDBClusterSnapshotStateFault"} + ], + "documentation":"

Creates a snapshot of a DB cluster.

" + }, + "CreateDBInstance":{ + "name":"CreateDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBInstanceMessage"}, + "output":{ + "shape":"CreateDBInstanceResult", + "resultWrapper":"CreateDBInstanceResult" + }, + "errors":[ + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"InstanceQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"InvalidSubnet"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"DBClusterNotFoundFault"}, + {"shape":"StorageTypeNotSupportedFault"}, + {"shape":"AuthorizationNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"DomainNotFoundFault"} + ], + "documentation":"

Creates a new DB instance.

" + }, + "CreateDBParameterGroup":{ + "name":"CreateDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBParameterGroupMessage"}, + "output":{ + "shape":"CreateDBParameterGroupResult", + "resultWrapper":"CreateDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupQuotaExceededFault"}, + {"shape":"DBParameterGroupAlreadyExistsFault"} + ], + "documentation":"

Creates a new DB parameter group.

A DB parameter group is initially created with the default parameters for the database engine used by the DB instance. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBParameterGroup. Once you've created a DB parameter group, you need to associate it with your DB instance using ModifyDBInstance. When you associate a new DB parameter group with a running DB instance, you need to reboot the DB instance without failover for the new DB parameter group and associated settings to take effect.

After you create a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon Neptune to fully complete the create action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon Neptune console or the DescribeDBParameters command to verify that your DB parameter group has been created or modified.

" + }, + "CreateDBSubnetGroup":{ + "name":"CreateDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBSubnetGroupMessage"}, + "output":{ + "shape":"CreateDBSubnetGroupResult", + "resultWrapper":"CreateDBSubnetGroupResult" + }, + "errors":[ + {"shape":"DBSubnetGroupAlreadyExistsFault"}, + {"shape":"DBSubnetGroupQuotaExceededFault"}, + {"shape":"DBSubnetQuotaExceededFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"} + ], + "documentation":"

Creates a new DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the AWS Region.

" + }, + "CreateEventSubscription":{ + "name":"CreateEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateEventSubscriptionMessage"}, + "output":{ + "shape":"CreateEventSubscriptionResult", + "resultWrapper":"CreateEventSubscriptionResult" + }, + "errors":[ + {"shape":"EventSubscriptionQuotaExceededFault"}, + {"shape":"SubscriptionAlreadyExistFault"}, + {"shape":"SNSInvalidTopicFault"}, + {"shape":"SNSNoAuthorizationFault"}, + {"shape":"SNSTopicArnNotFoundFault"}, + {"shape":"SubscriptionCategoryNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ], + "documentation":"

Creates an event notification subscription. This action requires a topic ARN (Amazon Resource Name) created by either the Neptune console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.

You can specify the type of source (SourceType) you want to be notified of, provide a list of Neptune sources (SourceIds) that triggers the events, and provide a list of event categories (EventCategories) for events you want to be notified of. For example, you can specify SourceType = db-instance, SourceIds = mydbinstance1, mydbinstance2 and EventCategories = Availability, Backup.

If you specify both the SourceType and SourceIds, such as SourceType = db-instance and SourceIdentifier = myDBInstance1, you are notified of all the db-instance events for the specified source. If you specify a SourceType but do not specify a SourceIdentifier, you receive notice of the events for that source type for all your Neptune sources. If you do not specify either the SourceType or the SourceIdentifier, you are notified of events generated from all Neptune sources belonging to your customer account.

" + }, + "DeleteDBCluster":{ + "name":"DeleteDBCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBClusterMessage"}, + "output":{ + "shape":"DeleteDBClusterResult", + "resultWrapper":"DeleteDBClusterResult" + }, + "errors":[ + {"shape":"DBClusterNotFoundFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"DBClusterSnapshotAlreadyExistsFault"}, + {"shape":"SnapshotQuotaExceededFault"}, + {"shape":"InvalidDBClusterSnapshotStateFault"} + ], + "documentation":"

The DeleteDBCluster action deletes a previously provisioned DB cluster. When you delete a DB cluster, all automated backups for that DB cluster are deleted and can't be recovered. Manual DB cluster snapshots of the specified DB cluster are not deleted.

" + }, + "DeleteDBClusterParameterGroup":{ + "name":"DeleteDBClusterParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBClusterParameterGroupMessage"}, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ], + "documentation":"

Deletes a specified DB cluster parameter group. The DB cluster parameter group to be deleted can't be associated with any DB clusters.

" + }, + "DeleteDBClusterSnapshot":{ + "name":"DeleteDBClusterSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBClusterSnapshotMessage"}, + "output":{ + "shape":"DeleteDBClusterSnapshotResult", + "resultWrapper":"DeleteDBClusterSnapshotResult" + }, + "errors":[ + {"shape":"InvalidDBClusterSnapshotStateFault"}, + {"shape":"DBClusterSnapshotNotFoundFault"} + ], + "documentation":"

Deletes a DB cluster snapshot. If the snapshot is being copied, the copy operation is terminated.

The DB cluster snapshot must be in the available state to be deleted.

" + }, + "DeleteDBInstance":{ + "name":"DeleteDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBInstanceMessage"}, + "output":{ + "shape":"DeleteDBInstanceResult", + "resultWrapper":"DeleteDBInstanceResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBSnapshotAlreadyExistsFault"}, + {"shape":"SnapshotQuotaExceededFault"}, + {"shape":"InvalidDBClusterStateFault"} + ], + "documentation":"

The DeleteDBInstance action deletes a previously provisioned DB instance. When you delete a DB instance, all automated backups for that instance are deleted and can't be recovered. Manual DB snapshots of the DB instance to be deleted by DeleteDBInstance are not deleted.

If you request a final DB snapshot the status of the Amazon Neptune DB instance is deleting until the DB snapshot is created. The API action DescribeDBInstance is used to monitor the status of this operation. The action can't be canceled or reverted once submitted.

Note that when a DB instance is in a failure state and has a status of failed, incompatible-restore, or incompatible-network, you can only delete it when the SkipFinalSnapshot parameter is set to true.

If the specified DB instance is part of a DB cluster, you can't delete the DB instance if both of the following conditions are true:

  • The DB cluster is a Read Replica of another DB cluster.

  • The DB instance is the only instance in the DB cluster.

To delete a DB instance in this case, first call the PromoteReadReplicaDBCluster API action to promote the DB cluster so it's no longer a Read Replica. After the promotion completes, then call the DeleteDBInstance API action to delete the final instance in the DB cluster.

" + }, + "DeleteDBParameterGroup":{ + "name":"DeleteDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBParameterGroupMessage"}, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ], + "documentation":"

Deletes a specified DBParameterGroup. The DBParameterGroup to be deleted can't be associated with any DB instances.

" + }, + "DeleteDBSubnetGroup":{ + "name":"DeleteDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBSubnetGroupMessage"}, + "errors":[ + {"shape":"InvalidDBSubnetGroupStateFault"}, + {"shape":"InvalidDBSubnetStateFault"}, + {"shape":"DBSubnetGroupNotFoundFault"} + ], + "documentation":"

Deletes a DB subnet group.

The specified database subnet group must not be associated with any DB instances.

" + }, + "DeleteEventSubscription":{ + "name":"DeleteEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEventSubscriptionMessage"}, + "output":{ + "shape":"DeleteEventSubscriptionResult", + "resultWrapper":"DeleteEventSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"InvalidEventSubscriptionStateFault"} + ], + "documentation":"

Deletes an event notification subscription.

" + }, + "DescribeDBClusterParameterGroups":{ + "name":"DescribeDBClusterParameterGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBClusterParameterGroupsMessage"}, + "output":{ + "shape":"DBClusterParameterGroupsMessage", + "resultWrapper":"DescribeDBClusterParameterGroupsResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ], + "documentation":"

Returns a list of DBClusterParameterGroup descriptions. If a DBClusterParameterGroupName parameter is specified, the list will contain only the description of the specified DB cluster parameter group.

" + }, + "DescribeDBClusterParameters":{ + "name":"DescribeDBClusterParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBClusterParametersMessage"}, + "output":{ + "shape":"DBClusterParameterGroupDetails", + "resultWrapper":"DescribeDBClusterParametersResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ], + "documentation":"

Returns the detailed parameter list for a particular DB cluster parameter group.

" + }, + "DescribeDBClusterSnapshotAttributes":{ + "name":"DescribeDBClusterSnapshotAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBClusterSnapshotAttributesMessage"}, + "output":{ + "shape":"DescribeDBClusterSnapshotAttributesResult", + "resultWrapper":"DescribeDBClusterSnapshotAttributesResult" + }, + "errors":[ + {"shape":"DBClusterSnapshotNotFoundFault"} + ], + "documentation":"

Returns a list of DB cluster snapshot attribute names and values for a manual DB cluster snapshot.

When sharing snapshots with other AWS accounts, DescribeDBClusterSnapshotAttributes returns the restore attribute and a list of IDs for the AWS accounts that are authorized to copy or restore the manual DB cluster snapshot. If all is included in the list of values for the restore attribute, then the manual DB cluster snapshot is public and can be copied or restored by all AWS accounts.

To add or remove access for an AWS account to copy or restore a manual DB cluster snapshot, or to make the manual DB cluster snapshot public or private, use the ModifyDBClusterSnapshotAttribute API action.

" + }, + "DescribeDBClusterSnapshots":{ + "name":"DescribeDBClusterSnapshots", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBClusterSnapshotsMessage"}, + "output":{ + "shape":"DBClusterSnapshotMessage", + "resultWrapper":"DescribeDBClusterSnapshotsResult" + }, + "errors":[ + {"shape":"DBClusterSnapshotNotFoundFault"} + ], + "documentation":"

Returns information about DB cluster snapshots. This API action supports pagination.

" + }, + "DescribeDBClusters":{ + "name":"DescribeDBClusters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBClustersMessage"}, + "output":{ + "shape":"DBClusterMessage", + "resultWrapper":"DescribeDBClustersResult" + }, + "errors":[ + {"shape":"DBClusterNotFoundFault"} + ], + "documentation":"

Returns information about provisioned DB clusters. This API supports pagination.

" + }, + "DescribeDBEngineVersions":{ + "name":"DescribeDBEngineVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBEngineVersionsMessage"}, + "output":{ + "shape":"DBEngineVersionMessage", + "resultWrapper":"DescribeDBEngineVersionsResult" + }, + "documentation":"

Returns a list of the available DB engines.

" + }, + "DescribeDBInstances":{ + "name":"DescribeDBInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBInstancesMessage"}, + "output":{ + "shape":"DBInstanceMessage", + "resultWrapper":"DescribeDBInstancesResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"} + ], + "documentation":"

Returns information about provisioned instances. This API supports pagination.

" + }, + "DescribeDBParameterGroups":{ + "name":"DescribeDBParameterGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBParameterGroupsMessage"}, + "output":{ + "shape":"DBParameterGroupsMessage", + "resultWrapper":"DescribeDBParameterGroupsResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ], + "documentation":"

Returns a list of DBParameterGroup descriptions. If a DBParameterGroupName is specified, the list will contain only the description of the specified DB parameter group.

" + }, + "DescribeDBParameters":{ + "name":"DescribeDBParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBParametersMessage"}, + "output":{ + "shape":"DBParameterGroupDetails", + "resultWrapper":"DescribeDBParametersResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"} + ], + "documentation":"

Returns the detailed parameter list for a particular DB parameter group.

" + }, + "DescribeDBSubnetGroups":{ + "name":"DescribeDBSubnetGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBSubnetGroupsMessage"}, + "output":{ + "shape":"DBSubnetGroupMessage", + "resultWrapper":"DescribeDBSubnetGroupsResult" + }, + "errors":[ + {"shape":"DBSubnetGroupNotFoundFault"} + ], + "documentation":"

Returns a list of DBSubnetGroup descriptions. If a DBSubnetGroupName is specified, the list will contain only the descriptions of the specified DBSubnetGroup.

For an overview of CIDR ranges, go to the Wikipedia Tutorial.

" + }, + "DescribeEngineDefaultClusterParameters":{ + "name":"DescribeEngineDefaultClusterParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEngineDefaultClusterParametersMessage"}, + "output":{ + "shape":"DescribeEngineDefaultClusterParametersResult", + "resultWrapper":"DescribeEngineDefaultClusterParametersResult" + }, + "documentation":"

Returns the default engine and system parameter information for the cluster database engine.

" + }, + "DescribeEngineDefaultParameters":{ + "name":"DescribeEngineDefaultParameters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEngineDefaultParametersMessage"}, + "output":{ + "shape":"DescribeEngineDefaultParametersResult", + "resultWrapper":"DescribeEngineDefaultParametersResult" + }, + "documentation":"

Returns the default engine and system parameter information for the specified database engine.

" + }, + "DescribeEventCategories":{ + "name":"DescribeEventCategories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventCategoriesMessage"}, + "output":{ + "shape":"EventCategoriesMessage", + "resultWrapper":"DescribeEventCategoriesResult" + }, + "documentation":"

Displays a list of categories for all event source types, or, if specified, for a specified source type.

" + }, + "DescribeEventSubscriptions":{ + "name":"DescribeEventSubscriptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventSubscriptionsMessage"}, + "output":{ + "shape":"EventSubscriptionsMessage", + "resultWrapper":"DescribeEventSubscriptionsResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"} + ], + "documentation":"

Lists all the subscription descriptions for a customer account. The description for a subscription includes SubscriptionName, SNSTopicARN, CustomerID, SourceType, SourceID, CreationTime, and Status.

If you specify a SubscriptionName, lists the description for that subscription.

" + }, + "DescribeEvents":{ + "name":"DescribeEvents", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventsMessage"}, + "output":{ + "shape":"EventsMessage", + "resultWrapper":"DescribeEventsResult" + }, + "documentation":"

Returns events related to DB instances, DB security groups, DB snapshots, and DB parameter groups for the past 14 days. Events specific to a particular DB instance, DB security group, database snapshot, or DB parameter group can be obtained by providing the name as a parameter. By default, the past hour of events are returned.

" + }, + "DescribeOrderableDBInstanceOptions":{ + "name":"DescribeOrderableDBInstanceOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOrderableDBInstanceOptionsMessage"}, + "output":{ + "shape":"OrderableDBInstanceOptionsMessage", + "resultWrapper":"DescribeOrderableDBInstanceOptionsResult" + }, + "documentation":"

Returns a list of orderable DB instance options for the specified engine.

" + }, + "DescribePendingMaintenanceActions":{ + "name":"DescribePendingMaintenanceActions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePendingMaintenanceActionsMessage"}, + "output":{ + "shape":"PendingMaintenanceActionsMessage", + "resultWrapper":"DescribePendingMaintenanceActionsResult" + }, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ], + "documentation":"

Returns a list of resources (for example, DB instances) that have at least one pending maintenance action.

" + }, + "DescribeValidDBInstanceModifications":{ + "name":"DescribeValidDBInstanceModifications", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeValidDBInstanceModificationsMessage"}, + "output":{ + "shape":"DescribeValidDBInstanceModificationsResult", + "resultWrapper":"DescribeValidDBInstanceModificationsResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InvalidDBInstanceStateFault"} + ], + "documentation":"

You can call DescribeValidDBInstanceModifications to learn what modifications you can make to your DB instance. You can use this information when you call ModifyDBInstance.

" + }, + "FailoverDBCluster":{ + "name":"FailoverDBCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"FailoverDBClusterMessage"}, + "output":{ + "shape":"FailoverDBClusterResult", + "resultWrapper":"FailoverDBClusterResult" + }, + "errors":[ + {"shape":"DBClusterNotFoundFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"InvalidDBInstanceStateFault"} + ], + "documentation":"

Forces a failover for a DB cluster.

A failover for a DB cluster promotes one of the Read Replicas (read-only instances) in the DB cluster to be the primary instance (the cluster writer).

Amazon Neptune will automatically fail over to a Read Replica, if one exists, when the primary instance fails. You can force a failover when you want to simulate a failure of a primary instance for testing. Because each instance in a DB cluster has its own endpoint address, you will need to clean up and re-establish any existing connections that use those endpoint addresses when the failover is complete.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceMessage"}, + "output":{ + "shape":"TagListMessage", + "resultWrapper":"ListTagsForResourceResult" + }, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"DBClusterNotFoundFault"} + ], + "documentation":"

Lists all tags on an Amazon Neptune resource.

" + }, + "ModifyDBCluster":{ + "name":"ModifyDBCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBClusterMessage"}, + "output":{ + "shape":"ModifyDBClusterResult", + "resultWrapper":"ModifyDBClusterResult" + }, + "errors":[ + {"shape":"DBClusterNotFoundFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidDBSubnetGroupStateFault"}, + {"shape":"InvalidSubnet"}, + {"shape":"DBClusterParameterGroupNotFoundFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBClusterAlreadyExistsFault"} + ], + "documentation":"

Modify a setting for a DB cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request.

" + }, + "ModifyDBClusterParameterGroup":{ + "name":"ModifyDBClusterParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBClusterParameterGroupMessage"}, + "output":{ + "shape":"DBClusterParameterGroupNameMessage", + "resultWrapper":"ModifyDBClusterParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"InvalidDBParameterGroupStateFault"} + ], + "documentation":"

Modifies the parameters of a DB cluster parameter group. To modify more than one parameter, submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

Changes to dynamic parameters are applied immediately. Changes to static parameters require a reboot without failover to the DB cluster associated with the parameter group before the change can take effect.

After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon Neptune to fully complete the create action before the parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon Neptune console or the DescribeDBClusterParameters command to verify that your DB cluster parameter group has been created or modified.

" + }, + "ModifyDBClusterSnapshotAttribute":{ + "name":"ModifyDBClusterSnapshotAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBClusterSnapshotAttributeMessage"}, + "output":{ + "shape":"ModifyDBClusterSnapshotAttributeResult", + "resultWrapper":"ModifyDBClusterSnapshotAttributeResult" + }, + "errors":[ + {"shape":"DBClusterSnapshotNotFoundFault"}, + {"shape":"InvalidDBClusterSnapshotStateFault"}, + {"shape":"SharedSnapshotQuotaExceededFault"} + ], + "documentation":"

Adds an attribute and values to, or removes an attribute and values from, a manual DB cluster snapshot.

To share a manual DB cluster snapshot with other AWS accounts, specify restore as the AttributeName and use the ValuesToAdd parameter to add a list of IDs of the AWS accounts that are authorized to restore the manual DB cluster snapshot. Use the value all to make the manual DB cluster snapshot public, which means that it can be copied or restored by all AWS accounts. Do not add the all value for any manual DB cluster snapshots that contain private information that you don't want available to all AWS accounts. If a manual DB cluster snapshot is encrypted, it can be shared, but only by specifying a list of authorized AWS account IDs for the ValuesToAdd parameter. You can't use all as a value for that parameter in this case.

To view which AWS accounts have access to copy or restore a manual DB cluster snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API action.

" + }, + "ModifyDBInstance":{ + "name":"ModifyDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBInstanceMessage"}, + "output":{ + "shape":"ModifyDBInstanceResult", + "resultWrapper":"ModifyDBInstanceResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"InvalidDBSecurityGroupStateFault"}, + {"shape":"DBInstanceAlreadyExistsFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSecurityGroupNotFoundFault"}, + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"ProvisionedIopsNotAvailableInAZFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"DBUpgradeDependencyFailureFault"}, + {"shape":"StorageTypeNotSupportedFault"}, + {"shape":"AuthorizationNotFoundFault"}, + {"shape":"CertificateNotFoundFault"}, + {"shape":"DomainNotFoundFault"} + ], + "documentation":"

Modifies settings for a DB instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. To learn what modifications you can make to your DB instance, call DescribeValidDBInstanceModifications before you call ModifyDBInstance.

" + }, + "ModifyDBParameterGroup":{ + "name":"ModifyDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBParameterGroupMessage"}, + "output":{ + "shape":"DBParameterGroupNameMessage", + "resultWrapper":"ModifyDBParameterGroupResult" + }, + "errors":[ + {"shape":"DBParameterGroupNotFoundFault"}, + {"shape":"InvalidDBParameterGroupStateFault"} + ], + "documentation":"

Modifies the parameters of a DB parameter group. To modify more than one parameter, submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

Changes to dynamic parameters are applied immediately. Changes to static parameters require a reboot without failover to the DB instance associated with the parameter group before the change can take effect.

After you modify a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon Neptune to fully complete the modify action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon Neptune console or the DescribeDBParameters command to verify that your DB parameter group has been created or modified.

" + }, + "ModifyDBSubnetGroup":{ + "name":"ModifyDBSubnetGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBSubnetGroupMessage"}, + "output":{ + "shape":"ModifyDBSubnetGroupResult", + "resultWrapper":"ModifyDBSubnetGroupResult" + }, + "errors":[ + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSubnetQuotaExceededFault"}, + {"shape":"SubnetAlreadyInUse"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, + {"shape":"InvalidSubnet"} + ], + "documentation":"

Modifies an existing DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the AWS Region.

" + }, + "ModifyEventSubscription":{ + "name":"ModifyEventSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyEventSubscriptionMessage"}, + "output":{ + "shape":"ModifyEventSubscriptionResult", + "resultWrapper":"ModifyEventSubscriptionResult" + }, + "errors":[ + {"shape":"EventSubscriptionQuotaExceededFault"}, + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SNSInvalidTopicFault"}, + {"shape":"SNSNoAuthorizationFault"}, + {"shape":"SNSTopicArnNotFoundFault"}, + {"shape":"SubscriptionCategoryNotFoundFault"} + ], + "documentation":"

Modifies an existing event notification subscription. Note that you can't modify the source identifiers using this call; to change source identifiers for a subscription, use the AddSourceIdentifierToSubscription and RemoveSourceIdentifierFromSubscription calls.

You can see a list of the event categories for a given SourceType by using the DescribeEventCategories action.

" + }, + "PromoteReadReplicaDBCluster":{ + "name":"PromoteReadReplicaDBCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PromoteReadReplicaDBClusterMessage"}, + "output":{ + "shape":"PromoteReadReplicaDBClusterResult", + "resultWrapper":"PromoteReadReplicaDBClusterResult" + }, + "errors":[ + {"shape":"DBClusterNotFoundFault"}, + {"shape":"InvalidDBClusterStateFault"} + ], + "documentation":"

Promotes a Read Replica DB cluster to a standalone DB cluster.

" + }, + "RebootDBInstance":{ + "name":"RebootDBInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootDBInstanceMessage"}, + "output":{ + "shape":"RebootDBInstanceResult", + "resultWrapper":"RebootDBInstanceResult" + }, + "errors":[ + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"DBInstanceNotFoundFault"} + ], + "documentation":"

You might need to reboot your DB instance, usually for maintenance reasons. For example, if you make certain modifications, or if you change the DB parameter group associated with the DB instance, you must reboot the instance for the changes to take effect.

Rebooting a DB instance restarts the database engine service. Rebooting a DB instance results in a momentary outage, during which the DB instance status is set to rebooting.

" + }, + "RemoveRoleFromDBCluster":{ + "name":"RemoveRoleFromDBCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveRoleFromDBClusterMessage"}, + "errors":[ + {"shape":"DBClusterNotFoundFault"}, + {"shape":"DBClusterRoleNotFoundFault"}, + {"shape":"InvalidDBClusterStateFault"} + ], + "documentation":"

Disassociates an Identity and Access Management (IAM) role from a DB cluster.

" + }, + "RemoveSourceIdentifierFromSubscription":{ + "name":"RemoveSourceIdentifierFromSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveSourceIdentifierFromSubscriptionMessage"}, + "output":{ + "shape":"RemoveSourceIdentifierFromSubscriptionResult", + "resultWrapper":"RemoveSourceIdentifierFromSubscriptionResult" + }, + "errors":[ + {"shape":"SubscriptionNotFoundFault"}, + {"shape":"SourceNotFoundFault"} + ], + "documentation":"

Removes a source identifier from an existing event notification subscription.

" + }, + "RemoveTagsFromResource":{ + "name":"RemoveTagsFromResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveTagsFromResourceMessage"}, + "errors":[ + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"DBClusterNotFoundFault"} + ], + "documentation":"

Removes metadata tags from an Amazon Neptune resource.

" + }, + "ResetDBClusterParameterGroup":{ + "name":"ResetDBClusterParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetDBClusterParameterGroupMessage"}, + "output":{ + "shape":"DBClusterParameterGroupNameMessage", + "resultWrapper":"ResetDBClusterParameterGroupResult" + }, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ], + "documentation":"

Modifies the parameters of a DB cluster parameter group to the default value. To reset specific parameters submit a list of the following: ParameterName and ApplyMethod. To reset the entire DB cluster parameter group, specify the DBClusterParameterGroupName and ResetAllParameters parameters.

When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to pending-reboot to take effect on the next DB instance restart or RebootDBInstance request. You must call RebootDBInstance for every DB instance in your DB cluster that you want the updated static parameter to apply to.

" + }, + "ResetDBParameterGroup":{ + "name":"ResetDBParameterGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetDBParameterGroupMessage"}, + "output":{ + "shape":"DBParameterGroupNameMessage", + "resultWrapper":"ResetDBParameterGroupResult" + }, + "errors":[ + {"shape":"InvalidDBParameterGroupStateFault"}, + {"shape":"DBParameterGroupNotFoundFault"} + ], + "documentation":"

Modifies the parameters of a DB parameter group to the engine/system default value. To reset specific parameters, provide a list of the following: ParameterName and ApplyMethod. To reset the entire DB parameter group, specify the DBParameterGroup name and ResetAllParameters parameters. When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to pending-reboot to take effect on the next DB instance restart or RebootDBInstance request.

" + }, + "RestoreDBClusterFromSnapshot":{ + "name":"RestoreDBClusterFromSnapshot", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreDBClusterFromSnapshotMessage"}, + "output":{ + "shape":"RestoreDBClusterFromSnapshotResult", + "resultWrapper":"RestoreDBClusterFromSnapshotResult" + }, + "errors":[ + {"shape":"DBClusterAlreadyExistsFault"}, + {"shape":"DBClusterQuotaExceededFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"DBSnapshotNotFoundFault"}, + {"shape":"DBClusterSnapshotNotFoundFault"}, + {"shape":"InsufficientDBClusterCapacityFault"}, + {"shape":"InsufficientStorageClusterCapacityFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"InvalidDBClusterSnapshotStateFault"}, + {"shape":"StorageQuotaExceededFault"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"InvalidSubnet"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"} + ], + "documentation":"

Creates a new DB cluster from a DB snapshot or DB cluster snapshot.

If a DB snapshot is specified, the target DB cluster is created from the source DB snapshot with a default configuration and default security group.

If a DB cluster snapshot is specified, the target DB cluster is created from the source DB cluster restore point with the same configuration as the original source DB cluster, except that the new DB cluster is created with the default security group.

" + }, + "RestoreDBClusterToPointInTime":{ + "name":"RestoreDBClusterToPointInTime", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreDBClusterToPointInTimeMessage"}, + "output":{ + "shape":"RestoreDBClusterToPointInTimeResult", + "resultWrapper":"RestoreDBClusterToPointInTimeResult" + }, + "errors":[ + {"shape":"DBClusterAlreadyExistsFault"}, + {"shape":"DBClusterNotFoundFault"}, + {"shape":"DBClusterQuotaExceededFault"}, + {"shape":"DBClusterSnapshotNotFoundFault"}, + {"shape":"DBSubnetGroupNotFoundFault"}, + {"shape":"InsufficientDBClusterCapacityFault"}, + {"shape":"InsufficientStorageClusterCapacityFault"}, + {"shape":"InvalidDBClusterSnapshotStateFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"InvalidDBSnapshotStateFault"}, + {"shape":"InvalidRestoreFault"}, + {"shape":"InvalidSubnet"}, + {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"OptionGroupNotFoundFault"}, + {"shape":"StorageQuotaExceededFault"} + ], + "documentation":"

Restores a DB cluster to an arbitrary point in time. Users can restore to any point in time before LatestRestorableTime for up to BackupRetentionPeriod days. The target DB cluster is created from the source DB cluster with the same configuration as the original DB cluster, except that the new DB cluster is created with the default DB security group.

This action only restores the DB cluster, not the DB instances for that DB cluster. You must invoke the CreateDBInstance action to create DB instances for the restored DB cluster, specifying the identifier of the restored DB cluster in DBClusterIdentifier. You can create DB instances only after the RestoreDBClusterToPointInTime action has completed and the DB cluster is available.

" + } + }, + "shapes":{ + "AddRoleToDBClusterMessage":{ + "type":"structure", + "required":[ + "DBClusterIdentifier", + "RoleArn" + ], + "members":{ + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The name of the DB cluster to associate the IAM role with.

" + }, + "RoleArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to associate with the Neptune DB cluster, for example arn:aws:iam::123456789012:role/NeptuneAccessRole.

" + } + } + }, + "AddSourceIdentifierToSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SourceIdentifier" + ], + "members":{ + "SubscriptionName":{ + "shape":"String", + "documentation":"

The name of the event notification subscription you want to add a source identifier to.

" + }, + "SourceIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the event source to be added.

Constraints:

  • If the source type is a DB instance, then a DBInstanceIdentifier must be supplied.

  • If the source type is a DB security group, a DBSecurityGroupName must be supplied.

  • If the source type is a DB parameter group, a DBParameterGroupName must be supplied.

  • If the source type is a DB snapshot, a DBSnapshotIdentifier must be supplied.

" + } + }, + "documentation":"

" + }, + "AddSourceIdentifierToSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "AddTagsToResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "Tags" + ], + "members":{ + "ResourceName":{ + "shape":"String", + "documentation":"

The Amazon Neptune resource that the tags are added to. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an Amazon Resource Name (ARN).

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to be assigned to the Amazon Neptune resource.

" + } + }, + "documentation":"

" + }, + "ApplyMethod":{ + "type":"string", + "enum":[ + "immediate", + "pending-reboot" + ] + }, + "ApplyPendingMaintenanceActionMessage":{ + "type":"structure", + "required":[ + "ResourceIdentifier", + "ApplyAction", + "OptInType" + ], + "members":{ + "ResourceIdentifier":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the resource that the pending maintenance action applies to. For information about creating an ARN, see Constructing an Amazon Resource Name (ARN).

" + }, + "ApplyAction":{ + "shape":"String", + "documentation":"

The pending maintenance action to apply to this resource.

Valid values: system-update, db-upgrade

" + }, + "OptInType":{ + "shape":"String", + "documentation":"

A value that specifies the type of opt-in request, or undoes an opt-in request. An opt-in request of type immediate can't be undone.

Valid values:

  • immediate - Apply the maintenance action immediately.

  • next-maintenance - Apply the maintenance action during the next maintenance window for the resource.

  • undo-opt-in - Cancel any existing next-maintenance opt-in requests.

" + } + }, + "documentation":"

" + }, + "ApplyPendingMaintenanceActionResult":{ + "type":"structure", + "members":{ + "ResourcePendingMaintenanceActions":{"shape":"ResourcePendingMaintenanceActions"} + } + }, + "AttributeValueList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"AttributeValue" + } + }, + "AuthorizationNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

Specified CIDRIP or EC2 security group is not authorized for the specified DB security group.

Neptune may not also be authorized via IAM to perform necessary actions on your behalf.

", + "error":{ + "code":"AuthorizationNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "AvailabilityZone":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"String", + "documentation":"

The name of the availability zone.

" + } + }, + "documentation":"

Contains Availability Zone information.

This data type is used as an element in the following data type:

", + "wrapper":true + }, + "AvailabilityZoneList":{ + "type":"list", + "member":{ + "shape":"AvailabilityZone", + "locationName":"AvailabilityZone" + } + }, + "AvailabilityZones":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"AvailabilityZone" + } + }, + "Boolean":{"type":"boolean"}, + "BooleanOptional":{"type":"boolean"}, + "CertificateNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

CertificateIdentifier does not refer to an existing certificate.

", + "error":{ + "code":"CertificateNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "CharacterSet":{ + "type":"structure", + "members":{ + "CharacterSetName":{ + "shape":"String", + "documentation":"

The name of the character set.

" + }, + "CharacterSetDescription":{ + "shape":"String", + "documentation":"

The description of the character set.

" + } + }, + "documentation":"

This data type is used as a response element in the action DescribeDBEngineVersions.

" + }, + "CloudwatchLogsExportConfiguration":{ + "type":"structure", + "members":{ + "EnableLogTypes":{ + "shape":"LogTypeList", + "documentation":"

The list of log types to enable.

" + }, + "DisableLogTypes":{ + "shape":"LogTypeList", + "documentation":"

The list of log types to disable.

" + } + }, + "documentation":"

The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB instance or DB cluster.

" + }, + "CopyDBClusterParameterGroupMessage":{ + "type":"structure", + "required":[ + "SourceDBClusterParameterGroupIdentifier", + "TargetDBClusterParameterGroupIdentifier", + "TargetDBClusterParameterGroupDescription" + ], + "members":{ + "SourceDBClusterParameterGroupIdentifier":{ + "shape":"String", + "documentation":"

The identifier or Amazon Resource Name (ARN) for the source DB cluster parameter group. For information about creating an ARN, see Constructing an Amazon Resource Name (ARN).

Constraints:

  • Must specify a valid DB cluster parameter group.

  • If the source DB cluster parameter group is in the same AWS Region as the copy, specify a valid DB parameter group identifier, for example my-db-cluster-param-group, or a valid ARN.

  • If the source DB parameter group is in a different AWS Region than the copy, specify a valid DB cluster parameter group ARN, for example arn:aws:rds:us-east-1:123456789012:cluster-pg:custom-cluster-group1.

" + }, + "TargetDBClusterParameterGroupIdentifier":{ + "shape":"String", + "documentation":"

The identifier for the copied DB cluster parameter group.

Constraints:

  • Cannot be null, empty, or blank

  • Must contain from 1 to 255 letters, numbers, or hyphens

  • First character must be a letter

  • Cannot end with a hyphen or contain two consecutive hyphens

Example: my-cluster-param-group1

" + }, + "TargetDBClusterParameterGroupDescription":{ + "shape":"String", + "documentation":"

A description for the copied DB cluster parameter group.

" + }, + "Tags":{"shape":"TagList"} + } + }, + "CopyDBClusterParameterGroupResult":{ + "type":"structure", + "members":{ + "DBClusterParameterGroup":{"shape":"DBClusterParameterGroup"} + } + }, + "CopyDBClusterSnapshotMessage":{ + "type":"structure", + "required":[ + "SourceDBClusterSnapshotIdentifier", + "TargetDBClusterSnapshotIdentifier" + ], + "members":{ + "SourceDBClusterSnapshotIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the DB cluster snapshot to copy. This parameter is not case-sensitive.

You can't copy an encrypted, shared DB cluster snapshot from one AWS Region to another.

Constraints:

  • Must specify a valid system snapshot in the \"available\" state.

  • If the source snapshot is in the same AWS Region as the copy, specify a valid DB snapshot identifier.

  • If the source snapshot is in a different AWS Region than the copy, specify a valid DB cluster snapshot ARN.

Example: my-cluster-snapshot1

" + }, + "TargetDBClusterSnapshotIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the new DB cluster snapshot to create from the source DB cluster snapshot. This parameter is not case-sensitive.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: my-cluster-snapshot2

" + }, + "KmsKeyId":{ + "shape":"String", + "documentation":"

The AWS KMS key ID for an encrypted DB cluster snapshot. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

If you copy an unencrypted DB cluster snapshot and specify a value for the KmsKeyId parameter, Amazon Neptune encrypts the target DB cluster snapshot using the specified KMS encryption key.

If you copy an encrypted DB cluster snapshot from your AWS account, you can specify a value for KmsKeyId to encrypt the copy with a new KMS encryption key. If you don't specify a value for KmsKeyId, then the copy of the DB cluster snapshot is encrypted with the same KMS key as the source DB cluster snapshot.

If you copy an encrypted DB cluster snapshot that is shared from another AWS account, then you must specify a value for KmsKeyId.

To copy an encrypted DB cluster snapshot to another AWS Region, you must set KmsKeyId to the KMS key ID you want to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region. KMS encryption keys are specific to the AWS Region that they are created in, and you can't use encryption keys from one AWS Region in another AWS Region.

" + }, + "PreSignedUrl":{ + "shape":"String", + "documentation":"

The URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot API action in the AWS Region that contains the source DB cluster snapshot to copy. The PreSignedUrl parameter must be used when copying an encrypted DB cluster snapshot from another AWS Region.

The pre-signed URL must be a valid request for the CopyDBClusterSnapshot API action that can be executed in the source AWS Region that contains the encrypted DB cluster snapshot to be copied. The pre-signed URL request must contain the following parameter values:

  • KmsKeyId - The AWS KMS key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region. This is the same identifier for both the CopyDBClusterSnapshot action that is called in the destination AWS Region, and the action contained in the pre-signed URL.

  • DestinationRegion - The name of the AWS Region that the DB cluster snapshot will be created in.

  • SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region. For example, if you are copying an encrypted DB cluster snapshot from the us-west-2 AWS Region, then your SourceDBClusterSnapshotIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:neptune-cluster1-snapshot-20161115.

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.

" + }, + "CopyTags":{ + "shape":"BooleanOptional", + "documentation":"

True to copy all tags from the source DB cluster snapshot to the target DB cluster snapshot, and otherwise false. The default is false.

" + }, + "Tags":{"shape":"TagList"} + }, + "documentation":"

" + }, + "CopyDBClusterSnapshotResult":{ + "type":"structure", + "members":{ + "DBClusterSnapshot":{"shape":"DBClusterSnapshot"} + } + }, + "CopyDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "SourceDBParameterGroupIdentifier", + "TargetDBParameterGroupIdentifier", + "TargetDBParameterGroupDescription" + ], + "members":{ + "SourceDBParameterGroupIdentifier":{ + "shape":"String", + "documentation":"

The identifier or ARN for the source DB parameter group. For information about creating an ARN, see Constructing an Amazon Resource Name (ARN).

Constraints:

  • Must specify a valid DB parameter group.

  • Must specify a valid DB parameter group identifier, for example my-db-param-group, or a valid ARN.

" + }, + "TargetDBParameterGroupIdentifier":{ + "shape":"String", + "documentation":"

The identifier for the copied DB parameter group.

Constraints:

  • Cannot be null, empty, or blank

  • Must contain from 1 to 255 letters, numbers, or hyphens

  • First character must be a letter

  • Cannot end with a hyphen or contain two consecutive hyphens

Example: my-db-parameter-group

" + }, + "TargetDBParameterGroupDescription":{ + "shape":"String", + "documentation":"

A description for the copied DB parameter group.

" + }, + "Tags":{"shape":"TagList"} + }, + "documentation":"

" + }, + "CopyDBParameterGroupResult":{ + "type":"structure", + "members":{ + "DBParameterGroup":{"shape":"DBParameterGroup"} + } + }, + "CreateDBClusterMessage":{ + "type":"structure", + "required":[ + "DBClusterIdentifier", + "Engine" + ], + "members":{ + "AvailabilityZones":{ + "shape":"AvailabilityZones", + "documentation":"

A list of EC2 Availability Zones that instances in the DB cluster can be created in.

" + }, + "BackupRetentionPeriod":{ + "shape":"IntegerOptional", + "documentation":"

The number of days for which automated backups are retained. You must specify a minimum value of 1.

Default: 1

Constraints:

  • Must be a value from 1 to 35

" + }, + "CharacterSetName":{ + "shape":"String", + "documentation":"

A value that indicates that the DB cluster should be associated with the specified CharacterSet.

" + }, + "DatabaseName":{ + "shape":"String", + "documentation":"

The name for your database of up to 64 alpha-numeric characters. If you do not provide a name, Amazon Neptune will not create a database in the DB cluster you are creating.

" + }, + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The DB cluster identifier. This parameter is stored as a lowercase string.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: my-cluster1

" + }, + "DBClusterParameterGroupName":{ + "shape":"String", + "documentation":"

The name of the DB cluster parameter group to associate with this DB cluster. If this argument is omitted, the default is used.

Constraints:

  • If supplied, must match the name of an existing DBClusterParameterGroup.

" + }, + "VpcSecurityGroupIds":{ + "shape":"VpcSecurityGroupIdList", + "documentation":"

A list of EC2 VPC security groups to associate with this DB cluster.

" + }, + "DBSubnetGroupName":{ + "shape":"String", + "documentation":"

A DB subnet group to associate with this DB cluster.

Constraints: Must match the name of an existing DBSubnetGroup. Must not be default.

Example: mySubnetgroup

" + }, + "Engine":{ + "shape":"String", + "documentation":"

The name of the database engine to be used for this DB cluster.

Valid Values: neptune

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

The version number of the database engine to use.

Example: 1.0.1

" + }, + "Port":{ + "shape":"IntegerOptional", + "documentation":"

The port number on which the instances in the DB cluster accept connections.

Default: 8182

" + }, + "MasterUsername":{ + "shape":"String", + "documentation":"

The name of the master user for the DB cluster.

Constraints:

  • Must be 1 to 16 letters or numbers.

  • First character must be a letter.

  • Cannot be a reserved word for the chosen database engine.

" + }, + "MasterUserPassword":{ + "shape":"String", + "documentation":"

The password for the master database user. This password can contain any printable ASCII character except \"/\", \"\"\", or \"@\".

Constraints: Must contain from 8 to 41 characters.

" + }, + "OptionGroupName":{ + "shape":"String", + "documentation":"

A value that indicates that the DB cluster should be associated with the specified option group.

Permanent options can't be removed from an option group. The option group can't be removed from a DB cluster once it is associated with a DB cluster.

" + }, + "PreferredBackupWindow":{ + "shape":"String", + "documentation":"

The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter.

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon Neptune User Guide.

Constraints:

  • Must be in the format hh24:mi-hh24:mi.

  • Must be in Universal Coordinated Time (UTC).

  • Must not conflict with the preferred maintenance window.

  • Must be at least 30 minutes.

" + }, + "PreferredMaintenanceWindow":{ + "shape":"String", + "documentation":"

The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

Format: ddd:hh24:mi-ddd:hh24:mi

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon Neptune User Guide.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.

Constraints: Minimum 30-minute window.

" + }, + "ReplicationSourceIdentifier":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a Read Replica.

" + }, + "Tags":{"shape":"TagList"}, + "StorageEncrypted":{ + "shape":"BooleanOptional", + "documentation":"

Specifies whether the DB cluster is encrypted.

" + }, + "KmsKeyId":{ + "shape":"String", + "documentation":"

The AWS KMS key identifier for an encrypted DB cluster.

The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are creating a DB cluster with the same AWS account that owns the KMS encryption key used to encrypt the new DB cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.

If an encryption key is not specified in KmsKeyId:

  • If ReplicationSourceIdentifier identifies an encrypted source, then Amazon Neptune will use the encryption key used to encrypt the source. Otherwise, Amazon Neptune will use your default encryption key.

  • If the StorageEncrypted parameter is true and ReplicationSourceIdentifier is not specified, then Amazon Neptune will use your default encryption key.

AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

If you create a Read Replica of an encrypted DB cluster in another AWS Region, you must set KmsKeyId to a KMS key ID that is valid in the destination AWS Region. This key is used to encrypt the Read Replica in that AWS Region.

" + }, + "PreSignedUrl":{ + "shape":"String", + "documentation":"

A URL that contains a Signature Version 4 signed request for the CreateDBCluster action to be called in the source AWS Region where the DB cluster is replicated from. You only need to specify PreSignedUrl when you are performing cross-region replication from an encrypted DB cluster.

The pre-signed URL must be a valid request for the CreateDBCluster API action that can be executed in the source AWS Region that contains the encrypted DB cluster to be copied.

The pre-signed URL request must contain the following parameter values:

  • KmsKeyId - The AWS KMS key identifier for the key to use to encrypt the copy of the DB cluster in the destination AWS Region. This should refer to the same KMS key for both the CreateDBCluster action that is called in the destination AWS Region, and the action contained in the pre-signed URL.

  • DestinationRegion - The name of the AWS Region that Read Replica will be created in.

  • ReplicationSourceIdentifier - The DB cluster identifier for the encrypted DB cluster to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region. For example, if you are copying an encrypted DB cluster from the us-west-2 AWS Region, then your ReplicationSourceIdentifier would look like Example: arn:aws:rds:us-west-2:123456789012:cluster:neptune-cluster1.

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.

" + }, + "EnableIAMDatabaseAuthentication":{ + "shape":"BooleanOptional", + "documentation":"

True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.

Default: false

" + } + }, + "documentation":"

" + }, + "CreateDBClusterParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBClusterParameterGroupName", + "DBParameterGroupFamily", + "Description" + ], + "members":{ + "DBClusterParameterGroupName":{ + "shape":"String", + "documentation":"

The name of the DB cluster parameter group.

Constraints:

  • Must match the name of an existing DBClusterParameterGroup.

This value is stored as a lowercase string.

" + }, + "DBParameterGroupFamily":{ + "shape":"String", + "documentation":"

The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family.

" + }, + "Description":{ + "shape":"String", + "documentation":"

The description for the DB cluster parameter group.

" + }, + "Tags":{"shape":"TagList"} + }, + "documentation":"

" + }, + "CreateDBClusterParameterGroupResult":{ + "type":"structure", + "members":{ + "DBClusterParameterGroup":{"shape":"DBClusterParameterGroup"} + } + }, + "CreateDBClusterResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, + "CreateDBClusterSnapshotMessage":{ + "type":"structure", + "required":[ + "DBClusterSnapshotIdentifier", + "DBClusterIdentifier" + ], + "members":{ + "DBClusterSnapshotIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the DB cluster snapshot. This parameter is stored as a lowercase string.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: my-cluster1-snapshot1

" + }, + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the DB cluster to create a snapshot for. This parameter is not case-sensitive.

Constraints:

  • Must match the identifier of an existing DBCluster.

Example: my-cluster1

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to be assigned to the DB cluster snapshot.

" + } + }, + "documentation":"

" + }, + "CreateDBClusterSnapshotResult":{ + "type":"structure", + "members":{ + "DBClusterSnapshot":{"shape":"DBClusterSnapshot"} + } + }, + "CreateDBInstanceMessage":{ + "type":"structure", + "required":[ + "DBInstanceIdentifier", + "DBInstanceClass", + "Engine" + ], + "members":{ + "DBName":{ + "shape":"String", + "documentation":"

The database name.

Type: String

" + }, + "DBInstanceIdentifier":{ + "shape":"String", + "documentation":"

The DB instance identifier. This parameter is stored as a lowercase string.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: mydbinstance

" + }, + "AllocatedStorage":{ + "shape":"IntegerOptional", + "documentation":"

The amount of storage (in gibibytes) to allocate for the DB instance.

Type: Integer

Not applicable. Neptune cluster volumes automatically grow as the amount of data in your database increases, though you are only charged for the space that you use in a Neptune cluster volume.

" + }, + "DBInstanceClass":{ + "shape":"String", + "documentation":"

The compute and memory capacity of the DB instance, for example, db.m4.large. Not all DB instance classes are available in all AWS Regions.

" + }, + "Engine":{ + "shape":"String", + "documentation":"

The name of the database engine to be used for this instance.

Valid Values: neptune

" + }, + "MasterUsername":{ + "shape":"String", + "documentation":"

The name for the master user. Not used.

" + }, + "MasterUserPassword":{ + "shape":"String", + "documentation":"

The password for the master user. The password can include any printable ASCII character except \"/\", \"\"\", or \"@\".

Not used.

" + }, + "DBSecurityGroups":{ + "shape":"DBSecurityGroupNameList", + "documentation":"

A list of DB security groups to associate with this DB instance.

Default: The default DB security group for the database engine.

" + }, + "VpcSecurityGroupIds":{ + "shape":"VpcSecurityGroupIdList", + "documentation":"

A list of EC2 VPC security groups to associate with this DB instance.

Not applicable. The associated list of EC2 VPC security groups is managed by the DB cluster. For more information, see CreateDBCluster.

Default: The default EC2 VPC security group for the DB subnet group's VPC.

" + }, + "AvailabilityZone":{ + "shape":"String", + "documentation":"

The EC2 Availability Zone that the DB instance is created in.

Default: A random, system-chosen Availability Zone in the endpoint's AWS Region.

Example: us-east-1d

Constraint: The AvailabilityZone parameter can't be specified if the MultiAZ parameter is set to true. The specified Availability Zone must be in the same AWS Region as the current endpoint.

" + }, + "DBSubnetGroupName":{ + "shape":"String", + "documentation":"

A DB subnet group to associate with this DB instance.

If there is no DB subnet group, then it is a non-VPC DB instance.

" + }, + "PreferredMaintenanceWindow":{ + "shape":"String", + "documentation":"

The time range each week during which system maintenance can occur, in Universal Coordinated Time (UTC).

Format: ddd:hh24:mi-ddd:hh24:mi

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.

Constraints: Minimum 30-minute window.

" + }, + "DBParameterGroupName":{ + "shape":"String", + "documentation":"

The name of the DB parameter group to associate with this DB instance. If this argument is omitted, the default DBParameterGroup for the specified engine is used.

Constraints:

  • Must be 1 to 255 letters, numbers, or hyphens.

  • First character must be a letter

  • Cannot end with a hyphen or contain two consecutive hyphens

" + }, + "BackupRetentionPeriod":{ + "shape":"IntegerOptional", + "documentation":"

The number of days for which automated backups are retained.

Not applicable. The retention period for automated backups is managed by the DB cluster. For more information, see CreateDBCluster.

Default: 1

Constraints:

  • Must be a value from 0 to 35

  • Cannot be set to 0 if the DB instance is a source to Read Replicas

" + }, + "PreferredBackupWindow":{ + "shape":"String", + "documentation":"

The daily time range during which automated backups are created.

Not applicable. The daily time range for creating automated backups is managed by the DB cluster. For more information, see CreateDBCluster.

" + }, + "Port":{ + "shape":"IntegerOptional", + "documentation":"

The port number on which the database accepts connections.

Not applicable. The port is managed by the DB cluster. For more information, see CreateDBCluster.

Default: 8182

Type: Integer

" + }, + "MultiAZ":{ + "shape":"BooleanOptional", + "documentation":"

Specifies if the DB instance is a Multi-AZ deployment. You can't set the AvailabilityZone parameter if the MultiAZ parameter is set to true.

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

The version number of the database engine to use.

" + }, + "AutoMinorVersionUpgrade":{ + "shape":"BooleanOptional", + "documentation":"

Indicates that minor engine upgrades are applied automatically to the DB instance during the maintenance window.

Default: true

" + }, + "LicenseModel":{ + "shape":"String", + "documentation":"

License model information for this DB instance.

Valid values: license-included | bring-your-own-license | general-public-license

" + }, + "Iops":{ + "shape":"IntegerOptional", + "documentation":"

The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.

" + }, + "OptionGroupName":{ + "shape":"String", + "documentation":"

Indicates that the DB instance should be associated with the specified option group.

Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group, and that option group can't be removed from a DB instance once it is associated with a DB instance

" + }, + "CharacterSetName":{ + "shape":"String", + "documentation":"

Indicates that the DB instance should be associated with the specified CharacterSet.

Not applicable. The character set is managed by the DB cluster. For more information, see CreateDBCluster.

" + }, + "PubliclyAccessible":{ + "shape":"BooleanOptional", + "documentation":"

This parameter is not supported.

", + "deprecated":true + }, + "Tags":{"shape":"TagList"}, + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the DB cluster that the instance will belong to.

For information on creating a DB cluster, see CreateDBCluster.

Type: String

" + }, + "StorageType":{ + "shape":"String", + "documentation":"

Specifies the storage type to be associated with the DB instance.

Not applicable. Storage is managed by the DB Cluster.

" + }, + "TdeCredentialArn":{ + "shape":"String", + "documentation":"

The ARN from the key store with which to associate the instance for TDE encryption.

" + }, + "TdeCredentialPassword":{ + "shape":"String", + "documentation":"

The password for the given ARN from the key store in order to access the device.

" + }, + "StorageEncrypted":{ + "shape":"BooleanOptional", + "documentation":"

Specifies whether the DB instance is encrypted.

Not applicable. The encryption for DB instances is managed by the DB cluster. For more information, see CreateDBCluster.

Default: false

" + }, + "KmsKeyId":{ + "shape":"String", + "documentation":"

The AWS KMS key identifier for an encrypted DB instance.

The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are creating a DB instance with the same AWS account that owns the KMS encryption key used to encrypt the new DB instance, then you can use the KMS key alias instead of the ARN for the KMS encryption key.

Not applicable. The KMS key identifier is managed by the DB cluster. For more information, see CreateDBCluster.

If the StorageEncrypted parameter is true, and you do not specify a value for the KmsKeyId parameter, then Amazon Neptune will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

" + }, + "Domain":{ + "shape":"String", + "documentation":"

Specify the Active Directory Domain to create the instance in.

" + }, + "CopyTagsToSnapshot":{ + "shape":"BooleanOptional", + "documentation":"

True to copy all tags from the DB instance to snapshots of the DB instance, and otherwise false. The default is false.

" + }, + "MonitoringInterval":{ + "shape":"IntegerOptional", + "documentation":"

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0.

If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0.

Valid Values: 0, 1, 5, 10, 15, 30, 60

" + }, + "MonitoringRoleArn":{ + "shape":"String", + "documentation":"

The ARN for the IAM role that permits Neptune to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess.

If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value.

" + }, + "DomainIAMRoleName":{ + "shape":"String", + "documentation":"

Specify the name of the IAM role to be used when making API calls to the Directory Service.

" + }, + "PromotionTier":{ + "shape":"IntegerOptional", + "documentation":"

A value that specifies the order in which a Read Replica is promoted to the primary instance after a failure of the existing primary instance.

Default: 1

Valid Values: 0 - 15

" + }, + "Timezone":{ + "shape":"String", + "documentation":"

The time zone of the DB instance.

" + }, + "EnableIAMDatabaseAuthentication":{ + "shape":"BooleanOptional", + "documentation":"

True to enable AWS Identity and Access Management (IAM) authentication for Neptune.

Default: false

" + }, + "EnablePerformanceInsights":{ + "shape":"BooleanOptional", + "documentation":"

True to enable Performance Insights for the DB instance, and otherwise false.

" + }, + "PerformanceInsightsKMSKeyId":{ + "shape":"String", + "documentation":"

The AWS KMS key identifier for encryption of Performance Insights data. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

" + }, + "EnableCloudwatchLogsExports":{ + "shape":"LogTypeList", + "documentation":"

The list of log types that need to be enabled for exporting to CloudWatch Logs.

" + } + }, + "documentation":"

" + }, + "CreateDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "CreateDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBParameterGroupName", + "DBParameterGroupFamily", + "Description" + ], + "members":{ + "DBParameterGroupName":{ + "shape":"String", + "documentation":"

The name of the DB parameter group.

Constraints:

  • Must be 1 to 255 letters, numbers, or hyphens.

  • First character must be a letter

  • Cannot end with a hyphen or contain two consecutive hyphens

This value is stored as a lowercase string.

" + }, + "DBParameterGroupFamily":{ + "shape":"String", + "documentation":"

The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a database engine and engine version compatible with that DB parameter group family.

" + }, + "Description":{ + "shape":"String", + "documentation":"

The description for the DB parameter group.

" + }, + "Tags":{"shape":"TagList"} + }, + "documentation":"

" + }, + "CreateDBParameterGroupResult":{ + "type":"structure", + "members":{ + "DBParameterGroup":{"shape":"DBParameterGroup"} + } + }, + "CreateDBSubnetGroupMessage":{ + "type":"structure", + "required":[ + "DBSubnetGroupName", + "DBSubnetGroupDescription", + "SubnetIds" + ], + "members":{ + "DBSubnetGroupName":{ + "shape":"String", + "documentation":"

The name for the DB subnet group. This value is stored as a lowercase string.

Constraints: Must contain no more than 255 letters, numbers, periods, underscores, spaces, or hyphens. Must not be default.

Example: mySubnetgroup

" + }, + "DBSubnetGroupDescription":{ + "shape":"String", + "documentation":"

The description for the DB subnet group.

" + }, + "SubnetIds":{ + "shape":"SubnetIdentifierList", + "documentation":"

The EC2 Subnet IDs for the DB subnet group.

" + }, + "Tags":{"shape":"TagList"} + }, + "documentation":"

" + }, + "CreateDBSubnetGroupResult":{ + "type":"structure", + "members":{ + "DBSubnetGroup":{"shape":"DBSubnetGroup"} + } + }, + "CreateEventSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SnsTopicArn" + ], + "members":{ + "SubscriptionName":{ + "shape":"String", + "documentation":"

The name of the subscription.

Constraints: The name must be less than 255 characters.

" + }, + "SnsTopicArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.

" + }, + "SourceType":{ + "shape":"String", + "documentation":"

The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. If this value is not specified, all events are returned.

Valid values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot

" + }, + "EventCategories":{ + "shape":"EventCategoriesList", + "documentation":"

A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType by using the DescribeEventCategories action.

" + }, + "SourceIds":{ + "shape":"SourceIdsList", + "documentation":"

The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it can't end with a hyphen or contain two consecutive hyphens.

Constraints:

  • If SourceIds are supplied, SourceType must also be provided.

  • If the source type is a DB instance, then a DBInstanceIdentifier must be supplied.

  • If the source type is a DB security group, a DBSecurityGroupName must be supplied.

  • If the source type is a DB parameter group, a DBParameterGroupName must be supplied.

  • If the source type is a DB snapshot, a DBSnapshotIdentifier must be supplied.

" + }, + "Enabled":{ + "shape":"BooleanOptional", + "documentation":"

A Boolean value; set to true to activate the subscription, set to false to create the subscription but not activate it.

" + }, + "Tags":{"shape":"TagList"} + }, + "documentation":"

" + }, + "CreateEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "DBCluster":{ + "type":"structure", + "members":{ + "AllocatedStorage":{ + "shape":"IntegerOptional", + "documentation":"

AllocatedStorage always returns 1, because Neptune DB cluster storage size is not fixed, but instead automatically adjusts as needed.

" + }, + "AvailabilityZones":{ + "shape":"AvailabilityZones", + "documentation":"

Provides the list of EC2 Availability Zones that instances in the DB cluster can be created in.

" + }, + "BackupRetentionPeriod":{ + "shape":"IntegerOptional", + "documentation":"

Specifies the number of days for which automatic DB snapshots are retained.

" + }, + "CharacterSetName":{ + "shape":"String", + "documentation":"

If present, specifies the name of the character set that this cluster is associated with.

" + }, + "DatabaseName":{ + "shape":"String", + "documentation":"

Contains the name of the initial database of this DB cluster that was provided at create time, if one was specified when the DB cluster was created. This same name is returned for the life of the DB cluster.

" + }, + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

Contains a user-supplied DB cluster identifier. This identifier is the unique key that identifies a DB cluster.

" + }, + "DBClusterParameterGroup":{ + "shape":"String", + "documentation":"

Specifies the name of the DB cluster parameter group for the DB cluster.

" + }, + "DBSubnetGroup":{ + "shape":"String", + "documentation":"

Specifies information on the subnet group associated with the DB cluster, including the name, description, and subnets in the subnet group.

" + }, + "Status":{ + "shape":"String", + "documentation":"

Specifies the current state of this DB cluster.

" + }, + "PercentProgress":{ + "shape":"String", + "documentation":"

Specifies the progress of the operation as a percentage.

" + }, + "EarliestRestorableTime":{ + "shape":"TStamp", + "documentation":"

Specifies the earliest time to which a database can be restored with point-in-time restore.

" + }, + "Endpoint":{ + "shape":"String", + "documentation":"

Specifies the connection endpoint for the primary instance of the DB cluster.

" + }, + "ReaderEndpoint":{ + "shape":"String", + "documentation":"

The reader endpoint for the DB cluster. The reader endpoint for a DB cluster load-balances connections across the Read Replicas that are available in a DB cluster. As clients request new connections to the reader endpoint, Neptune distributes the connection requests among the Read Replicas in the DB cluster. This functionality can help balance your read workload across multiple Read Replicas in your DB cluster.

If a failover occurs, and the Read Replica that you are connected to is promoted to be the primary instance, your connection is dropped. To continue sending your read workload to other Read Replicas in the cluster, you can then reconnect to the reader endpoint.

" + }, + "MultiAZ":{ + "shape":"Boolean", + "documentation":"

Specifies whether the DB cluster has instances in multiple Availability Zones.

" + }, + "Engine":{ + "shape":"String", + "documentation":"

Provides the name of the database engine to be used for this DB cluster.

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

Indicates the database engine version.

" + }, + "LatestRestorableTime":{ + "shape":"TStamp", + "documentation":"

Specifies the latest time to which a database can be restored with point-in-time restore.

" + }, + "Port":{ + "shape":"IntegerOptional", + "documentation":"

Specifies the port that the database engine is listening on.

" + }, + "MasterUsername":{ + "shape":"String", + "documentation":"

Contains the master username for the DB cluster.

" + }, + "DBClusterOptionGroupMemberships":{ + "shape":"DBClusterOptionGroupMemberships", + "documentation":"

Provides the list of option group memberships for this DB cluster.

" + }, + "PreferredBackupWindow":{ + "shape":"String", + "documentation":"

Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod.

" + }, + "PreferredMaintenanceWindow":{ + "shape":"String", + "documentation":"

Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

" + }, + "ReplicationSourceIdentifier":{ + "shape":"String", + "documentation":"

Contains the identifier of the source DB cluster if this DB cluster is a Read Replica.

" + }, + "ReadReplicaIdentifiers":{ + "shape":"ReadReplicaIdentifierList", + "documentation":"

Contains one or more identifiers of the Read Replicas associated with this DB cluster.

" + }, + "DBClusterMembers":{ + "shape":"DBClusterMemberList", + "documentation":"

Provides the list of instances that make up the DB cluster.

" + }, + "VpcSecurityGroups":{ + "shape":"VpcSecurityGroupMembershipList", + "documentation":"

Provides a list of VPC security groups that the DB cluster belongs to.

" + }, + "HostedZoneId":{ + "shape":"String", + "documentation":"

Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.

" + }, + "StorageEncrypted":{ + "shape":"Boolean", + "documentation":"

Specifies whether the DB cluster is encrypted.

" + }, + "KmsKeyId":{ + "shape":"String", + "documentation":"

If StorageEncrypted is true, the AWS KMS key identifier for the encrypted DB cluster.

" + }, + "DbClusterResourceId":{ + "shape":"String", + "documentation":"

The AWS Region-unique, immutable identifier for the DB cluster. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the DB cluster is accessed.

" + }, + "DBClusterArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) for the DB cluster.

" + }, + "AssociatedRoles":{ + "shape":"DBClusterRoles", + "documentation":"

Provides a list of the AWS Identity and Access Management (IAM) roles that are associated with the DB cluster. IAM roles that are associated with a DB cluster grant permission for the DB cluster to access other AWS services on your behalf.

" + }, + "IAMDatabaseAuthenticationEnabled":{ + "shape":"Boolean", + "documentation":"

True if mapping of AWS Identity and Access Management (IAM) accounts to database accounts is enabled, and otherwise false.

" + }, + "CloneGroupId":{ + "shape":"String", + "documentation":"

Identifies the clone group to which the DB cluster is associated.

" + }, + "ClusterCreateTime":{ + "shape":"TStamp", + "documentation":"

Specifies the time when the DB cluster was created, in Universal Coordinated Time (UTC).

" + } + }, + "documentation":"

Contains the details of an Amazon Neptune DB cluster.

This data type is used as a response element in the DescribeDBClusters action.

", + "wrapper":true + }, + "DBClusterAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

User already has a DB cluster with the given identifier.

", + "error":{ + "code":"DBClusterAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBClusterList":{ + "type":"list", + "member":{ + "shape":"DBCluster", + "locationName":"DBCluster" + } + }, + "DBClusterMember":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{ + "shape":"String", + "documentation":"

Specifies the instance identifier for this member of the DB cluster.

" + }, + "IsClusterWriter":{ + "shape":"Boolean", + "documentation":"

Value that is true if the cluster member is the primary instance for the DB cluster and false otherwise.

" + }, + "DBClusterParameterGroupStatus":{ + "shape":"String", + "documentation":"

Specifies the status of the DB cluster parameter group for this member of the DB cluster.

" + }, + "PromotionTier":{ + "shape":"IntegerOptional", + "documentation":"

A value that specifies the order in which a Read Replica is promoted to the primary instance after a failure of the existing primary instance.

" + } + }, + "documentation":"

Contains information about an instance that is part of a DB cluster.

", + "wrapper":true + }, + "DBClusterMemberList":{ + "type":"list", + "member":{ + "shape":"DBClusterMember", + "locationName":"DBClusterMember" + } + }, + "DBClusterMessage":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

A pagination token that can be used in a subsequent DescribeDBClusters request.

" + }, + "DBClusters":{ + "shape":"DBClusterList", + "documentation":"

Contains a list of DB clusters for the user.

" + } + }, + "documentation":"

Contains the result of a successful invocation of the DescribeDBClusters action.

" + }, + "DBClusterNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

DBClusterIdentifier does not refer to an existing DB cluster.

", + "error":{ + "code":"DBClusterNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBClusterOptionGroupMemberships":{ + "type":"list", + "member":{ + "shape":"DBClusterOptionGroupStatus", + "locationName":"DBClusterOptionGroup" + } + }, + "DBClusterOptionGroupStatus":{ + "type":"structure", + "members":{ + "DBClusterOptionGroupName":{ + "shape":"String", + "documentation":"

Specifies the name of the DB cluster option group.

" + }, + "Status":{ + "shape":"String", + "documentation":"

Specifies the status of the DB cluster option group.

" + } + }, + "documentation":"

Contains status information for a DB cluster option group.

" + }, + "DBClusterParameterGroup":{ + "type":"structure", + "members":{ + "DBClusterParameterGroupName":{ + "shape":"String", + "documentation":"

Provides the name of the DB cluster parameter group.

" + }, + "DBParameterGroupFamily":{ + "shape":"String", + "documentation":"

Provides the name of the DB parameter group family that this DB cluster parameter group is compatible with.

" + }, + "Description":{ + "shape":"String", + "documentation":"

Provides the customer-specified description for this DB cluster parameter group.

" + }, + "DBClusterParameterGroupArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) for the DB cluster parameter group.

" + } + }, + "documentation":"

Contains the details of an Amazon Neptune DB cluster parameter group.

This data type is used as a response element in the DescribeDBClusterParameterGroups action.

", + "wrapper":true + }, + "DBClusterParameterGroupDetails":{ + "type":"structure", + "members":{ + "Parameters":{ + "shape":"ParametersList", + "documentation":"

Provides a list of parameters for the DB cluster parameter group.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous DescribeDBClusterParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

" + } + }, + "documentation":"

Provides details about a DB cluster parameter group including the parameters in the DB cluster parameter group.

" + }, + "DBClusterParameterGroupList":{ + "type":"list", + "member":{ + "shape":"DBClusterParameterGroup", + "locationName":"DBClusterParameterGroup" + } + }, + "DBClusterParameterGroupNameMessage":{ + "type":"structure", + "members":{ + "DBClusterParameterGroupName":{ + "shape":"String", + "documentation":"

The name of the DB cluster parameter group.

Constraints:

  • Must be 1 to 255 letters or numbers.

  • First character must be a letter

  • Cannot end with a hyphen or contain two consecutive hyphens

This value is stored as a lowercase string.

" + } + }, + "documentation":"

" + }, + "DBClusterParameterGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

DBClusterParameterGroupName does not refer to an existing DB Cluster parameter group.

", + "error":{ + "code":"DBClusterParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBClusterParameterGroupsMessage":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous DescribeDBClusterParameterGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + }, + "DBClusterParameterGroups":{ + "shape":"DBClusterParameterGroupList", + "documentation":"

A list of DB cluster parameter groups.

" + } + }, + "documentation":"

" + }, + "DBClusterQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

User attempted to create a new DB cluster and the user has already reached the maximum allowed DB cluster quota.

", + "error":{ + "code":"DBClusterQuotaExceededFault", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "DBClusterRole":{ + "type":"structure", + "members":{ + "RoleArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that is associated with the DB cluster.

" + }, + "Status":{ + "shape":"String", + "documentation":"

Describes the state of association between the IAM role and the DB cluster. The Status property returns one of the following values:

  • ACTIVE - the IAM role ARN is associated with the DB cluster and can be used to access other AWS services on your behalf.

  • PENDING - the IAM role ARN is being associated with the DB cluster.

  • INVALID - the IAM role ARN is associated with the DB cluster, but the DB cluster is unable to assume the IAM role in order to access other AWS services on your behalf.

" + } + }, + "documentation":"

Describes an AWS Identity and Access Management (IAM) role that is associated with a DB cluster.

" + }, + "DBClusterRoleAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified IAM role Amazon Resource Name (ARN) is already associated with the specified DB cluster.

", + "error":{ + "code":"DBClusterRoleAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBClusterRoleNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified IAM role Amazon Resource Name (ARN) is not associated with the specified DB cluster.

", + "error":{ + "code":"DBClusterRoleNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBClusterRoleQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

You have exceeded the maximum number of IAM roles that can be associated with the specified DB cluster.

", + "error":{ + "code":"DBClusterRoleQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBClusterRoles":{ + "type":"list", + "member":{ + "shape":"DBClusterRole", + "locationName":"DBClusterRole" + } + }, + "DBClusterSnapshot":{ + "type":"structure", + "members":{ + "AvailabilityZones":{ + "shape":"AvailabilityZones", + "documentation":"

Provides the list of EC2 Availability Zones that instances in the DB cluster snapshot can be restored in.

" + }, + "DBClusterSnapshotIdentifier":{ + "shape":"String", + "documentation":"

Specifies the identifier for the DB cluster snapshot.

" + }, + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

Specifies the DB cluster identifier of the DB cluster that this DB cluster snapshot was created from.

" + }, + "SnapshotCreateTime":{ + "shape":"TStamp", + "documentation":"

Provides the time when the snapshot was taken, in Universal Coordinated Time (UTC).

" + }, + "Engine":{ + "shape":"String", + "documentation":"

Specifies the name of the database engine.

" + }, + "AllocatedStorage":{ + "shape":"Integer", + "documentation":"

Specifies the allocated storage size in gibibytes (GiB).

" + }, + "Status":{ + "shape":"String", + "documentation":"

Specifies the status of this DB cluster snapshot.

" + }, + "Port":{ + "shape":"Integer", + "documentation":"

Specifies the port that the DB cluster was listening on at the time of the snapshot.

" + }, + "VpcId":{ + "shape":"String", + "documentation":"

Provides the VPC ID associated with the DB cluster snapshot.

" + }, + "ClusterCreateTime":{ + "shape":"TStamp", + "documentation":"

Specifies the time when the DB cluster was created, in Universal Coordinated Time (UTC).

" + }, + "MasterUsername":{ + "shape":"String", + "documentation":"

Provides the master username for the DB cluster snapshot.

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

Provides the version of the database engine for this DB cluster snapshot.

" + }, + "LicenseModel":{ + "shape":"String", + "documentation":"

Provides the license model information for this DB cluster snapshot.

" + }, + "SnapshotType":{ + "shape":"String", + "documentation":"

Provides the type of the DB cluster snapshot.

" + }, + "PercentProgress":{ + "shape":"Integer", + "documentation":"

Specifies the percentage of the estimated data that has been transferred.

" + }, + "StorageEncrypted":{ + "shape":"Boolean", + "documentation":"

Specifies whether the DB cluster snapshot is encrypted.

" + }, + "KmsKeyId":{ + "shape":"String", + "documentation":"

If StorageEncrypted is true, the AWS KMS key identifier for the encrypted DB cluster snapshot.

" + }, + "DBClusterSnapshotArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) for the DB cluster snapshot.

" + }, + "SourceDBClusterSnapshotArn":{ + "shape":"String", + "documentation":"

If the DB cluster snapshot was copied from a source DB cluster snapshot, the Amazon Resource Name (ARN) for the source DB cluster snapshot, otherwise, a null value.

" + }, + "IAMDatabaseAuthenticationEnabled":{ + "shape":"Boolean", + "documentation":"

True if mapping of AWS Identity and Access Management (IAM) accounts to database accounts is enabled, and otherwise false.

" + } + }, + "documentation":"

Contains the details for an Amazon Neptune DB cluster snapshot

This data type is used as a response element in the DescribeDBClusterSnapshots action.

", + "wrapper":true + }, + "DBClusterSnapshotAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

User already has a DB cluster snapshot with the given identifier.

", + "error":{ + "code":"DBClusterSnapshotAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBClusterSnapshotAttribute":{ + "type":"structure", + "members":{ + "AttributeName":{ + "shape":"String", + "documentation":"

The name of the manual DB cluster snapshot attribute.

The attribute named restore refers to the list of AWS accounts that have permission to copy or restore the manual DB cluster snapshot. For more information, see the ModifyDBClusterSnapshotAttribute API action.

" + }, + "AttributeValues":{ + "shape":"AttributeValueList", + "documentation":"

The value(s) for the manual DB cluster snapshot attribute.

If the AttributeName field is set to restore, then this element returns a list of IDs of the AWS accounts that are authorized to copy or restore the manual DB cluster snapshot. If a value of all is in the list, then the manual DB cluster snapshot is public and available for any AWS account to copy or restore.

" + } + }, + "documentation":"

Contains the name and values of a manual DB cluster snapshot attribute.

Manual DB cluster snapshot attributes are used to authorize other AWS accounts to restore a manual DB cluster snapshot. For more information, see the ModifyDBClusterSnapshotAttribute API action.

" + }, + "DBClusterSnapshotAttributeList":{ + "type":"list", + "member":{ + "shape":"DBClusterSnapshotAttribute", + "locationName":"DBClusterSnapshotAttribute" + } + }, + "DBClusterSnapshotAttributesResult":{ + "type":"structure", + "members":{ + "DBClusterSnapshotIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the manual DB cluster snapshot that the attributes apply to.

" + }, + "DBClusterSnapshotAttributes":{ + "shape":"DBClusterSnapshotAttributeList", + "documentation":"

The list of attributes and values for the manual DB cluster snapshot.

" + } + }, + "documentation":"

Contains the results of a successful call to the DescribeDBClusterSnapshotAttributes API action.

Manual DB cluster snapshot attributes are used to authorize other AWS accounts to copy or restore a manual DB cluster snapshot. For more information, see the ModifyDBClusterSnapshotAttribute API action.

", + "wrapper":true + }, + "DBClusterSnapshotList":{ + "type":"list", + "member":{ + "shape":"DBClusterSnapshot", + "locationName":"DBClusterSnapshot" + } + }, + "DBClusterSnapshotMessage":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous DescribeDBClusterSnapshots request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + }, + "DBClusterSnapshots":{ + "shape":"DBClusterSnapshotList", + "documentation":"

Provides a list of DB cluster snapshots for the user.

" + } + }, + "documentation":"

Provides a list of DB cluster snapshots for the user as the result of a call to the DescribeDBClusterSnapshots action.

" + }, + "DBClusterSnapshotNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

DBClusterSnapshotIdentifier does not refer to an existing DB cluster snapshot.

", + "error":{ + "code":"DBClusterSnapshotNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBEngineVersion":{ + "type":"structure", + "members":{ + "Engine":{ + "shape":"String", + "documentation":"

The name of the database engine.

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

The version number of the database engine.

" + }, + "DBParameterGroupFamily":{ + "shape":"String", + "documentation":"

The name of the DB parameter group family for the database engine.

" + }, + "DBEngineDescription":{ + "shape":"String", + "documentation":"

The description of the database engine.

" + }, + "DBEngineVersionDescription":{ + "shape":"String", + "documentation":"

The description of the database engine version.

" + }, + "DefaultCharacterSet":{ + "shape":"CharacterSet", + "documentation":"

The default character set for new instances of this engine version, if the CharacterSetName parameter of the CreateDBInstance API is not specified.

" + }, + "SupportedCharacterSets":{ + "shape":"SupportedCharacterSetsList", + "documentation":"

A list of the character sets supported by this engine for the CharacterSetName parameter of the CreateDBInstance action.

" + }, + "ValidUpgradeTarget":{ + "shape":"ValidUpgradeTargetList", + "documentation":"

A list of engine versions that this database engine version can be upgraded to.

" + }, + "SupportedTimezones":{ + "shape":"SupportedTimezonesList", + "documentation":"

A list of the time zones supported by this engine for the Timezone parameter of the CreateDBInstance action.

" + }, + "ExportableLogTypes":{ + "shape":"LogTypeList", + "documentation":"

The types of logs that the database engine has available for export to CloudWatch Logs.

" + }, + "SupportsLogExportsToCloudwatchLogs":{ + "shape":"Boolean", + "documentation":"

A value that indicates whether the engine version supports exporting the log types specified by ExportableLogTypes to CloudWatch Logs.

" + }, + "SupportsReadReplica":{ + "shape":"Boolean", + "documentation":"

Indicates whether the database engine version supports read replicas.

" + } + }, + "documentation":"

This data type is used as a response element in the action DescribeDBEngineVersions.

" + }, + "DBEngineVersionList":{ + "type":"list", + "member":{ + "shape":"DBEngineVersion", + "locationName":"DBEngineVersion" + } + }, + "DBEngineVersionMessage":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + }, + "DBEngineVersions":{ + "shape":"DBEngineVersionList", + "documentation":"

A list of DBEngineVersion elements.

" + } + }, + "documentation":"

Contains the result of a successful invocation of the DescribeDBEngineVersions action.

" + }, + "DBInstance":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{ + "shape":"String", + "documentation":"

Contains a user-supplied database identifier. This identifier is the unique key that identifies a DB instance.

" + }, + "DBInstanceClass":{ + "shape":"String", + "documentation":"

Contains the name of the compute and memory capacity class of the DB instance.

" + }, + "Engine":{ + "shape":"String", + "documentation":"

Provides the name of the database engine to be used for this DB instance.

" + }, + "DBInstanceStatus":{ + "shape":"String", + "documentation":"

Specifies the current state of this database.

" + }, + "MasterUsername":{ + "shape":"String", + "documentation":"

Contains the master username for the DB instance.

" + }, + "DBName":{ + "shape":"String", + "documentation":"

The database name.

" + }, + "Endpoint":{ + "shape":"Endpoint", + "documentation":"

Specifies the connection endpoint.

" + }, + "AllocatedStorage":{ + "shape":"Integer", + "documentation":"

Specifies the allocated storage size specified in gibibytes.

" + }, + "InstanceCreateTime":{ + "shape":"TStamp", + "documentation":"

Provides the date and time the DB instance was created.

" + }, + "PreferredBackupWindow":{ + "shape":"String", + "documentation":"

Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod.

" + }, + "BackupRetentionPeriod":{ + "shape":"Integer", + "documentation":"

Specifies the number of days for which automatic DB snapshots are retained.

" + }, + "DBSecurityGroups":{ + "shape":"DBSecurityGroupMembershipList", + "documentation":"

Provides List of DB security group elements containing only DBSecurityGroup.Name and DBSecurityGroup.Status subelements.

" + }, + "VpcSecurityGroups":{ + "shape":"VpcSecurityGroupMembershipList", + "documentation":"

Provides a list of VPC security group elements that the DB instance belongs to.

" + }, + "DBParameterGroups":{ + "shape":"DBParameterGroupStatusList", + "documentation":"

Provides the list of DB parameter groups applied to this DB instance.

" + }, + "AvailabilityZone":{ + "shape":"String", + "documentation":"

Specifies the name of the Availability Zone the DB instance is located in.

" + }, + "DBSubnetGroup":{ + "shape":"DBSubnetGroup", + "documentation":"

Specifies information on the subnet group associated with the DB instance, including the name, description, and subnets in the subnet group.

" + }, + "PreferredMaintenanceWindow":{ + "shape":"String", + "documentation":"

Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

" + }, + "PendingModifiedValues":{ + "shape":"PendingModifiedValues", + "documentation":"

Specifies that changes to the DB instance are pending. This element is only included when changes are pending. Specific changes are identified by subelements.

" + }, + "LatestRestorableTime":{ + "shape":"TStamp", + "documentation":"

Specifies the latest time to which a database can be restored with point-in-time restore.

" + }, + "MultiAZ":{ + "shape":"Boolean", + "documentation":"

Specifies if the DB instance is a Multi-AZ deployment.

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

Indicates the database engine version.

" + }, + "AutoMinorVersionUpgrade":{ + "shape":"Boolean", + "documentation":"

Indicates that minor version patches are applied automatically.

" + }, + "ReadReplicaSourceDBInstanceIdentifier":{ + "shape":"String", + "documentation":"

Contains the identifier of the source DB instance if this DB instance is a Read Replica.

" + }, + "ReadReplicaDBInstanceIdentifiers":{ + "shape":"ReadReplicaDBInstanceIdentifierList", + "documentation":"

Contains one or more identifiers of the Read Replicas associated with this DB instance.

" + }, + "ReadReplicaDBClusterIdentifiers":{ + "shape":"ReadReplicaDBClusterIdentifierList", + "documentation":"

Contains one or more identifiers of DB clusters that are Read Replicas of this DB instance.

" + }, + "LicenseModel":{ + "shape":"String", + "documentation":"

License model information for this DB instance.

" + }, + "Iops":{ + "shape":"IntegerOptional", + "documentation":"

Specifies the Provisioned IOPS (I/O operations per second) value.

" + }, + "OptionGroupMemberships":{ + "shape":"OptionGroupMembershipList", + "documentation":"

Provides the list of option group memberships for this DB instance.

" + }, + "CharacterSetName":{ + "shape":"String", + "documentation":"

If present, specifies the name of the character set that this instance is associated with.

" + }, + "SecondaryAvailabilityZone":{ + "shape":"String", + "documentation":"

If present, specifies the name of the secondary Availability Zone for a DB instance with multi-AZ support.

" + }, + "PubliclyAccessible":{ + "shape":"Boolean", + "documentation":"

This parameter is not supported.

", + "deprecated":true + }, + "StatusInfos":{ + "shape":"DBInstanceStatusInfoList", + "documentation":"

The status of a Read Replica. If the instance is not a Read Replica, this is blank.

" + }, + "StorageType":{ + "shape":"String", + "documentation":"

Specifies the storage type associated with DB instance.

" + }, + "TdeCredentialArn":{ + "shape":"String", + "documentation":"

The ARN from the key store with which the instance is associated for TDE encryption.

" + }, + "DbInstancePort":{ + "shape":"Integer", + "documentation":"

Specifies the port that the DB instance listens on. If the DB instance is part of a DB cluster, this can be a different port than the DB cluster port.

" + }, + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

If the DB instance is a member of a DB cluster, contains the name of the DB cluster that the DB instance is a member of.

" + }, + "StorageEncrypted":{ + "shape":"Boolean", + "documentation":"

Specifies whether the DB instance is encrypted.

" + }, + "KmsKeyId":{ + "shape":"String", + "documentation":"

If StorageEncrypted is true, the AWS KMS key identifier for the encrypted DB instance.

" + }, + "DbiResourceId":{ + "shape":"String", + "documentation":"

The AWS Region-unique, immutable identifier for the DB instance. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the DB instance is accessed.

" + }, + "CACertificateIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the CA certificate for this DB instance.

" + }, + "DomainMemberships":{ + "shape":"DomainMembershipList", + "documentation":"

Not supported

" + }, + "CopyTagsToSnapshot":{ + "shape":"Boolean", + "documentation":"

Specifies whether tags are copied from the DB instance to snapshots of the DB instance.

" + }, + "MonitoringInterval":{ + "shape":"IntegerOptional", + "documentation":"

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance.

" + }, + "EnhancedMonitoringResourceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon CloudWatch Logs log stream that receives the Enhanced Monitoring metrics data for the DB instance.

" + }, + "MonitoringRoleArn":{ + "shape":"String", + "documentation":"

The ARN for the IAM role that permits Neptune to send Enhanced Monitoring metrics to Amazon CloudWatch Logs.

" + }, + "PromotionTier":{ + "shape":"IntegerOptional", + "documentation":"

A value that specifies the order in which a Read Replica is promoted to the primary instance after a failure of the existing primary instance.

" + }, + "DBInstanceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) for the DB instance.

" + }, + "Timezone":{ + "shape":"String", + "documentation":"

Not supported.

" + }, + "IAMDatabaseAuthenticationEnabled":{ + "shape":"Boolean", + "documentation":"

True if AWS Identity and Access Management (IAM) authentication is enabled, and otherwise false.

" + }, + "PerformanceInsightsEnabled":{ + "shape":"BooleanOptional", + "documentation":"

True if Performance Insights is enabled for the DB instance, and otherwise false.

" + }, + "PerformanceInsightsKMSKeyId":{ + "shape":"String", + "documentation":"

The AWS KMS key identifier for encryption of Performance Insights data. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

" + }, + "EnabledCloudwatchLogsExports":{ + "shape":"LogTypeList", + "documentation":"

A list of log types that this DB instance is configured to export to CloudWatch Logs.

" + } + }, + "documentation":"

Contains the details of an Amazon Neptune DB instance.

This data type is used as a response element in the DescribeDBInstances action.

", + "wrapper":true + }, + "DBInstanceAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

User already has a DB instance with the given identifier.

", + "error":{ + "code":"DBInstanceAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBInstanceList":{ + "type":"list", + "member":{ + "shape":"DBInstance", + "locationName":"DBInstance" + } + }, + "DBInstanceMessage":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

" + }, + "DBInstances":{ + "shape":"DBInstanceList", + "documentation":"

A list of DBInstance instances.

" + } + }, + "documentation":"

Contains the result of a successful invocation of the DescribeDBInstances action.

" + }, + "DBInstanceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

DBInstanceIdentifier does not refer to an existing DB instance.

", + "error":{ + "code":"DBInstanceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBInstanceStatusInfo":{ + "type":"structure", + "members":{ + "StatusType":{ + "shape":"String", + "documentation":"

This value is currently \"read replication.\"

" + }, + "Normal":{ + "shape":"Boolean", + "documentation":"

Boolean value that is true if the instance is operating normally, or false if the instance is in an error state.

" + }, + "Status":{ + "shape":"String", + "documentation":"

Status of the DB instance. For a StatusType of read replica, the values can be replicating, error, stopped, or terminated.

" + }, + "Message":{ + "shape":"String", + "documentation":"

Details of the error if there is an error for the instance. If the instance is not in an error state, this value is blank.

" + } + }, + "documentation":"

Provides a list of status information for a DB instance.

" + }, + "DBInstanceStatusInfoList":{ + "type":"list", + "member":{ + "shape":"DBInstanceStatusInfo", + "locationName":"DBInstanceStatusInfo" + } + }, + "DBParameterGroup":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{ + "shape":"String", + "documentation":"

Provides the name of the DB parameter group.

" + }, + "DBParameterGroupFamily":{ + "shape":"String", + "documentation":"

Provides the name of the DB parameter group family that this DB parameter group is compatible with.

" + }, + "Description":{ + "shape":"String", + "documentation":"

Provides the customer-specified description for this DB parameter group.

" + }, + "DBParameterGroupArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) for the DB parameter group.

" + } + }, + "documentation":"

Contains the details of an Amazon Neptune DB parameter group.

This data type is used as a response element in the DescribeDBParameterGroups action.

", + "wrapper":true + }, + "DBParameterGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

A DB parameter group with the same name exists.

", + "error":{ + "code":"DBParameterGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupDetails":{ + "type":"structure", + "members":{ + "Parameters":{ + "shape":"ParametersList", + "documentation":"

A list of Parameter values.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + }, + "documentation":"

Contains the result of a successful invocation of the DescribeDBParameters action.

" + }, + "DBParameterGroupList":{ + "type":"list", + "member":{ + "shape":"DBParameterGroup", + "locationName":"DBParameterGroup" + } + }, + "DBParameterGroupNameMessage":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{ + "shape":"String", + "documentation":"

Provides the name of the DB parameter group.

" + } + }, + "documentation":"

Contains the result of a successful invocation of the ModifyDBParameterGroup or ResetDBParameterGroup action.

" + }, + "DBParameterGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

DBParameterGroupName does not refer to an existing DB parameter group.

", + "error":{ + "code":"DBParameterGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

Request would result in user exceeding the allowed number of DB parameter groups.

", + "error":{ + "code":"DBParameterGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBParameterGroupStatus":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{ + "shape":"String", + "documentation":"

The name of the DP parameter group.

" + }, + "ParameterApplyStatus":{ + "shape":"String", + "documentation":"

The status of parameter updates.

" + } + }, + "documentation":"

The status of the DB parameter group.

This data type is used as a response element in the following actions:

" + }, + "DBParameterGroupStatusList":{ + "type":"list", + "member":{ + "shape":"DBParameterGroupStatus", + "locationName":"DBParameterGroup" + } + }, + "DBParameterGroupsMessage":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + }, + "DBParameterGroups":{ + "shape":"DBParameterGroupList", + "documentation":"

A list of DBParameterGroup instances.

" + } + }, + "documentation":"

Contains the result of a successful invocation of the DescribeDBParameterGroups action.

" + }, + "DBSecurityGroupMembership":{ + "type":"structure", + "members":{ + "DBSecurityGroupName":{ + "shape":"String", + "documentation":"

The name of the DB security group.

" + }, + "Status":{ + "shape":"String", + "documentation":"

The status of the DB security group.

" + } + }, + "documentation":"

This data type is used as a response element in the following actions:

" + }, + "DBSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"DBSecurityGroupMembership", + "locationName":"DBSecurityGroup" + } + }, + "DBSecurityGroupNameList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"DBSecurityGroupName" + } + }, + "DBSecurityGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

DBSecurityGroupName does not refer to an existing DB security group.

", + "error":{ + "code":"DBSecurityGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSnapshotAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

DBSnapshotIdentifier is already used by an existing snapshot.

", + "error":{ + "code":"DBSnapshotAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSnapshotNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

DBSnapshotIdentifier does not refer to an existing DB snapshot.

", + "error":{ + "code":"DBSnapshotNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroup":{ + "type":"structure", + "members":{ + "DBSubnetGroupName":{ + "shape":"String", + "documentation":"

The name of the DB subnet group.

" + }, + "DBSubnetGroupDescription":{ + "shape":"String", + "documentation":"

Provides the description of the DB subnet group.

" + }, + "VpcId":{ + "shape":"String", + "documentation":"

Provides the VpcId of the DB subnet group.

" + }, + "SubnetGroupStatus":{ + "shape":"String", + "documentation":"

Provides the status of the DB subnet group.

" + }, + "Subnets":{ + "shape":"SubnetList", + "documentation":"

Contains a list of Subnet elements.

" + }, + "DBSubnetGroupArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) for the DB subnet group.

" + } + }, + "documentation":"

Contains the details of an Amazon Neptune DB subnet group.

This data type is used as a response element in the DescribeDBSubnetGroups action.

", + "wrapper":true + }, + "DBSubnetGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

DBSubnetGroupName is already used by an existing DB subnet group.

", + "error":{ + "code":"DBSubnetGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupDoesNotCoverEnoughAZs":{ + "type":"structure", + "members":{ + }, + "documentation":"

Subnets in the DB subnet group should cover at least two Availability Zones unless there is only one Availability Zone.

", + "error":{ + "code":"DBSubnetGroupDoesNotCoverEnoughAZs", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupMessage":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + }, + "DBSubnetGroups":{ + "shape":"DBSubnetGroups", + "documentation":"

A list of DBSubnetGroup instances.

" + } + }, + "documentation":"

Contains the result of a successful invocation of the DescribeDBSubnetGroups action.

" + }, + "DBSubnetGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

DBSubnetGroupName does not refer to an existing DB subnet group.

", + "error":{ + "code":"DBSubnetGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

Request would result in user exceeding the allowed number of DB subnet groups.

", + "error":{ + "code":"DBSubnetGroupQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBSubnetGroups":{ + "type":"list", + "member":{ + "shape":"DBSubnetGroup", + "locationName":"DBSubnetGroup" + } + }, + "DBSubnetQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

Request would result in user exceeding the allowed number of subnets in a DB subnet groups.

", + "error":{ + "code":"DBSubnetQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBUpgradeDependencyFailureFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The DB upgrade failed because a resource the DB depends on could not be modified.

", + "error":{ + "code":"DBUpgradeDependencyFailure", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DeleteDBClusterMessage":{ + "type":"structure", + "required":["DBClusterIdentifier"], + "members":{ + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The DB cluster identifier for the DB cluster to be deleted. This parameter isn't case-sensitive.

Constraints:

  • Must match an existing DBClusterIdentifier.

" + }, + "SkipFinalSnapshot":{ + "shape":"Boolean", + "documentation":"

Determines whether a final DB cluster snapshot is created before the DB cluster is deleted. If true is specified, no DB cluster snapshot is created. If false is specified, a DB cluster snapshot is created before the DB cluster is deleted.

You must specify a FinalDBSnapshotIdentifier parameter if SkipFinalSnapshot is false.

Default: false

" + }, + "FinalDBSnapshotIdentifier":{ + "shape":"String", + "documentation":"

The DB cluster snapshot identifier of the new DB cluster snapshot created when SkipFinalSnapshot is set to false.

Specifying this parameter and also setting the SkipFinalShapshot parameter to true results in an error.

Constraints:

  • Must be 1 to 255 letters, numbers, or hyphens.

  • First character must be a letter

  • Cannot end with a hyphen or contain two consecutive hyphens

" + } + }, + "documentation":"

" + }, + "DeleteDBClusterParameterGroupMessage":{ + "type":"structure", + "required":["DBClusterParameterGroupName"], + "members":{ + "DBClusterParameterGroupName":{ + "shape":"String", + "documentation":"

The name of the DB cluster parameter group.

Constraints:

  • Must be the name of an existing DB cluster parameter group.

  • You can't delete a default DB cluster parameter group.

  • Cannot be associated with any DB clusters.

" + } + }, + "documentation":"

" + }, + "DeleteDBClusterResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, + "DeleteDBClusterSnapshotMessage":{ + "type":"structure", + "required":["DBClusterSnapshotIdentifier"], + "members":{ + "DBClusterSnapshotIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the DB cluster snapshot to delete.

Constraints: Must be the name of an existing DB cluster snapshot in the available state.

" + } + }, + "documentation":"

" + }, + "DeleteDBClusterSnapshotResult":{ + "type":"structure", + "members":{ + "DBClusterSnapshot":{"shape":"DBClusterSnapshot"} + } + }, + "DeleteDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{ + "shape":"String", + "documentation":"

The DB instance identifier for the DB instance to be deleted. This parameter isn't case-sensitive.

Constraints:

  • Must match the name of an existing DB instance.

" + }, + "SkipFinalSnapshot":{ + "shape":"Boolean", + "documentation":"

Determines whether a final DB snapshot is created before the DB instance is deleted. If true is specified, no DBSnapshot is created. If false is specified, a DB snapshot is created before the DB instance is deleted.

Note that when a DB instance is in a failure state and has a status of 'failed', 'incompatible-restore', or 'incompatible-network', it can only be deleted when the SkipFinalSnapshot parameter is set to \"true\".

Specify true when deleting a Read Replica.

The FinalDBSnapshotIdentifier parameter must be specified if SkipFinalSnapshot is false.

Default: false

" + }, + "FinalDBSnapshotIdentifier":{ + "shape":"String", + "documentation":"

The DBSnapshotIdentifier of the new DBSnapshot created when SkipFinalSnapshot is set to false.

Specifying this parameter and also setting the SkipFinalShapshot parameter to true results in an error.

Constraints:

  • Must be 1 to 255 letters or numbers.

  • First character must be a letter

  • Cannot end with a hyphen or contain two consecutive hyphens

  • Cannot be specified when deleting a Read Replica.

" + } + }, + "documentation":"

" + }, + "DeleteDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "DeleteDBParameterGroupMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{ + "shape":"String", + "documentation":"

The name of the DB parameter group.

Constraints:

  • Must be the name of an existing DB parameter group

  • You can't delete a default DB parameter group

  • Cannot be associated with any DB instances

" + } + }, + "documentation":"

" + }, + "DeleteDBSubnetGroupMessage":{ + "type":"structure", + "required":["DBSubnetGroupName"], + "members":{ + "DBSubnetGroupName":{ + "shape":"String", + "documentation":"

The name of the database subnet group to delete.

You can't delete the default subnet group.

Constraints:

Must match the name of an existing DBSubnetGroup. Must not be default.

Example: mySubnetgroup

" + } + }, + "documentation":"

" + }, + "DeleteEventSubscriptionMessage":{ + "type":"structure", + "required":["SubscriptionName"], + "members":{ + "SubscriptionName":{ + "shape":"String", + "documentation":"

The name of the event notification subscription you want to delete.

" + } + }, + "documentation":"

" + }, + "DeleteEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "DescribeDBClusterParameterGroupsMessage":{ + "type":"structure", + "members":{ + "DBClusterParameterGroupName":{ + "shape":"String", + "documentation":"

The name of a specific DB cluster parameter group to return details for.

Constraints:

  • If supplied, must match the name of an existing DBClusterParameterGroup.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

This parameter is not currently supported.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous DescribeDBClusterParameterGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + }, + "documentation":"

" + }, + "DescribeDBClusterParametersMessage":{ + "type":"structure", + "required":["DBClusterParameterGroupName"], + "members":{ + "DBClusterParameterGroupName":{ + "shape":"String", + "documentation":"

The name of a specific DB cluster parameter group to return parameter details for.

Constraints:

  • If supplied, must match the name of an existing DBClusterParameterGroup.

" + }, + "Source":{ + "shape":"String", + "documentation":"

A value that indicates to return only parameters for a specific source. Parameter sources can be engine, service, or customer.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

This parameter is not currently supported.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous DescribeDBClusterParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + }, + "documentation":"

" + }, + "DescribeDBClusterSnapshotAttributesMessage":{ + "type":"structure", + "required":["DBClusterSnapshotIdentifier"], + "members":{ + "DBClusterSnapshotIdentifier":{ + "shape":"String", + "documentation":"

The identifier for the DB cluster snapshot to describe the attributes for.

" + } + }, + "documentation":"

" + }, + "DescribeDBClusterSnapshotAttributesResult":{ + "type":"structure", + "members":{ + "DBClusterSnapshotAttributesResult":{"shape":"DBClusterSnapshotAttributesResult"} + } + }, + "DescribeDBClusterSnapshotsMessage":{ + "type":"structure", + "members":{ + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The ID of the DB cluster to retrieve the list of DB cluster snapshots for. This parameter can't be used in conjunction with the DBClusterSnapshotIdentifier parameter. This parameter is not case-sensitive.

Constraints:

  • If supplied, must match the identifier of an existing DBCluster.

" + }, + "DBClusterSnapshotIdentifier":{ + "shape":"String", + "documentation":"

A specific DB cluster snapshot identifier to describe. This parameter can't be used in conjunction with the DBClusterIdentifier parameter. This value is stored as a lowercase string.

Constraints:

  • If supplied, must match the identifier of an existing DBClusterSnapshot.

  • If this identifier is for an automated snapshot, the SnapshotType parameter must also be specified.

" + }, + "SnapshotType":{ + "shape":"String", + "documentation":"

The type of DB cluster snapshots to be returned. You can specify one of the following values:

  • automated - Return all DB cluster snapshots that have been automatically taken by Amazon Neptune for my AWS account.

  • manual - Return all DB cluster snapshots that have been taken by my AWS account.

  • shared - Return all manual DB cluster snapshots that have been shared to my AWS account.

  • public - Return all DB cluster snapshots that have been marked as public.

If you don't specify a SnapshotType value, then both automated and manual DB cluster snapshots are returned. You can include shared DB cluster snapshots with these results by setting the IncludeShared parameter to true. You can include public DB cluster snapshots with these results by setting the IncludePublic parameter to true.

The IncludeShared and IncludePublic parameters don't apply for SnapshotType values of manual or automated. The IncludePublic parameter doesn't apply when SnapshotType is set to shared. The IncludeShared parameter doesn't apply when SnapshotType is set to public.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

This parameter is not currently supported.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous DescribeDBClusterSnapshots request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + }, + "IncludeShared":{ + "shape":"Boolean", + "documentation":"

True to include shared manual DB cluster snapshots from other AWS accounts that this AWS account has been given permission to copy or restore, and otherwise false. The default is false.

You can give an AWS account permission to restore a manual DB cluster snapshot from another AWS account by the ModifyDBClusterSnapshotAttribute API action.

" + }, + "IncludePublic":{ + "shape":"Boolean", + "documentation":"

True to include manual DB cluster snapshots that are public and can be copied or restored by any AWS account, and otherwise false. The default is false.

You can share a manual DB cluster snapshot as public by using the ModifyDBClusterSnapshotAttribute API action.

" + } + }, + "documentation":"

" + }, + "DescribeDBClustersMessage":{ + "type":"structure", + "members":{ + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The user-supplied DB cluster identifier. If this parameter is specified, information from only the specific DB cluster is returned. This parameter isn't case-sensitive.

Constraints:

  • If supplied, must match an existing DBClusterIdentifier.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

A filter that specifies one or more DB clusters to describe.

Supported filters:

  • db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list will only include information about the DB clusters identified by these ARNs.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous DescribeDBClusters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + }, + "documentation":"

" + }, + "DescribeDBEngineVersionsMessage":{ + "type":"structure", + "members":{ + "Engine":{ + "shape":"String", + "documentation":"

The database engine to return.

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

The database engine version to return.

Example: 5.1.49

" + }, + "DBParameterGroupFamily":{ + "shape":"String", + "documentation":"

The name of a specific DB parameter group family to return details for.

Constraints:

  • If supplied, must match an existing DBParameterGroupFamily.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

Not currently supported.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + }, + "DefaultOnly":{ + "shape":"Boolean", + "documentation":"

Indicates that only the default version of the specified engine or engine and major version combination is returned.

" + }, + "ListSupportedCharacterSets":{ + "shape":"BooleanOptional", + "documentation":"

If this parameter is specified and the requested engine supports the CharacterSetName parameter for CreateDBInstance, the response includes a list of supported character sets for each engine version.

" + }, + "ListSupportedTimezones":{ + "shape":"BooleanOptional", + "documentation":"

If this parameter is specified and the requested engine supports the TimeZone parameter for CreateDBInstance, the response includes a list of supported time zones for each engine version.

" + } + } + }, + "DescribeDBInstancesMessage":{ + "type":"structure", + "members":{ + "DBInstanceIdentifier":{ + "shape":"String", + "documentation":"

The user-supplied instance identifier. If this parameter is specified, information from only the specific DB instance is returned. This parameter isn't case-sensitive.

Constraints:

  • If supplied, must match the identifier of an existing DBInstance.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

A filter that specifies one or more DB instances to describe.

Supported filters:

  • db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list will only include information about the DB instances associated with the DB clusters identified by these ARNs.

  • db-instance-id - Accepts DB instance identifiers and DB instance Amazon Resource Names (ARNs). The results list will only include information about the DB instances identified by these ARNs.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous DescribeDBInstances request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + }, + "documentation":"

" + }, + "DescribeDBParameterGroupsMessage":{ + "type":"structure", + "members":{ + "DBParameterGroupName":{ + "shape":"String", + "documentation":"

The name of a specific DB parameter group to return details for.

Constraints:

  • If supplied, must match the name of an existing DBParameterGroup.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

This parameter is not currently supported.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous DescribeDBParameterGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + }, + "documentation":"

" + }, + "DescribeDBParametersMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{ + "shape":"String", + "documentation":"

The name of a specific DB parameter group to return details for.

Constraints:

  • If supplied, must match the name of an existing DBParameterGroup.

" + }, + "Source":{ + "shape":"String", + "documentation":"

The parameter types to return.

Default: All parameter types returned

Valid Values: user | system | engine-default

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

This parameter is not currently supported.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous DescribeDBParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + } + }, + "DescribeDBSubnetGroupsMessage":{ + "type":"structure", + "members":{ + "DBSubnetGroupName":{ + "shape":"String", + "documentation":"

The name of the DB subnet group to return details for.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

This parameter is not currently supported.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous DescribeDBSubnetGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + }, + "documentation":"

" + }, + "DescribeEngineDefaultClusterParametersMessage":{ + "type":"structure", + "required":["DBParameterGroupFamily"], + "members":{ + "DBParameterGroupFamily":{ + "shape":"String", + "documentation":"

The name of the DB cluster parameter group family to return engine parameter information for.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

This parameter is not currently supported.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous DescribeEngineDefaultClusterParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + }, + "documentation":"

" + }, + "DescribeEngineDefaultClusterParametersResult":{ + "type":"structure", + "members":{ + "EngineDefaults":{"shape":"EngineDefaults"} + } + }, + "DescribeEngineDefaultParametersMessage":{ + "type":"structure", + "required":["DBParameterGroupFamily"], + "members":{ + "DBParameterGroupFamily":{ + "shape":"String", + "documentation":"

The name of the DB parameter group family.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

Not currently supported.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous DescribeEngineDefaultParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + }, + "documentation":"

" + }, + "DescribeEngineDefaultParametersResult":{ + "type":"structure", + "members":{ + "EngineDefaults":{"shape":"EngineDefaults"} + } + }, + "DescribeEventCategoriesMessage":{ + "type":"structure", + "members":{ + "SourceType":{ + "shape":"String", + "documentation":"

The type of source that is generating the events.

Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

This parameter is not currently supported.

" + } + }, + "documentation":"

" + }, + "DescribeEventSubscriptionsMessage":{ + "type":"structure", + "members":{ + "SubscriptionName":{ + "shape":"String", + "documentation":"

The name of the event notification subscription you want to describe.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

This parameter is not currently supported.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous DescribeEventSubscriptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + }, + "documentation":"

" + }, + "DescribeEventsMessage":{ + "type":"structure", + "members":{ + "SourceIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the event source for which events are returned. If not specified, then all sources are included in the response.

Constraints:

  • If SourceIdentifier is supplied, SourceType must also be provided.

  • If the source type is DBInstance, then a DBInstanceIdentifier must be supplied.

  • If the source type is DBSecurityGroup, a DBSecurityGroupName must be supplied.

  • If the source type is DBParameterGroup, a DBParameterGroupName must be supplied.

  • If the source type is DBSnapshot, a DBSnapshotIdentifier must be supplied.

  • Cannot end with a hyphen or contain two consecutive hyphens.

" + }, + "SourceType":{ + "shape":"SourceType", + "documentation":"

The event source to retrieve events for. If no value is specified, all events are returned.

" + }, + "StartTime":{ + "shape":"TStamp", + "documentation":"

The beginning of the time interval to retrieve events for, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

Example: 2009-07-08T18:00Z

" + }, + "EndTime":{ + "shape":"TStamp", + "documentation":"

The end of the time interval for which to retrieve events, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

Example: 2009-07-08T18:00Z

" + }, + "Duration":{ + "shape":"IntegerOptional", + "documentation":"

The number of minutes to retrieve events for.

Default: 60

" + }, + "EventCategories":{ + "shape":"EventCategoriesList", + "documentation":"

A list of event categories that trigger notifications for a event notification subscription.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

This parameter is not currently supported.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous DescribeEvents request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + }, + "documentation":"

" + }, + "DescribeOrderableDBInstanceOptionsMessage":{ + "type":"structure", + "required":["Engine"], + "members":{ + "Engine":{ + "shape":"String", + "documentation":"

The name of the engine to retrieve DB instance options for.

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

The engine version filter value. Specify this parameter to show only the available offerings matching the specified engine version.

" + }, + "DBInstanceClass":{ + "shape":"String", + "documentation":"

The DB instance class filter value. Specify this parameter to show only the available offerings matching the specified DB instance class.

" + }, + "LicenseModel":{ + "shape":"String", + "documentation":"

The license model filter value. Specify this parameter to show only the available offerings matching the specified license model.

" + }, + "Vpc":{ + "shape":"BooleanOptional", + "documentation":"

The VPC filter value. Specify this parameter to show only the available VPC or non-VPC offerings.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

This parameter is not currently supported.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

" + } + }, + "documentation":"

" + }, + "DescribePendingMaintenanceActionsMessage":{ + "type":"structure", + "members":{ + "ResourceIdentifier":{ + "shape":"String", + "documentation":"

The ARN of a resource to return pending maintenance actions for.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

A filter that specifies one or more resources to return pending maintenance actions for.

Supported filters:

  • db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list will only include pending maintenance actions for the DB clusters identified by these ARNs.

  • db-instance-id - Accepts DB instance identifiers and DB instance ARNs. The results list will only include pending maintenance actions for the DB instances identified by these ARNs.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous DescribePendingMaintenanceActions request. If this parameter is specified, the response includes only records beyond the marker, up to a number of records specified by MaxRecords.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + } + }, + "documentation":"

" + }, + "DescribeValidDBInstanceModificationsMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{ + "shape":"String", + "documentation":"

The customer identifier or the ARN of your DB instance.

" + } + }, + "documentation":"

" + }, + "DescribeValidDBInstanceModificationsResult":{ + "type":"structure", + "members":{ + "ValidDBInstanceModificationsMessage":{"shape":"ValidDBInstanceModificationsMessage"} + } + }, + "DomainMembership":{ + "type":"structure", + "members":{ + "Domain":{ + "shape":"String", + "documentation":"

The identifier of the Active Directory Domain.

" + }, + "Status":{ + "shape":"String", + "documentation":"

The status of the DB instance's Active Directory Domain membership (such as joined, pending-join, or failed).

" + }, + "FQDN":{ + "shape":"String", + "documentation":"

The fully qualified domain name of the Active Directory Domain.

" + }, + "IAMRoleName":{ + "shape":"String", + "documentation":"

The name of the IAM role to be used when making API calls to the Directory Service.

" + } + }, + "documentation":"

An Active Directory Domain membership record associated with the DB instance.

" + }, + "DomainMembershipList":{ + "type":"list", + "member":{ + "shape":"DomainMembership", + "locationName":"DomainMembership" + }, + "documentation":"

List of Active Directory Domain membership records associated with a DB instance.

" + }, + "DomainNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

Domain does not refer to an existing Active Directory Domain.

", + "error":{ + "code":"DomainNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "Double":{"type":"double"}, + "DoubleOptional":{"type":"double"}, + "DoubleRange":{ + "type":"structure", + "members":{ + "From":{ + "shape":"Double", + "documentation":"

The minimum value in the range.

" + }, + "To":{ + "shape":"Double", + "documentation":"

The maximum value in the range.

" + } + }, + "documentation":"

A range of double values.

" + }, + "DoubleRangeList":{ + "type":"list", + "member":{ + "shape":"DoubleRange", + "locationName":"DoubleRange" + } + }, + "Endpoint":{ + "type":"structure", + "members":{ + "Address":{ + "shape":"String", + "documentation":"

Specifies the DNS address of the DB instance.

" + }, + "Port":{ + "shape":"Integer", + "documentation":"

Specifies the port that the database engine is listening on.

" + }, + "HostedZoneId":{ + "shape":"String", + "documentation":"

Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.

" + } + }, + "documentation":"

This data type is used as a response element in the following actions:

" + }, + "EngineDefaults":{ + "type":"structure", + "members":{ + "DBParameterGroupFamily":{ + "shape":"String", + "documentation":"

Specifies the name of the DB parameter group family that the engine default parameters apply to.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous EngineDefaults request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

" + }, + "Parameters":{ + "shape":"ParametersList", + "documentation":"

Contains a list of engine default parameters.

" + } + }, + "documentation":"

Contains the result of a successful invocation of the DescribeEngineDefaultParameters action.

", + "wrapper":true + }, + "Event":{ + "type":"structure", + "members":{ + "SourceIdentifier":{ + "shape":"String", + "documentation":"

Provides the identifier for the source of the event.

" + }, + "SourceType":{ + "shape":"SourceType", + "documentation":"

Specifies the source type for this event.

" + }, + "Message":{ + "shape":"String", + "documentation":"

Provides the text of this event.

" + }, + "EventCategories":{ + "shape":"EventCategoriesList", + "documentation":"

Specifies the category for the event.

" + }, + "Date":{ + "shape":"TStamp", + "documentation":"

Specifies the date and time of the event.

" + }, + "SourceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) for the event.

" + } + }, + "documentation":"

This data type is used as a response element in the DescribeEvents action.

" + }, + "EventCategoriesList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"EventCategory" + } + }, + "EventCategoriesMap":{ + "type":"structure", + "members":{ + "SourceType":{ + "shape":"String", + "documentation":"

The source type that the returned categories belong to

" + }, + "EventCategories":{ + "shape":"EventCategoriesList", + "documentation":"

The event categories for the specified source type

" + } + }, + "documentation":"

Contains the results of a successful invocation of the DescribeEventCategories action.

", + "wrapper":true + }, + "EventCategoriesMapList":{ + "type":"list", + "member":{ + "shape":"EventCategoriesMap", + "locationName":"EventCategoriesMap" + } + }, + "EventCategoriesMessage":{ + "type":"structure", + "members":{ + "EventCategoriesMapList":{ + "shape":"EventCategoriesMapList", + "documentation":"

A list of EventCategoriesMap data types.

" + } + }, + "documentation":"

Data returned from the DescribeEventCategories action.

" + }, + "EventList":{ + "type":"list", + "member":{ + "shape":"Event", + "locationName":"Event" + } + }, + "EventSubscription":{ + "type":"structure", + "members":{ + "CustomerAwsId":{ + "shape":"String", + "documentation":"

The AWS customer account associated with the event notification subscription.

" + }, + "CustSubscriptionId":{ + "shape":"String", + "documentation":"

The event notification subscription Id.

" + }, + "SnsTopicArn":{ + "shape":"String", + "documentation":"

The topic ARN of the event notification subscription.

" + }, + "Status":{ + "shape":"String", + "documentation":"

The status of the event notification subscription.

Constraints:

Can be one of the following: creating | modifying | deleting | active | no-permission | topic-not-exist

The status \"no-permission\" indicates that Neptune no longer has permission to post to the SNS topic. The status \"topic-not-exist\" indicates that the topic was deleted after the subscription was created.

" + }, + "SubscriptionCreationTime":{ + "shape":"String", + "documentation":"

The time the event notification subscription was created.

" + }, + "SourceType":{ + "shape":"String", + "documentation":"

The source type for the event notification subscription.

" + }, + "SourceIdsList":{ + "shape":"SourceIdsList", + "documentation":"

A list of source IDs for the event notification subscription.

" + }, + "EventCategoriesList":{ + "shape":"EventCategoriesList", + "documentation":"

A list of event categories for the event notification subscription.

" + }, + "Enabled":{ + "shape":"Boolean", + "documentation":"

A Boolean value indicating if the subscription is enabled. True indicates the subscription is enabled.

" + }, + "EventSubscriptionArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) for the event subscription.

" + } + }, + "documentation":"

Contains the results of a successful invocation of the DescribeEventSubscriptions action.

", + "wrapper":true + }, + "EventSubscriptionQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"EventSubscriptionQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "EventSubscriptionsList":{ + "type":"list", + "member":{ + "shape":"EventSubscription", + "locationName":"EventSubscription" + } + }, + "EventSubscriptionsMessage":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + }, + "EventSubscriptionsList":{ + "shape":"EventSubscriptionsList", + "documentation":"

A list of EventSubscriptions data types.

" + } + }, + "documentation":"

Data returned by the DescribeEventSubscriptions action.

" + }, + "EventsMessage":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous Events request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

" + }, + "Events":{ + "shape":"EventList", + "documentation":"

A list of Event instances.

" + } + }, + "documentation":"

Contains the result of a successful invocation of the DescribeEvents action.

" + }, + "FailoverDBClusterMessage":{ + "type":"structure", + "members":{ + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

A DB cluster identifier to force a failover for. This parameter is not case-sensitive.

Constraints:

  • Must match the identifier of an existing DBCluster.

" + }, + "TargetDBInstanceIdentifier":{ + "shape":"String", + "documentation":"

The name of the instance to promote to the primary instance.

You must specify the instance identifier for an Read Replica in the DB cluster. For example, mydbcluster-replica1.

" + } + }, + "documentation":"

" + }, + "FailoverDBClusterResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, + "Filter":{ + "type":"structure", + "required":[ + "Name", + "Values" + ], + "members":{ + "Name":{ + "shape":"String", + "documentation":"

This parameter is not currently supported.

" + }, + "Values":{ + "shape":"FilterValueList", + "documentation":"

This parameter is not currently supported.

" + } + }, + "documentation":"

This type is not currently supported.

" + }, + "FilterList":{ + "type":"list", + "member":{ + "shape":"Filter", + "locationName":"Filter" + } + }, + "FilterValueList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"Value" + } + }, + "InstanceQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

Request would result in user exceeding the allowed number of DB instances.

", + "error":{ + "code":"InstanceQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InsufficientDBClusterCapacityFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The DB cluster does not have enough capacity for the current operation.

", + "error":{ + "code":"InsufficientDBClusterCapacityFault", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "InsufficientDBInstanceCapacityFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

Specified DB instance class is not available in the specified Availability Zone.

", + "error":{ + "code":"InsufficientDBInstanceCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InsufficientStorageClusterCapacityFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

There is insufficient storage available for the current action. You may be able to resolve this error by updating your subnet group to use different Availability Zones that have more storage available.

", + "error":{ + "code":"InsufficientStorageClusterCapacity", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Integer":{"type":"integer"}, + "IntegerOptional":{"type":"integer"}, + "InvalidDBClusterSnapshotStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The supplied value is not a valid DB cluster snapshot state.

", + "error":{ + "code":"InvalidDBClusterSnapshotStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBClusterStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The DB cluster is not in a valid state.

", + "error":{ + "code":"InvalidDBClusterStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBInstanceStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified DB instance is not in the available state.

", + "error":{ + "code":"InvalidDBInstanceState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBParameterGroupStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The DB parameter group is in use or is in an invalid state. If you are attempting to delete the parameter group, you cannot delete it when the parameter group is in this state.

", + "error":{ + "code":"InvalidDBParameterGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSecurityGroupStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The state of the DB security group does not allow deletion.

", + "error":{ + "code":"InvalidDBSecurityGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSnapshotStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The state of the DB snapshot does not allow deletion.

", + "error":{ + "code":"InvalidDBSnapshotState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetGroupStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The DB subnet group cannot be deleted because it is in use.

", + "error":{ + "code":"InvalidDBSubnetGroupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidDBSubnetStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The DB subnet is not in the available state.

", + "error":{ + "code":"InvalidDBSubnetStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidEventSubscriptionStateFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidEventSubscriptionState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidRestoreFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

Cannot restore from vpc backup to non-vpc DB instance.

", + "error":{ + "code":"InvalidRestoreFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidSubnet":{ + "type":"structure", + "members":{ + }, + "documentation":"

The requested subnet is invalid, or multiple subnets were requested that are not all in a common VPC.

", + "error":{ + "code":"InvalidSubnet", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidVPCNetworkStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

DB subnet group does not cover all Availability Zones after it is created because users' change.

", + "error":{ + "code":"InvalidVPCNetworkStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "KMSKeyNotAccessibleFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

Error accessing KMS key.

", + "error":{ + "code":"KMSKeyNotAccessibleFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "KeyList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ListTagsForResourceMessage":{ + "type":"structure", + "required":["ResourceName"], + "members":{ + "ResourceName":{ + "shape":"String", + "documentation":"

The Amazon Neptune resource with tags to be listed. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an Amazon Resource Name (ARN).

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

This parameter is not currently supported.

" + } + }, + "documentation":"

" + }, + "LogTypeList":{ + "type":"list", + "member":{"shape":"String"} + }, + "ModifyDBClusterMessage":{ + "type":"structure", + "required":["DBClusterIdentifier"], + "members":{ + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The DB cluster identifier for the cluster being modified. This parameter is not case-sensitive.

Constraints:

  • Must match the identifier of an existing DBCluster.

" + }, + "NewDBClusterIdentifier":{ + "shape":"String", + "documentation":"

The new DB cluster identifier for the DB cluster when renaming a DB cluster. This value is stored as a lowercase string.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens

  • The first character must be a letter

  • Cannot end with a hyphen or contain two consecutive hyphens

Example: my-cluster2

" + }, + "ApplyImmediately":{ + "shape":"Boolean", + "documentation":"

A value that specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB cluster. If this parameter is set to false, changes to the DB cluster are applied during the next maintenance window.

The ApplyImmediately parameter only affects the NewDBClusterIdentifier and MasterUserPassword values. If you set the ApplyImmediately parameter value to false, then changes to the NewDBClusterIdentifier and MasterUserPassword values are applied during the next maintenance window. All other changes are applied immediately, regardless of the value of the ApplyImmediately parameter.

Default: false

" + }, + "BackupRetentionPeriod":{ + "shape":"IntegerOptional", + "documentation":"

The number of days for which automated backups are retained. You must specify a minimum value of 1.

Default: 1

Constraints:

  • Must be a value from 1 to 35

" + }, + "DBClusterParameterGroupName":{ + "shape":"String", + "documentation":"

The name of the DB cluster parameter group to use for the DB cluster.

" + }, + "VpcSecurityGroupIds":{ + "shape":"VpcSecurityGroupIdList", + "documentation":"

A list of VPC security groups that the DB cluster will belong to.

" + }, + "Port":{ + "shape":"IntegerOptional", + "documentation":"

The port number on which the DB cluster accepts connections.

Constraints: Value must be 1150-65535

Default: The same port as the original DB cluster.

" + }, + "MasterUserPassword":{ + "shape":"String", + "documentation":"

The new password for the master database user. This password can contain any printable ASCII character except \"/\", \"\"\", or \"@\".

Constraints: Must contain from 8 to 41 characters.

" + }, + "OptionGroupName":{ + "shape":"String", + "documentation":"

A value that indicates that the DB cluster should be associated with the specified option group. Changing this parameter doesn't result in an outage except in the following case, and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted.

Permanent options can't be removed from an option group. The option group can't be removed from a DB cluster once it is associated with a DB cluster.

" + }, + "PreferredBackupWindow":{ + "shape":"String", + "documentation":"

The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region.

Constraints:

  • Must be in the format hh24:mi-hh24:mi.

  • Must be in Universal Coordinated Time (UTC).

  • Must not conflict with the preferred maintenance window.

  • Must be at least 30 minutes.

" + }, + "PreferredMaintenanceWindow":{ + "shape":"String", + "documentation":"

The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

Format: ddd:hh24:mi-ddd:hh24:mi

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.

Constraints: Minimum 30-minute window.

" + }, + "EnableIAMDatabaseAuthentication":{ + "shape":"BooleanOptional", + "documentation":"

True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.

Default: false

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true.

For a list of valid engine versions, see CreateDBInstance, or call DescribeDBEngineVersions.

" + } + }, + "documentation":"

" + }, + "ModifyDBClusterParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBClusterParameterGroupName", + "Parameters" + ], + "members":{ + "DBClusterParameterGroupName":{ + "shape":"String", + "documentation":"

The name of the DB cluster parameter group to modify.

" + }, + "Parameters":{ + "shape":"ParametersList", + "documentation":"

A list of parameters in the DB cluster parameter group to modify.

" + } + }, + "documentation":"

" + }, + "ModifyDBClusterResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, + "ModifyDBClusterSnapshotAttributeMessage":{ + "type":"structure", + "required":[ + "DBClusterSnapshotIdentifier", + "AttributeName" + ], + "members":{ + "DBClusterSnapshotIdentifier":{ + "shape":"String", + "documentation":"

The identifier for the DB cluster snapshot to modify the attributes for.

" + }, + "AttributeName":{ + "shape":"String", + "documentation":"

The name of the DB cluster snapshot attribute to modify.

To manage authorization for other AWS accounts to copy or restore a manual DB cluster snapshot, set this value to restore.

" + }, + "ValuesToAdd":{ + "shape":"AttributeValueList", + "documentation":"

A list of DB cluster snapshot attributes to add to the attribute specified by AttributeName.

To authorize other AWS accounts to copy or restore a manual DB cluster snapshot, set this list to include one or more AWS account IDs, or all to make the manual DB cluster snapshot restorable by any AWS account. Do not add the all value for any manual DB cluster snapshots that contain private information that you don't want available to all AWS accounts.

" + }, + "ValuesToRemove":{ + "shape":"AttributeValueList", + "documentation":"

A list of DB cluster snapshot attributes to remove from the attribute specified by AttributeName.

To remove authorization for other AWS accounts to copy or restore a manual DB cluster snapshot, set this list to include one or more AWS account identifiers, or all to remove authorization for any AWS account to copy or restore the DB cluster snapshot. If you specify all, an AWS account whose account ID is explicitly added to the restore attribute can still copy or restore a manual DB cluster snapshot.

" + } + }, + "documentation":"

" + }, + "ModifyDBClusterSnapshotAttributeResult":{ + "type":"structure", + "members":{ + "DBClusterSnapshotAttributesResult":{"shape":"DBClusterSnapshotAttributesResult"} + } + }, + "ModifyDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{ + "shape":"String", + "documentation":"

The DB instance identifier. This value is stored as a lowercase string.

Constraints:

  • Must match the identifier of an existing DBInstance.

" + }, + "AllocatedStorage":{ + "shape":"IntegerOptional", + "documentation":"

The new amount of storage (in gibibytes) to allocate for the DB instance.

Not applicable. Storage is managed by the DB Cluster.

" + }, + "DBInstanceClass":{ + "shape":"String", + "documentation":"

The new compute and memory capacity of the DB instance, for example, db.m4.large. Not all DB instance classes are available in all AWS Regions.

If you modify the DB instance class, an outage occurs during the change. The change is applied during the next maintenance window, unless ApplyImmediately is specified as true for this request.

Default: Uses existing setting

" + }, + "DBSubnetGroupName":{ + "shape":"String", + "documentation":"

The new DB subnet group for the DB instance. You can use this parameter to move your DB instance to a different VPC.

Changing the subnet group causes an outage during the change. The change is applied during the next maintenance window, unless you specify true for the ApplyImmediately parameter.

Constraints: If supplied, must match the name of an existing DBSubnetGroup.

Example: mySubnetGroup

" + }, + "DBSecurityGroups":{ + "shape":"DBSecurityGroupNameList", + "documentation":"

A list of DB security groups to authorize on this DB instance. Changing this setting doesn't result in an outage and the change is asynchronously applied as soon as possible.

Constraints:

  • If supplied, must match existing DBSecurityGroups.

" + }, + "VpcSecurityGroupIds":{ + "shape":"VpcSecurityGroupIdList", + "documentation":"

A list of EC2 VPC security groups to authorize on this DB instance. This change is asynchronously applied as soon as possible.

Not applicable. The associated list of EC2 VPC security groups is managed by the DB cluster. For more information, see ModifyDBCluster.

Constraints:

  • If supplied, must match existing VpcSecurityGroupIds.

" + }, + "ApplyImmediately":{ + "shape":"Boolean", + "documentation":"

Specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB instance.

If this parameter is set to false, changes to the DB instance are applied during the next maintenance window. Some parameter changes can cause an outage and are applied on the next call to RebootDBInstance, or the next failure reboot.

Default: false

" + }, + "MasterUserPassword":{ + "shape":"String", + "documentation":"

The new password for the master user. The password can include any printable ASCII character except \"/\", \"\"\", or \"@\".

Not applicable.

Default: Uses existing setting

" + }, + "DBParameterGroupName":{ + "shape":"String", + "documentation":"

The name of the DB parameter group to apply to the DB instance. Changing this setting doesn't result in an outage. The parameter group name itself is changed immediately, but the actual parameter changes are not applied until you reboot the instance without failover. The db instance will NOT be rebooted automatically and the parameter changes will NOT be applied during the next maintenance window.

Default: Uses existing setting

Constraints: The DB parameter group must be in the same DB parameter group family as this DB instance.

" + }, + "BackupRetentionPeriod":{ + "shape":"IntegerOptional", + "documentation":"

The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

Not applicable. The retention period for automated backups is managed by the DB cluster. For more information, see ModifyDBCluster.

Default: Uses existing setting

" + }, + "PreferredBackupWindow":{ + "shape":"String", + "documentation":"

The daily time range during which automated backups are created if automated backups are enabled.

Not applicable. The daily time range for creating automated backups is managed by the DB cluster. For more information, see ModifyDBCluster.

Constraints:

  • Must be in the format hh24:mi-hh24:mi

  • Must be in Universal Time Coordinated (UTC)

  • Must not conflict with the preferred maintenance window

  • Must be at least 30 minutes

" + }, + "PreferredMaintenanceWindow":{ + "shape":"String", + "documentation":"

The weekly time range (in UTC) during which system maintenance can occur, which might result in an outage. Changing this parameter doesn't result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, then changing this parameter will cause a reboot of the DB instance. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied.

Default: Uses existing setting

Format: ddd:hh24:mi-ddd:hh24:mi

Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun

Constraints: Must be at least 30 minutes

" + }, + "MultiAZ":{ + "shape":"BooleanOptional", + "documentation":"

Specifies if the DB instance is a Multi-AZ deployment. Changing this parameter doesn't result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

For major version upgrades, if a nondefault DB parameter group is currently in use, a new DB parameter group in the DB parameter group family for the new engine version must be specified. The new DB parameter group can be the default for that DB parameter group family.

" + }, + "AllowMajorVersionUpgrade":{ + "shape":"Boolean", + "documentation":"

Indicates that major version upgrades are allowed. Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible.

Constraints: This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the DB instance's current version.

" + }, + "AutoMinorVersionUpgrade":{ + "shape":"BooleanOptional", + "documentation":"

Indicates that minor version upgrades are applied automatically to the DB instance during the maintenance window. Changing this parameter doesn't result in an outage except in the following case and the change is asynchronously applied as soon as possible. An outage will result if this parameter is set to true during the maintenance window, and a newer minor version is available, and Neptune has enabled auto patching for that engine version.

" + }, + "LicenseModel":{ + "shape":"String", + "documentation":"

The license model for the DB instance.

Valid values: license-included | bring-your-own-license | general-public-license

" + }, + "Iops":{ + "shape":"IntegerOptional", + "documentation":"

The new Provisioned IOPS (I/O operations per second) value for the instance.

Changing this setting doesn't result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

Default: Uses existing setting

" + }, + "OptionGroupName":{ + "shape":"String", + "documentation":"

Indicates that the DB instance should be associated with the specified option group. Changing this parameter doesn't result in an outage except in the following case and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted.

Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group, and that option group can't be removed from a DB instance once it is associated with a DB instance

" + }, + "NewDBInstanceIdentifier":{ + "shape":"String", + "documentation":"

The new DB instance identifier for the DB instance when renaming a DB instance. When you change the DB instance identifier, an instance reboot will occur immediately if you set Apply Immediately to true, or will occur during the next maintenance window if Apply Immediately to false. This value is stored as a lowercase string.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens.

  • The first character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: mydbinstance

" + }, + "StorageType":{ + "shape":"String", + "documentation":"

Specifies the storage type to be associated with the DB instance.

If you specify Provisioned IOPS (io1), you must also include a value for the Iops parameter.

If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon Neptune operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a Read Replica for the instance, and creating a DB snapshot of the instance.

Valid values: standard | gp2 | io1

Default: io1 if the Iops parameter is specified, otherwise standard

" + }, + "TdeCredentialArn":{ + "shape":"String", + "documentation":"

The ARN from the key store with which to associate the instance for TDE encryption.

" + }, + "TdeCredentialPassword":{ + "shape":"String", + "documentation":"

The password for the given ARN from the key store in order to access the device.

" + }, + "CACertificateIdentifier":{ + "shape":"String", + "documentation":"

Indicates the certificate that needs to be associated with the instance.

" + }, + "Domain":{ + "shape":"String", + "documentation":"

Not supported.

" + }, + "CopyTagsToSnapshot":{ + "shape":"BooleanOptional", + "documentation":"

True to copy all tags from the DB instance to snapshots of the DB instance, and otherwise false. The default is false.

" + }, + "MonitoringInterval":{ + "shape":"IntegerOptional", + "documentation":"

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0.

If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0.

Valid Values: 0, 1, 5, 10, 15, 30, 60

" + }, + "DBPortNumber":{ + "shape":"IntegerOptional", + "documentation":"

The port number on which the database accepts connections.

The value of the DBPortNumber parameter must not match any of the port values specified for options in the option group for the DB instance.

Your database will restart when you change the DBPortNumber value regardless of the value of the ApplyImmediately parameter.

Default: 8182

" + }, + "PubliclyAccessible":{ + "shape":"BooleanOptional", + "documentation":"

This parameter is not supported.

", + "deprecated":true + }, + "MonitoringRoleArn":{ + "shape":"String", + "documentation":"

The ARN for the IAM role that permits Neptune to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess.

If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value.

" + }, + "DomainIAMRoleName":{ + "shape":"String", + "documentation":"

Not supported

" + }, + "PromotionTier":{ + "shape":"IntegerOptional", + "documentation":"

A value that specifies the order in which a Read Replica is promoted to the primary instance after a failure of the existing primary instance.

Default: 1

Valid Values: 0 - 15

" + }, + "EnableIAMDatabaseAuthentication":{ + "shape":"BooleanOptional", + "documentation":"

True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.

You can enable IAM database authentication for the following database engines

Not applicable. Mapping AWS IAM accounts to database accounts is managed by the DB cluster. For more information, see ModifyDBCluster.

Default: false

" + }, + "EnablePerformanceInsights":{ + "shape":"BooleanOptional", + "documentation":"

True to enable Performance Insights for the DB instance, and otherwise false.

" + }, + "PerformanceInsightsKMSKeyId":{ + "shape":"String", + "documentation":"

The AWS KMS key identifier for encryption of Performance Insights data. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

" + }, + "CloudwatchLogsExportConfiguration":{ + "shape":"CloudwatchLogsExportConfiguration", + "documentation":"

The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB instance or DB cluster.

" + } + }, + "documentation":"

" + }, + "ModifyDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "ModifyDBParameterGroupMessage":{ + "type":"structure", + "required":[ + "DBParameterGroupName", + "Parameters" + ], + "members":{ + "DBParameterGroupName":{ + "shape":"String", + "documentation":"

The name of the DB parameter group.

Constraints:

  • If supplied, must match the name of an existing DBParameterGroup.

" + }, + "Parameters":{ + "shape":"ParametersList", + "documentation":"

An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters can be modified in a single request.

Valid Values (for the application method): immediate | pending-reboot

You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when you reboot the DB instance without failover.

" + } + }, + "documentation":"

" + }, + "ModifyDBSubnetGroupMessage":{ + "type":"structure", + "required":[ + "DBSubnetGroupName", + "SubnetIds" + ], + "members":{ + "DBSubnetGroupName":{ + "shape":"String", + "documentation":"

The name for the DB subnet group. This value is stored as a lowercase string. You can't modify the default subnet group.

Constraints: Must match the name of an existing DBSubnetGroup. Must not be default.

Example: mySubnetgroup

" + }, + "DBSubnetGroupDescription":{ + "shape":"String", + "documentation":"

The description for the DB subnet group.

" + }, + "SubnetIds":{ + "shape":"SubnetIdentifierList", + "documentation":"

The EC2 subnet IDs for the DB subnet group.

" + } + }, + "documentation":"

" + }, + "ModifyDBSubnetGroupResult":{ + "type":"structure", + "members":{ + "DBSubnetGroup":{"shape":"DBSubnetGroup"} + } + }, + "ModifyEventSubscriptionMessage":{ + "type":"structure", + "required":["SubscriptionName"], + "members":{ + "SubscriptionName":{ + "shape":"String", + "documentation":"

The name of the event notification subscription.

" + }, + "SnsTopicArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.

" + }, + "SourceType":{ + "shape":"String", + "documentation":"

The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. If this value is not specified, all events are returned.

Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot

" + }, + "EventCategories":{ + "shape":"EventCategoriesList", + "documentation":"

A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType by using the DescribeEventCategories action.

" + }, + "Enabled":{ + "shape":"BooleanOptional", + "documentation":"

A Boolean value; set to true to activate the subscription.

" + } + }, + "documentation":"

" + }, + "ModifyEventSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "OptionGroupMembership":{ + "type":"structure", + "members":{ + "OptionGroupName":{ + "shape":"String", + "documentation":"

The name of the option group that the instance belongs to.

" + }, + "Status":{ + "shape":"String", + "documentation":"

The status of the DB instance's option group membership. Valid values are: in-sync, pending-apply, pending-removal, pending-maintenance-apply, pending-maintenance-removal, applying, removing, and failed.

" + } + }, + "documentation":"

Provides information on the option groups the DB instance is a member of.

" + }, + "OptionGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"OptionGroupMembership", + "locationName":"OptionGroupMembership" + } + }, + "OptionGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"OptionGroupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "OrderableDBInstanceOption":{ + "type":"structure", + "members":{ + "Engine":{ + "shape":"String", + "documentation":"

The engine type of a DB instance.

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

The engine version of a DB instance.

" + }, + "DBInstanceClass":{ + "shape":"String", + "documentation":"

The DB instance class for a DB instance.

" + }, + "LicenseModel":{ + "shape":"String", + "documentation":"

The license model for a DB instance.

" + }, + "AvailabilityZones":{ + "shape":"AvailabilityZoneList", + "documentation":"

A list of Availability Zones for a DB instance.

" + }, + "MultiAZCapable":{ + "shape":"Boolean", + "documentation":"

Indicates whether a DB instance is Multi-AZ capable.

" + }, + "ReadReplicaCapable":{ + "shape":"Boolean", + "documentation":"

Indicates whether a DB instance can have a Read Replica.

" + }, + "Vpc":{ + "shape":"Boolean", + "documentation":"

Indicates whether a DB instance is in a VPC.

" + }, + "SupportsStorageEncryption":{ + "shape":"Boolean", + "documentation":"

Indicates whether a DB instance supports encrypted storage.

" + }, + "StorageType":{ + "shape":"String", + "documentation":"

Indicates the storage type for a DB instance.

" + }, + "SupportsIops":{ + "shape":"Boolean", + "documentation":"

Indicates whether a DB instance supports provisioned IOPS.

" + }, + "SupportsEnhancedMonitoring":{ + "shape":"Boolean", + "documentation":"

Indicates whether a DB instance supports Enhanced Monitoring at intervals from 1 to 60 seconds.

" + }, + "SupportsIAMDatabaseAuthentication":{ + "shape":"Boolean", + "documentation":"

Indicates whether a DB instance supports IAM database authentication.

" + }, + "SupportsPerformanceInsights":{ + "shape":"Boolean", + "documentation":"

True if a DB instance supports Performance Insights, otherwise false.

" + }, + "MinStorageSize":{ + "shape":"IntegerOptional", + "documentation":"

Minimum storage size for a DB instance.

" + }, + "MaxStorageSize":{ + "shape":"IntegerOptional", + "documentation":"

Maximum storage size for a DB instance.

" + }, + "MinIopsPerDbInstance":{ + "shape":"IntegerOptional", + "documentation":"

Minimum total provisioned IOPS for a DB instance.

" + }, + "MaxIopsPerDbInstance":{ + "shape":"IntegerOptional", + "documentation":"

Maximum total provisioned IOPS for a DB instance.

" + }, + "MinIopsPerGib":{ + "shape":"DoubleOptional", + "documentation":"

Minimum provisioned IOPS per GiB for a DB instance.

" + }, + "MaxIopsPerGib":{ + "shape":"DoubleOptional", + "documentation":"

Maximum provisioned IOPS per GiB for a DB instance.

" + } + }, + "documentation":"

Contains a list of available options for a DB instance.

This data type is used as a response element in the DescribeOrderableDBInstanceOptions action.

", + "wrapper":true + }, + "OrderableDBInstanceOptionsList":{ + "type":"list", + "member":{ + "shape":"OrderableDBInstanceOption", + "locationName":"OrderableDBInstanceOption" + } + }, + "OrderableDBInstanceOptionsMessage":{ + "type":"structure", + "members":{ + "OrderableDBInstanceOptions":{ + "shape":"OrderableDBInstanceOptionsList", + "documentation":"

An OrderableDBInstanceOption structure containing information about orderable options for the DB instance.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous OrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .

" + } + }, + "documentation":"

Contains the result of a successful invocation of the DescribeOrderableDBInstanceOptions action.

" + }, + "Parameter":{ + "type":"structure", + "members":{ + "ParameterName":{ + "shape":"String", + "documentation":"

Specifies the name of the parameter.

" + }, + "ParameterValue":{ + "shape":"String", + "documentation":"

Specifies the value of the parameter.

" + }, + "Description":{ + "shape":"String", + "documentation":"

Provides a description of the parameter.

" + }, + "Source":{ + "shape":"String", + "documentation":"

Indicates the source of the parameter value.

" + }, + "ApplyType":{ + "shape":"String", + "documentation":"

Specifies the engine specific parameters type.

" + }, + "DataType":{ + "shape":"String", + "documentation":"

Specifies the valid data type for the parameter.

" + }, + "AllowedValues":{ + "shape":"String", + "documentation":"

Specifies the valid range of values for the parameter.

" + }, + "IsModifiable":{ + "shape":"Boolean", + "documentation":"

Indicates whether (true) or not (false) the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.

" + }, + "MinimumEngineVersion":{ + "shape":"String", + "documentation":"

The earliest engine version to which the parameter can apply.

" + }, + "ApplyMethod":{ + "shape":"ApplyMethod", + "documentation":"

Indicates when to apply parameter updates.

" + } + }, + "documentation":"

This data type is used as a request parameter in the ModifyDBParameterGroup and ResetDBParameterGroup actions.

This data type is used as a response element in the DescribeEngineDefaultParameters and DescribeDBParameters actions.

" + }, + "ParametersList":{ + "type":"list", + "member":{ + "shape":"Parameter", + "locationName":"Parameter" + } + }, + "PendingCloudwatchLogsExports":{ + "type":"structure", + "members":{ + "LogTypesToEnable":{ + "shape":"LogTypeList", + "documentation":"

Log types that are in the process of being enabled. After they are enabled, these log types are exported to CloudWatch Logs.

" + }, + "LogTypesToDisable":{ + "shape":"LogTypeList", + "documentation":"

Log types that are in the process of being deactivated. After they are deactivated, these log types aren't exported to CloudWatch Logs.

" + } + }, + "documentation":"

A list of the log types whose configuration is still pending. In other words, these log types are in the process of being activated or deactivated.

" + }, + "PendingMaintenanceAction":{ + "type":"structure", + "members":{ + "Action":{ + "shape":"String", + "documentation":"

The type of pending maintenance action that is available for the resource.

" + }, + "AutoAppliedAfterDate":{ + "shape":"TStamp", + "documentation":"

The date of the maintenance window when the action is applied. The maintenance action is applied to the resource during its first maintenance window after this date. If this date is specified, any next-maintenance opt-in requests are ignored.

" + }, + "ForcedApplyDate":{ + "shape":"TStamp", + "documentation":"

The date when the maintenance action is automatically applied. The maintenance action is applied to the resource on this date regardless of the maintenance window for the resource. If this date is specified, any immediate opt-in requests are ignored.

" + }, + "OptInStatus":{ + "shape":"String", + "documentation":"

Indicates the type of opt-in request that has been received for the resource.

" + }, + "CurrentApplyDate":{ + "shape":"TStamp", + "documentation":"

The effective date when the pending maintenance action is applied to the resource. This date takes into account opt-in requests received from the ApplyPendingMaintenanceAction API, the AutoAppliedAfterDate, and the ForcedApplyDate. This value is blank if an opt-in request has not been received and nothing has been specified as AutoAppliedAfterDate or ForcedApplyDate.

" + }, + "Description":{ + "shape":"String", + "documentation":"

A description providing more detail about the maintenance action.

" + } + }, + "documentation":"

Provides information about a pending maintenance action for a resource.

" + }, + "PendingMaintenanceActionDetails":{ + "type":"list", + "member":{ + "shape":"PendingMaintenanceAction", + "locationName":"PendingMaintenanceAction" + } + }, + "PendingMaintenanceActions":{ + "type":"list", + "member":{ + "shape":"ResourcePendingMaintenanceActions", + "locationName":"ResourcePendingMaintenanceActions" + } + }, + "PendingMaintenanceActionsMessage":{ + "type":"structure", + "members":{ + "PendingMaintenanceActions":{ + "shape":"PendingMaintenanceActions", + "documentation":"

A list of the pending maintenance actions for the resource.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous DescribePendingMaintenanceActions request. If this parameter is specified, the response includes only records beyond the marker, up to a number of records specified by MaxRecords.

" + } + }, + "documentation":"

Data returned from the DescribePendingMaintenanceActions action.

" + }, + "PendingModifiedValues":{ + "type":"structure", + "members":{ + "DBInstanceClass":{ + "shape":"String", + "documentation":"

Contains the new DBInstanceClass for the DB instance that will be applied or is currently being applied.

" + }, + "AllocatedStorage":{ + "shape":"IntegerOptional", + "documentation":"

Contains the new AllocatedStorage size for the DB instance that will be applied or is currently being applied.

" + }, + "MasterUserPassword":{ + "shape":"String", + "documentation":"

Contains the pending or currently-in-progress change of the master credentials for the DB instance.

" + }, + "Port":{ + "shape":"IntegerOptional", + "documentation":"

Specifies the pending port for the DB instance.

" + }, + "BackupRetentionPeriod":{ + "shape":"IntegerOptional", + "documentation":"

Specifies the pending number of days for which automated backups are retained.

" + }, + "MultiAZ":{ + "shape":"BooleanOptional", + "documentation":"

Indicates that the Single-AZ DB instance is to change to a Multi-AZ deployment.

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

Indicates the database engine version.

" + }, + "LicenseModel":{ + "shape":"String", + "documentation":"

The license model for the DB instance.

Valid values: license-included | bring-your-own-license | general-public-license

" + }, + "Iops":{ + "shape":"IntegerOptional", + "documentation":"

Specifies the new Provisioned IOPS value for the DB instance that will be applied or is currently being applied.

" + }, + "DBInstanceIdentifier":{ + "shape":"String", + "documentation":"

Contains the new DBInstanceIdentifier for the DB instance that will be applied or is currently being applied.

" + }, + "StorageType":{ + "shape":"String", + "documentation":"

Specifies the storage type to be associated with the DB instance.

" + }, + "CACertificateIdentifier":{ + "shape":"String", + "documentation":"

Specifies the identifier of the CA certificate for the DB instance.

" + }, + "DBSubnetGroupName":{ + "shape":"String", + "documentation":"

The new DB subnet group for the DB instance.

" + }, + "PendingCloudwatchLogsExports":{"shape":"PendingCloudwatchLogsExports"} + }, + "documentation":"

This data type is used as a response element in the ModifyDBInstance action.

" + }, + "PromoteReadReplicaDBClusterMessage":{ + "type":"structure", + "required":["DBClusterIdentifier"], + "members":{ + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the DB cluster Read Replica to promote. This parameter is not case-sensitive.

Constraints:

  • Must match the identifier of an existing DBCluster Read Replica.

Example: my-cluster-replica1

" + } + }, + "documentation":"

" + }, + "PromoteReadReplicaDBClusterResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, + "ProvisionedIopsNotAvailableInAZFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

Provisioned IOPS not available in the specified Availability Zone.

", + "error":{ + "code":"ProvisionedIopsNotAvailableInAZFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Range":{ + "type":"structure", + "members":{ + "From":{ + "shape":"Integer", + "documentation":"

The minimum value in the range.

" + }, + "To":{ + "shape":"Integer", + "documentation":"

The maximum value in the range.

" + }, + "Step":{ + "shape":"IntegerOptional", + "documentation":"

The step value for the range. For example, if you have a range of 5,000 to 10,000, with a step value of 1,000, the valid values start at 5,000 and step up by 1,000. Even though 7,500 is within the range, it isn't a valid value for the range. The valid values are 5,000, 6,000, 7,000, 8,000...

" + } + }, + "documentation":"

A range of integer values.

" + }, + "RangeList":{ + "type":"list", + "member":{ + "shape":"Range", + "locationName":"Range" + } + }, + "ReadReplicaDBClusterIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ReadReplicaDBClusterIdentifier" + } + }, + "ReadReplicaDBInstanceIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ReadReplicaDBInstanceIdentifier" + } + }, + "ReadReplicaIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"ReadReplicaIdentifier" + } + }, + "RebootDBInstanceMessage":{ + "type":"structure", + "required":["DBInstanceIdentifier"], + "members":{ + "DBInstanceIdentifier":{ + "shape":"String", + "documentation":"

The DB instance identifier. This parameter is stored as a lowercase string.

Constraints:

  • Must match the identifier of an existing DBInstance.

" + }, + "ForceFailover":{ + "shape":"BooleanOptional", + "documentation":"

When true, the reboot is conducted through a MultiAZ failover.

Constraint: You can't specify true if the instance is not configured for MultiAZ.

" + } + }, + "documentation":"

" + }, + "RebootDBInstanceResult":{ + "type":"structure", + "members":{ + "DBInstance":{"shape":"DBInstance"} + } + }, + "RemoveRoleFromDBClusterMessage":{ + "type":"structure", + "required":[ + "DBClusterIdentifier", + "RoleArn" + ], + "members":{ + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The name of the DB cluster to disassociate the IAM role from.

" + }, + "RoleArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to disassociate from the DB cluster, for example arn:aws:iam::123456789012:role/NeptuneAccessRole.

" + } + } + }, + "RemoveSourceIdentifierFromSubscriptionMessage":{ + "type":"structure", + "required":[ + "SubscriptionName", + "SourceIdentifier" + ], + "members":{ + "SubscriptionName":{ + "shape":"String", + "documentation":"

The name of the event notification subscription you want to remove a source identifier from.

" + }, + "SourceIdentifier":{ + "shape":"String", + "documentation":"

The source identifier to be removed from the subscription, such as the DB instance identifier for a DB instance or the name of a security group.

" + } + }, + "documentation":"

" + }, + "RemoveSourceIdentifierFromSubscriptionResult":{ + "type":"structure", + "members":{ + "EventSubscription":{"shape":"EventSubscription"} + } + }, + "RemoveTagsFromResourceMessage":{ + "type":"structure", + "required":[ + "ResourceName", + "TagKeys" + ], + "members":{ + "ResourceName":{ + "shape":"String", + "documentation":"

The Amazon Neptune resource that the tags are removed from. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an Amazon Resource Name (ARN).

" + }, + "TagKeys":{ + "shape":"KeyList", + "documentation":"

The tag key (name) of the tag to be removed.

" + } + }, + "documentation":"

" + }, + "ResetDBClusterParameterGroupMessage":{ + "type":"structure", + "required":["DBClusterParameterGroupName"], + "members":{ + "DBClusterParameterGroupName":{ + "shape":"String", + "documentation":"

The name of the DB cluster parameter group to reset.

" + }, + "ResetAllParameters":{ + "shape":"Boolean", + "documentation":"

A value that is set to true to reset all parameters in the DB cluster parameter group to their default values, and false otherwise. You can't use this parameter if there is a list of parameter names specified for the Parameters parameter.

" + }, + "Parameters":{ + "shape":"ParametersList", + "documentation":"

A list of parameter names in the DB cluster parameter group to reset to the default values. You can't use this parameter if the ResetAllParameters parameter is set to true.

" + } + }, + "documentation":"

" + }, + "ResetDBParameterGroupMessage":{ + "type":"structure", + "required":["DBParameterGroupName"], + "members":{ + "DBParameterGroupName":{ + "shape":"String", + "documentation":"

The name of the DB parameter group.

Constraints:

  • Must match the name of an existing DBParameterGroup.

" + }, + "ResetAllParameters":{ + "shape":"Boolean", + "documentation":"

Specifies whether (true) or not (false) to reset all parameters in the DB parameter group to default values.

Default: true

" + }, + "Parameters":{ + "shape":"ParametersList", + "documentation":"

To reset the entire DB parameter group, specify the DBParameterGroup name and ResetAllParameters parameters. To reset specific parameters, provide a list of the following: ParameterName and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

Valid Values (for Apply method): pending-reboot

" + } + }, + "documentation":"

" + }, + "ResourceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified resource ID was not found.

", + "error":{ + "code":"ResourceNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResourcePendingMaintenanceActions":{ + "type":"structure", + "members":{ + "ResourceIdentifier":{ + "shape":"String", + "documentation":"

The ARN of the resource that has pending maintenance actions.

" + }, + "PendingMaintenanceActionDetails":{ + "shape":"PendingMaintenanceActionDetails", + "documentation":"

A list that provides details about the pending maintenance actions for the resource.

" + } + }, + "documentation":"

Describes the pending maintenance actions for a resource.

", + "wrapper":true + }, + "RestoreDBClusterFromSnapshotMessage":{ + "type":"structure", + "required":[ + "DBClusterIdentifier", + "SnapshotIdentifier", + "Engine" + ], + "members":{ + "AvailabilityZones":{ + "shape":"AvailabilityZones", + "documentation":"

Provides the list of EC2 Availability Zones that instances in the restored DB cluster can be created in.

" + }, + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The name of the DB cluster to create from the DB snapshot or DB cluster snapshot. This parameter isn't case-sensitive.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens

  • First character must be a letter

  • Cannot end with a hyphen or contain two consecutive hyphens

Example: my-snapshot-id

" + }, + "SnapshotIdentifier":{ + "shape":"String", + "documentation":"

The identifier for the DB snapshot or DB cluster snapshot to restore from.

You can use either the name or the Amazon Resource Name (ARN) to specify a DB cluster snapshot. However, you can use only the ARN to specify a DB snapshot.

Constraints:

  • Must match the identifier of an existing Snapshot.

" + }, + "Engine":{ + "shape":"String", + "documentation":"

The database engine to use for the new DB cluster.

Default: The same as source

Constraint: Must be compatible with the engine of the source

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

The version of the database engine to use for the new DB cluster.

" + }, + "Port":{ + "shape":"IntegerOptional", + "documentation":"

The port number on which the new DB cluster accepts connections.

Constraints: Value must be 1150-65535

Default: The same port as the original DB cluster.

" + }, + "DBSubnetGroupName":{ + "shape":"String", + "documentation":"

The name of the DB subnet group to use for the new DB cluster.

Constraints: If supplied, must match the name of an existing DBSubnetGroup.

Example: mySubnetgroup

" + }, + "DatabaseName":{ + "shape":"String", + "documentation":"

The database name for the restored DB cluster.

" + }, + "OptionGroupName":{ + "shape":"String", + "documentation":"

The name of the option group to use for the restored DB cluster.

" + }, + "VpcSecurityGroupIds":{ + "shape":"VpcSecurityGroupIdList", + "documentation":"

A list of VPC security groups that the new DB cluster will belong to.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to be assigned to the restored DB cluster.

" + }, + "KmsKeyId":{ + "shape":"String", + "documentation":"

The AWS KMS key identifier to use when restoring an encrypted DB cluster from a DB snapshot or DB cluster snapshot.

The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are restoring a DB cluster with the same AWS account that owns the KMS encryption key used to encrypt the new DB cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.

If you do not specify a value for the KmsKeyId parameter, then the following will occur:

  • If the DB snapshot or DB cluster snapshot in SnapshotIdentifier is encrypted, then the restored DB cluster is encrypted using the KMS key that was used to encrypt the DB snapshot or DB cluster snapshot.

  • If the DB snapshot or DB cluster snapshot in SnapshotIdentifier is not encrypted, then the restored DB cluster is not encrypted.

" + }, + "EnableIAMDatabaseAuthentication":{ + "shape":"BooleanOptional", + "documentation":"

True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.

Default: false

" + } + }, + "documentation":"

" + }, + "RestoreDBClusterFromSnapshotResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, + "RestoreDBClusterToPointInTimeMessage":{ + "type":"structure", + "required":[ + "DBClusterIdentifier", + "SourceDBClusterIdentifier" + ], + "members":{ + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The name of the new DB cluster to be created.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens

  • First character must be a letter

  • Cannot end with a hyphen or contain two consecutive hyphens

" + }, + "RestoreType":{ + "shape":"String", + "documentation":"

The type of restore to be performed. You can specify one of the following values:

  • full-copy - The new DB cluster is restored as a full copy of the source DB cluster.

  • copy-on-write - The new DB cluster is restored as a clone of the source DB cluster.

Constraints: You can't specify copy-on-write if the engine version of the source DB cluster is earlier than 1.11.

If you don't specify a RestoreType value, then the new DB cluster is restored as a full copy of the source DB cluster.

" + }, + "SourceDBClusterIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the source DB cluster from which to restore.

Constraints:

  • Must match the identifier of an existing DBCluster.

" + }, + "RestoreToTime":{ + "shape":"TStamp", + "documentation":"

The date and time to restore the DB cluster to.

Valid Values: Value must be a time in Universal Coordinated Time (UTC) format

Constraints:

  • Must be before the latest restorable time for the DB instance

  • Must be specified if UseLatestRestorableTime parameter is not provided

  • Cannot be specified if UseLatestRestorableTime parameter is true

  • Cannot be specified if RestoreType parameter is copy-on-write

Example: 2015-03-07T23:45:00Z

" + }, + "UseLatestRestorableTime":{ + "shape":"Boolean", + "documentation":"

A value that is set to true to restore the DB cluster to the latest restorable backup time, and false otherwise.

Default: false

Constraints: Cannot be specified if RestoreToTime parameter is provided.

" + }, + "Port":{ + "shape":"IntegerOptional", + "documentation":"

The port number on which the new DB cluster accepts connections.

Constraints: Value must be 1150-65535

Default: The same port as the original DB cluster.

" + }, + "DBSubnetGroupName":{ + "shape":"String", + "documentation":"

The DB subnet group name to use for the new DB cluster.

Constraints: If supplied, must match the name of an existing DBSubnetGroup.

Example: mySubnetgroup

" + }, + "OptionGroupName":{ + "shape":"String", + "documentation":"

The name of the option group for the new DB cluster.

" + }, + "VpcSecurityGroupIds":{ + "shape":"VpcSecurityGroupIdList", + "documentation":"

A list of VPC security groups that the new DB cluster belongs to.

" + }, + "Tags":{"shape":"TagList"}, + "KmsKeyId":{ + "shape":"String", + "documentation":"

The AWS KMS key identifier to use when restoring an encrypted DB cluster from an encrypted DB cluster.

The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are restoring a DB cluster with the same AWS account that owns the KMS encryption key used to encrypt the new DB cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.

You can restore to a new DB cluster and encrypt the new DB cluster with a KMS key that is different than the KMS key used to encrypt the source DB cluster. The new DB cluster is encrypted with the KMS key identified by the KmsKeyId parameter.

If you do not specify a value for the KmsKeyId parameter, then the following will occur:

  • If the DB cluster is encrypted, then the restored DB cluster is encrypted using the KMS key that was used to encrypt the source DB cluster.

  • If the DB cluster is not encrypted, then the restored DB cluster is not encrypted.

If DBClusterIdentifier refers to a DB cluster that is not encrypted, then the restore request is rejected.

" + }, + "EnableIAMDatabaseAuthentication":{ + "shape":"BooleanOptional", + "documentation":"

True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.

Default: false

" + } + }, + "documentation":"

" + }, + "RestoreDBClusterToPointInTimeResult":{ + "type":"structure", + "members":{ + "DBCluster":{"shape":"DBCluster"} + } + }, + "SNSInvalidTopicFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSInvalidTopic", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SNSNoAuthorizationFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSNoAuthorization", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SNSTopicArnNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SNSTopicArnNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SharedSnapshotQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

You have exceeded the maximum number of accounts that you can share a manual DB snapshot with.

", + "error":{ + "code":"SharedSnapshotQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SnapshotQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

Request would result in user exceeding the allowed number of DB snapshots.

", + "error":{ + "code":"SnapshotQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SourceIdsList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SourceId" + } + }, + "SourceNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SourceType":{ + "type":"string", + "enum":[ + "db-instance", + "db-parameter-group", + "db-security-group", + "db-snapshot", + "db-cluster", + "db-cluster-snapshot" + ] + }, + "StorageQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

Request would result in user exceeding the allowed amount of storage available across all DB instances.

", + "error":{ + "code":"StorageQuotaExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "StorageTypeNotSupportedFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

StorageType specified cannot be associated with the DB Instance.

", + "error":{ + "code":"StorageTypeNotSupported", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "String":{"type":"string"}, + "Subnet":{ + "type":"structure", + "members":{ + "SubnetIdentifier":{ + "shape":"String", + "documentation":"

Specifies the identifier of the subnet.

" + }, + "SubnetAvailabilityZone":{"shape":"AvailabilityZone"}, + "SubnetStatus":{ + "shape":"String", + "documentation":"

Specifies the status of the subnet.

" + } + }, + "documentation":"

This data type is used as a response element in the DescribeDBSubnetGroups action.

" + }, + "SubnetAlreadyInUse":{ + "type":"structure", + "members":{ + }, + "documentation":"

The DB subnet is already in use in the Availability Zone.

", + "error":{ + "code":"SubnetAlreadyInUse", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubnetIdentifierList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"SubnetIdentifier" + } + }, + "SubnetList":{ + "type":"list", + "member":{ + "shape":"Subnet", + "locationName":"Subnet" + } + }, + "SubscriptionAlreadyExistFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionAlreadyExist", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "SubscriptionCategoryNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionCategoryNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SubscriptionNotFoundFault":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"SubscriptionNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SupportedCharacterSetsList":{ + "type":"list", + "member":{ + "shape":"CharacterSet", + "locationName":"CharacterSet" + } + }, + "SupportedTimezonesList":{ + "type":"list", + "member":{ + "shape":"Timezone", + "locationName":"Timezone" + } + }, + "TStamp":{"type":"timestamp"}, + "Tag":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"String", + "documentation":"

A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with \"aws:\" or \"rds:\". The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

" + }, + "Value":{ + "shape":"String", + "documentation":"

A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with \"aws:\" or \"rds:\". The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

" + } + }, + "documentation":"

Metadata assigned to an Amazon Neptune resource consisting of a key-value pair.

" + }, + "TagList":{ + "type":"list", + "member":{ + "shape":"Tag", + "locationName":"Tag" + }, + "documentation":"

A list of tags. For more information, see Tagging Amazon Neptune Resources.

" + }, + "TagListMessage":{ + "type":"structure", + "members":{ + "TagList":{ + "shape":"TagList", + "documentation":"

List of tags returned by the ListTagsForResource operation.

" + } + }, + "documentation":"

" + }, + "Timezone":{ + "type":"structure", + "members":{ + "TimezoneName":{ + "shape":"String", + "documentation":"

The name of the time zone.

" + } + }, + "documentation":"

A time zone associated with a DBInstance. This data type is an element in the response to the DescribeDBInstances, and the DescribeDBEngineVersions actions.

" + }, + "UpgradeTarget":{ + "type":"structure", + "members":{ + "Engine":{ + "shape":"String", + "documentation":"

The name of the upgrade target database engine.

" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

The version number of the upgrade target database engine.

" + }, + "Description":{ + "shape":"String", + "documentation":"

The version of the database engine that a DB instance can be upgraded to.

" + }, + "AutoUpgrade":{ + "shape":"Boolean", + "documentation":"

A value that indicates whether the target version is applied to any source DB instances that have AutoMinorVersionUpgrade set to true.

" + }, + "IsMajorVersionUpgrade":{ + "shape":"Boolean", + "documentation":"

A value that indicates whether a database engine is upgraded to a major version.

" + } + }, + "documentation":"

The version of the database engine that a DB instance can be upgraded to.

" + }, + "ValidDBInstanceModificationsMessage":{ + "type":"structure", + "members":{ + "Storage":{ + "shape":"ValidStorageOptionsList", + "documentation":"

Valid storage options for your DB instance.

" + } + }, + "documentation":"

Information about valid modifications that you can make to your DB instance. Contains the result of a successful call to the DescribeValidDBInstanceModifications action. You can use this information when you call ModifyDBInstance.

", + "wrapper":true + }, + "ValidStorageOptions":{ + "type":"structure", + "members":{ + "StorageType":{ + "shape":"String", + "documentation":"

The valid storage types for your DB instance. For example, gp2, io1.

" + }, + "StorageSize":{ + "shape":"RangeList", + "documentation":"

The valid range of storage in gibibytes. For example, 100 to 16384.

" + }, + "ProvisionedIops":{ + "shape":"RangeList", + "documentation":"

The valid range of provisioned IOPS. For example, 1000-20000.

" + }, + "IopsToStorageRatio":{ + "shape":"DoubleRangeList", + "documentation":"

The valid range of Provisioned IOPS to gibibytes of storage multiplier. For example, 3-10, which means that provisioned IOPS can be between 3 and 10 times storage.

" + } + }, + "documentation":"

Information about valid modifications that you can make to your DB instance. Contains the result of a successful call to the DescribeValidDBInstanceModifications action.

" + }, + "ValidStorageOptionsList":{ + "type":"list", + "member":{ + "shape":"ValidStorageOptions", + "locationName":"ValidStorageOptions" + } + }, + "ValidUpgradeTargetList":{ + "type":"list", + "member":{ + "shape":"UpgradeTarget", + "locationName":"UpgradeTarget" + } + }, + "VpcSecurityGroupIdList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"VpcSecurityGroupId" + } + }, + "VpcSecurityGroupMembership":{ + "type":"structure", + "members":{ + "VpcSecurityGroupId":{ + "shape":"String", + "documentation":"

The name of the VPC security group.

" + }, + "Status":{ + "shape":"String", + "documentation":"

The status of the VPC security group.

" + } + }, + "documentation":"

This data type is used as a response element for queries on VPC security group membership.

" + }, + "VpcSecurityGroupMembershipList":{ + "type":"list", + "member":{ + "shape":"VpcSecurityGroupMembership", + "locationName":"VpcSecurityGroupMembership" + } + } + }, + "documentation":"Amazon Neptune

Amazon Neptune is a fast, reliable, fully-managed graph database service that makes it easy to build and run applications that work with highly connected datasets. The core of Amazon Neptune is a purpose-built, high-performance graph database engine optimized for storing billions of relationships and querying the graph with milliseconds latency. Amazon Neptune supports popular graph models Property Graph and W3C's RDF, and their respective query languages Apache TinkerPop Gremlin and SPARQL, allowing you to easily build queries that efficiently navigate highly connected datasets. Neptune powers graph use cases such as recommendation engines, fraud detection, knowledge graphs, drug discovery, and network security.

This interface reference for Amazon Neptune contains documentation for a programming or command line interface you can use to manage Amazon Neptune. Note that Amazon Neptune is asynchronous, which means that some interfaces might require techniques such as polling or callback functions to determine when a command has been applied. In this reference, the parameter descriptions indicate whether a command is applied immediately, on the next instance reboot, or during the maintenance window. The reference structure is as follows, and we list following some related topics from the user guide.

Amazon Neptune API Reference

" +} diff --git a/botocore/data/neptune/2014-10-31/waiters-2.json b/botocore/data/neptune/2014-10-31/waiters-2.json new file mode 100644 index 00000000..e75f03b2 --- /dev/null +++ b/botocore/data/neptune/2014-10-31/waiters-2.json @@ -0,0 +1,90 @@ +{ + "version": 2, + "waiters": { + "DBInstanceAvailable": { + "delay": 30, + "operation": "DescribeDBInstances", + "maxAttempts": 60, + "acceptors": [ + { + "expected": "available", + "matcher": "pathAll", + "state": "success", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "deleted", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "deleting", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "failed", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "incompatible-restore", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "incompatible-parameters", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + } + ] + }, + "DBInstanceDeleted": { + "delay": 30, + "operation": "DescribeDBInstances", + "maxAttempts": 60, + "acceptors": [ + { + "expected": "deleted", + "matcher": "pathAll", + "state": "success", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "DBInstanceNotFound", + "matcher": "error", + "state": "success" + }, + { + "expected": "creating", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "modifying", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "rebooting", + "matcher": "pathAny", + "state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + }, + { + "expected": "resetting-master-credentials", + "matcher": "pathAny", + 
"state": "failure", + "argument": "DBInstances[].DBInstanceStatus" + } + ] + } + } +} diff --git a/botocore/data/opsworks/2013-02-18/service-2.json b/botocore/data/opsworks/2013-02-18/service-2.json index 92e2fc80..a6f2ba97 100644 --- a/botocore/data/opsworks/2013-02-18/service-2.json +++ b/botocore/data/opsworks/2013-02-18/service-2.json @@ -76,7 +76,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Creates a clone of a specified stack. For more information, see Clone a Stack. By default, all parameters are set to the values used by the parent stack.

Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Creates a clone of a specified stack. For more information, see Clone a Stack. By default, all parameters are set to the values used by the parent stack.

Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

" }, "CreateApp":{ "name":"CreateApp", @@ -145,7 +145,7 @@ "errors":[ {"shape":"ValidationException"} ], - "documentation":"

Creates a new stack. For more information, see Create a New Stack.

Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Creates a new stack. For more information, see Create a New Stack.

Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

" }, "CreateUserProfile":{ "name":"CreateUserProfile", @@ -158,7 +158,7 @@ "errors":[ {"shape":"ValidationException"} ], - "documentation":"

Creates a new user profile.

Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Creates a new user profile.

Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

" }, "DeleteApp":{ "name":"DeleteApp", @@ -223,7 +223,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Deletes a user profile.

Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Deletes a user profile.

Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

" }, "DeregisterEcsCluster":{ "name":"DeregisterEcsCluster", @@ -262,7 +262,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Deregister a registered Amazon EC2 or on-premises instance. This action removes the instance from the stack and returns it to your control. This action can not be used with instances that were created with AWS OpsWorks Stacks.

Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Deregister a registered Amazon EC2 or on-premises instance. This action removes the instance from the stack and returns it to your control. This action cannot be used with instances that were created with AWS OpsWorks Stacks.

Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" }, "DeregisterRdsDbInstance":{ "name":"DeregisterRdsDbInstance", @@ -316,7 +316,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Requests a description of a specified set of apps.

This call accepts only one resource-identifying parameter.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Requests a description of a specified set of apps.

This call accepts only one resource-identifying parameter.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

" }, "DescribeCommands":{ "name":"DescribeCommands", @@ -330,7 +330,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Describes the results of specified commands.

This call accepts only one resource-identifying parameter.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Describes the results of specified commands.

This call accepts only one resource-identifying parameter.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

" }, "DescribeDeployments":{ "name":"DescribeDeployments", @@ -344,7 +344,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Requests a description of a specified set of deployments.

This call accepts only one resource-identifying parameter.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Requests a description of a specified set of deployments.

This call accepts only one resource-identifying parameter.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

" }, "DescribeEcsClusters":{ "name":"DescribeEcsClusters", @@ -358,7 +358,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Describes Amazon ECS clusters that are registered with a stack. If you specify only a stack ID, you can use the MaxResults and NextToken parameters to paginate the response. However, AWS OpsWorks Stacks currently supports only one cluster per layer, so the result set has a maximum of one element.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack or an attached policy that explicitly grants permission. For more information on user permissions, see Managing User Permissions.

This call accepts only one resource-identifying parameter.

" + "documentation":"

Describes Amazon ECS clusters that are registered with a stack. If you specify only a stack ID, you can use the MaxResults and NextToken parameters to paginate the response. However, AWS OpsWorks Stacks currently supports only one cluster per layer, so the result set has a maximum of one element.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack or an attached policy that explicitly grants permission. For more information about user permissions, see Managing User Permissions.

This call accepts only one resource-identifying parameter.

" }, "DescribeElasticIps":{ "name":"DescribeElasticIps", @@ -372,7 +372,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Describes Elastic IP addresses.

This call accepts only one resource-identifying parameter.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Describes Elastic IP addresses.

This call accepts only one resource-identifying parameter.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

" }, "DescribeElasticLoadBalancers":{ "name":"DescribeElasticLoadBalancers", @@ -386,7 +386,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Describes a stack's Elastic Load Balancing instances.

This call accepts only one resource-identifying parameter.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Describes a stack's Elastic Load Balancing instances.

This call accepts only one resource-identifying parameter.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

" }, "DescribeInstances":{ "name":"DescribeInstances", @@ -400,7 +400,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Requests a description of a set of instances.

This call accepts only one resource-identifying parameter.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Requests a description of a set of instances.

This call accepts only one resource-identifying parameter.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

" }, "DescribeLayers":{ "name":"DescribeLayers", @@ -414,7 +414,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Requests a description of one or more layers in a specified stack.

This call accepts only one resource-identifying parameter.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Requests a description of one or more layers in a specified stack.

This call accepts only one resource-identifying parameter.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

" }, "DescribeLoadBasedAutoScaling":{ "name":"DescribeLoadBasedAutoScaling", @@ -428,7 +428,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Describes load-based auto scaling configurations for specified layers.

You must specify at least one of the parameters.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Describes load-based auto scaling configurations for specified layers.

You must specify at least one of the parameters.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

" }, "DescribeMyUserProfile":{ "name":"DescribeMyUserProfile", @@ -437,7 +437,7 @@ "requestUri":"/" }, "output":{"shape":"DescribeMyUserProfileResult"}, - "documentation":"

Describes a user's SSH information.

Required Permissions: To use this action, an IAM user must have self-management enabled or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Describes a user's SSH information.

Required Permissions: To use this action, an IAM user must have self-management enabled or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

" }, "DescribeOperatingSystems":{ "name":"DescribeOperatingSystems", @@ -474,7 +474,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Describe an instance's RAID arrays.

This call accepts only one resource-identifying parameter.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Describe an instance's RAID arrays.

This call accepts only one resource-identifying parameter.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

" }, "DescribeRdsDbInstances":{ "name":"DescribeRdsDbInstances", @@ -488,7 +488,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Describes Amazon RDS instances.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

This call accepts only one resource-identifying parameter.

" + "documentation":"

Describes Amazon RDS instances.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

This call accepts only one resource-identifying parameter.

" }, "DescribeServiceErrors":{ "name":"DescribeServiceErrors", @@ -502,7 +502,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Describes AWS OpsWorks Stacks service errors.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

This call accepts only one resource-identifying parameter.

" + "documentation":"

Describes AWS OpsWorks Stacks service errors.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

This call accepts only one resource-identifying parameter.

" }, "DescribeStackProvisioningParameters":{ "name":"DescribeStackProvisioningParameters", @@ -516,7 +516,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Requests a description of a stack's provisioning parameters.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Requests a description of a stack's provisioning parameters.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

" }, "DescribeStackSummary":{ "name":"DescribeStackSummary", @@ -530,7 +530,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Describes the number of layers and apps in a specified stack, and the number of instances in each state, such as running_setup or online.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Describes the number of layers and apps in a specified stack, and the number of instances in each state, such as running_setup or online.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

" }, "DescribeStacks":{ "name":"DescribeStacks", @@ -544,7 +544,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Requests a description of one or more stacks.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Requests a description of one or more stacks.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

" }, "DescribeTimeBasedAutoScaling":{ "name":"DescribeTimeBasedAutoScaling", @@ -558,7 +558,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Describes time-based auto scaling configurations for specified instances.

You must specify at least one of the parameters.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Describes time-based auto scaling configurations for specified instances.

You must specify at least one of the parameters.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

" }, "DescribeUserProfiles":{ "name":"DescribeUserProfiles", @@ -572,7 +572,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Describe specified users.

Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Describe specified users.

Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

" }, "DescribeVolumes":{ "name":"DescribeVolumes", @@ -586,7 +586,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Describes an instance's Amazon EBS volumes.

This call accepts only one resource-identifying parameter.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Describes an instance's Amazon EBS volumes.

This call accepts only one resource-identifying parameter.

Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

" }, "DetachElasticLoadBalancer":{ "name":"DetachElasticLoadBalancer", @@ -852,7 +852,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Unassigns a registered instance from all of it's layers. The instance remains in the stack as an unassigned instance and can be assigned to another layer, as needed. You cannot use this action with instances that were created with AWS OpsWorks Stacks.

Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Unassigns a registered instance from all layers that are using the instance. The instance remains in the stack as an unassigned instance, and can be assigned to another layer as needed. You cannot use this action with instances that were created with AWS OpsWorks Stacks.

Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

" }, "UnassignVolume":{ "name":"UnassignVolume", @@ -942,7 +942,7 @@ "errors":[ {"shape":"ValidationException"} ], - "documentation":"

Updates a user's SSH public key.

Required Permissions: To use this action, an IAM user must have self-management enabled or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Updates a user's SSH public key.

Required Permissions: To use this action, an IAM user must have self-management enabled or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

" }, "UpdateRdsDbInstance":{ "name":"UpdateRdsDbInstance", @@ -981,7 +981,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Updates a specified user profile.

Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions.

" + "documentation":"

Updates a specified user profile.

Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions.

" }, "UpdateVolume":{ "name":"UpdateVolume", @@ -1173,7 +1173,7 @@ }, "LayerId":{ "shape":"String", - "documentation":"

The ID of the layer that the Elastic Load Balancing instance is to be attached to.

" + "documentation":"

The ID of the layer to which the Elastic Load Balancing instance is to be attached.

" } } }, @@ -1283,7 +1283,7 @@ }, "VpcId":{ "shape":"String", - "documentation":"

The ID of the VPC that the cloned stack is to be launched into. It must be in the specified region. All instances are launched into this VPC, and you cannot change the ID later.

  • If your account supports EC2 Classic, the default value is no VPC.

  • If your account does not support EC2 Classic, the default value is the default VPC for the specified region.

If the VPC ID corresponds to a default VPC and you have specified either the DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks Stacks infers the value of the other parameter. If you specify neither parameter, AWS OpsWorks Stacks sets these parameters to the first valid Availability Zone for the specified region and the corresponding default VPC subnet ID, respectively.

If you specify a nondefault VPC ID, note the following:

  • It must belong to a VPC in your account that is in the specified region.

  • You must specify a value for DefaultSubnetId.

For more information on how to use AWS OpsWorks Stacks with a VPC, see Running a Stack in a VPC. For more information on default VPC and EC2 Classic, see Supported Platforms.

" + "documentation":"

The ID of the VPC that the cloned stack is to be launched into. It must be in the specified region. All instances are launched into this VPC, and you cannot change the ID later.

  • If your account supports EC2 Classic, the default value is no VPC.

  • If your account does not support EC2 Classic, the default value is the default VPC for the specified region.

If the VPC ID corresponds to a default VPC and you have specified either the DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks Stacks infers the value of the other parameter. If you specify neither parameter, AWS OpsWorks Stacks sets these parameters to the first valid Availability Zone for the specified region and the corresponding default VPC subnet ID, respectively.

If you specify a nondefault VPC ID, note the following:

  • It must belong to a VPC in your account that is in the specified region.

  • You must specify a value for DefaultSubnetId.

For more information about how to use AWS OpsWorks Stacks with a VPC, see Running a Stack in a VPC. For more information about default VPC and EC2 Classic, see Supported Platforms.

" }, "Attributes":{ "shape":"StackAttributes", @@ -1299,7 +1299,7 @@ }, "DefaultOs":{ "shape":"String", - "documentation":"

The stack's operating system, which must be set to one of the following.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information on how to use custom AMIs with OpsWorks, see Using Custom AMIs.

The default option is the parent stack's operating system. For more information on the supported operating systems, see AWS OpsWorks Stacks Operating Systems.

You can specify a different Linux operating system for the cloned stack, but you cannot change from Linux to Windows or Windows to Linux.

" + "documentation":"

The stack's operating system, which must be set to one of the following.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information about how to use custom AMIs with OpsWorks, see Using Custom AMIs.

The default option is the parent stack's operating system. For more information about supported operating systems, see AWS OpsWorks Stacks Operating Systems.

You can specify a different Linux operating system for the cloned stack, but you cannot change from Linux to Windows or Windows to Linux.

" }, "HostnameTheme":{ "shape":"String", @@ -1315,7 +1315,7 @@ }, "CustomJson":{ "shape":"String", - "documentation":"

A string that contains user-defined, custom JSON. It is used to override the corresponding default stack configuration JSON values. The string should be in the following format:

\"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"

For more information on custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes

" + "documentation":"

A string that contains user-defined, custom JSON. It is used to override the corresponding default stack configuration JSON values. The string should be in the following format:

\"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"

For more information about custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes

" }, "ConfigurationManager":{ "shape":"StackConfigurationManager", @@ -1700,7 +1700,7 @@ }, "CustomJson":{ "shape":"String", - "documentation":"

A string that contains user-defined, custom JSON. It is used to override the corresponding default stack configuration JSON values. The string should be in the following format:

\"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"

For more information on custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes.

" + "documentation":"

A string that contains user-defined, custom JSON. It is used to override the corresponding default stack configuration JSON values. The string should be in the following format:

\"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"

For more information about custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes.

" } } }, @@ -1744,7 +1744,7 @@ }, "Os":{ "shape":"String", - "documentation":"

The instance's operating system, which must be set to one of the following.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom.

For more information on the supported operating systems, see AWS OpsWorks Stacks Operating Systems.

The default option is the current Amazon Linux version. If you set this parameter to Custom, you must use the CreateInstance action's AmiId parameter to specify the custom AMI that you want to use. Block device mappings are not supported if the value is Custom. For more information on the supported operating systems, see Operating SystemsFor more information on how to use custom AMIs with AWS OpsWorks Stacks, see Using Custom AMIs.

" + "documentation":"

The instance's operating system, which must be set to one of the following.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom.

For more information about the supported operating systems, see AWS OpsWorks Stacks Operating Systems.

The default option is the current Amazon Linux version. If you set this parameter to Custom, you must use the CreateInstance action's AmiId parameter to specify the custom AMI that you want to use. Block device mappings are not supported if the value is Custom. For more information about supported operating systems, see Operating Systems. For more information about how to use custom AMIs with AWS OpsWorks Stacks, see Using Custom AMIs.

" }, "AmiId":{ "shape":"String", @@ -1914,11 +1914,11 @@ }, "Region":{ "shape":"String", - "documentation":"

The stack's AWS region, such as \"ap-south-1\". For more information about Amazon regions, see Regions and Endpoints.

" + "documentation":"

The stack's AWS region, such as ap-south-1. For more information about Amazon regions, see Regions and Endpoints.

In the AWS CLI, this API maps to the --stack-region parameter. If the --stack-region parameter and the AWS CLI common parameter --region are set to the same value, the stack uses a regional endpoint. If the --stack-region parameter is not set, but the AWS CLI --region parameter is, this also results in a stack with a regional endpoint. However, if the --region parameter is set to us-east-1, and the --stack-region parameter is set to one of the following, then the stack uses a legacy or classic region: us-west-1, us-west-2, sa-east-1, eu-central-1, eu-west-1, ap-northeast-1, ap-southeast-1, ap-southeast-2. In this case, the actual API endpoint of the stack is in us-east-1. Only the preceding regions are supported as classic regions in the us-east-1 API endpoint. Because it is a best practice to choose the regional endpoint that is closest to where you manage AWS, we recommend that you use regional endpoints for new stacks. The AWS CLI common --region parameter always specifies a regional API endpoint; it cannot be used to specify a classic AWS OpsWorks Stacks region.

" }, "VpcId":{ "shape":"String", - "documentation":"

The ID of the VPC that the stack is to be launched into. The VPC must be in the stack's region. All instances are launched into this VPC. You cannot change the ID later.

  • If your account supports EC2-Classic, the default value is no VPC.

  • If your account does not support EC2-Classic, the default value is the default VPC for the specified region.

If the VPC ID corresponds to a default VPC and you have specified either the DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks Stacks infers the value of the other parameter. If you specify neither parameter, AWS OpsWorks Stacks sets these parameters to the first valid Availability Zone for the specified region and the corresponding default VPC subnet ID, respectively.

If you specify a nondefault VPC ID, note the following:

  • It must belong to a VPC in your account that is in the specified region.

  • You must specify a value for DefaultSubnetId.

For more information on how to use AWS OpsWorks Stacks with a VPC, see Running a Stack in a VPC. For more information on default VPC and EC2-Classic, see Supported Platforms.

" + "documentation":"

The ID of the VPC that the stack is to be launched into. The VPC must be in the stack's region. All instances are launched into this VPC. You cannot change the ID later.

  • If your account supports EC2-Classic, the default value is no VPC.

  • If your account does not support EC2-Classic, the default value is the default VPC for the specified region.

If the VPC ID corresponds to a default VPC and you have specified either the DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks Stacks infers the value of the other parameter. If you specify neither parameter, AWS OpsWorks Stacks sets these parameters to the first valid Availability Zone for the specified region and the corresponding default VPC subnet ID, respectively.

If you specify a nondefault VPC ID, note the following:

  • It must belong to a VPC in your account that is in the specified region.

  • You must specify a value for DefaultSubnetId.

For more information about how to use AWS OpsWorks Stacks with a VPC, see Running a Stack in a VPC. For more information about default VPC and EC2-Classic, see Supported Platforms.

" }, "Attributes":{ "shape":"StackAttributes", @@ -1934,7 +1934,7 @@ }, "DefaultOs":{ "shape":"String", - "documentation":"

The stack's default operating system, which is installed on every instance unless you specify a different operating system when you create the instance. You can specify one of the following.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information, see Using Custom AMIs.

The default option is the current Amazon Linux version. For more information on the supported operating systems, see AWS OpsWorks Stacks Operating Systems.

" + "documentation":"

The stack's default operating system, which is installed on every instance unless you specify a different operating system when you create the instance. You can specify one of the following.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information, see Using Custom AMIs.

The default option is the current Amazon Linux version. For more information about supported operating systems, see AWS OpsWorks Stacks Operating Systems.

" }, "HostnameTheme":{ "shape":"String", @@ -1950,11 +1950,11 @@ }, "CustomJson":{ "shape":"String", - "documentation":"

A string that contains user-defined, custom JSON. It can be used to override the corresponding default stack configuration attribute values or to pass data to recipes. The string should be in the following format:

\"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"

For more information on custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes.

" + "documentation":"

A string that contains user-defined, custom JSON. It can be used to override the corresponding default stack configuration attribute values or to pass data to recipes. The string should be in the following format:

\"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"

For more information about custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes.

" }, "ConfigurationManager":{ "shape":"StackConfigurationManager", - "documentation":"

The configuration manager. When you create a stack we recommend that you use the configuration manager to specify the Chef version: 12, 11.10, or 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for Linux stacks is currently 11.4.

" + "documentation":"

The configuration manager. When you create a stack we recommend that you use the configuration manager to specify the Chef version: 12, 11.10, or 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for Linux stacks is currently 12.

" }, "ChefConfiguration":{ "shape":"ChefConfiguration", @@ -2209,7 +2209,7 @@ "members":{ "EcsClusterArn":{ "shape":"String", - "documentation":"

The cluster's ARN.

" + "documentation":"

The cluster's Amazon Resource Name (ARN).

" } } }, @@ -2521,7 +2521,10 @@ "DescribeOperatingSystemsResponse":{ "type":"structure", "members":{ - "OperatingSystems":{"shape":"OperatingSystems"} + "OperatingSystems":{ + "shape":"OperatingSystems", + "documentation":"

Contains information in response to a DescribeOperatingSystems request.

" + } }, "documentation":"

The response to a DescribeOperatingSystems request.

" }, @@ -2581,7 +2584,7 @@ "members":{ "StackId":{ "shape":"String", - "documentation":"

The stack ID that the instances are registered with. The operation returns descriptions of all registered Amazon RDS instances.

" + "documentation":"

The ID of the stack with which the instances are registered. The operation returns descriptions of all registered Amazon RDS instances.

" }, "RdsDbInstanceArns":{ "shape":"Strings", @@ -2632,7 +2635,7 @@ "members":{ "StackId":{ "shape":"String", - "documentation":"

The stack ID

" + "documentation":"

The stack ID.

" } } }, @@ -3008,7 +3011,10 @@ "shape":"Architecture", "documentation":"

The instance architecture: \"i386\" or \"x86_64\".

" }, - "Arn":{"shape":"String"}, + "Arn":{ + "shape":"String", + "documentation":"

The instance's Amazon Resource Name (ARN).

" + }, "AutoScalingType":{ "shape":"AutoScalingType", "documentation":"

For load-based or time-based instances, the type.

" @@ -3237,7 +3243,10 @@ "shape":"Integer", "documentation":"

The number of instances with start_failed status.

" }, - "StopFailed":{"shape":"Integer"}, + "StopFailed":{ + "shape":"Integer", + "documentation":"

The number of instances with stop_failed status.

" + }, "Stopped":{ "shape":"Integer", "documentation":"

The number of instances with stopped status.

" @@ -3268,7 +3277,10 @@ "Layer":{ "type":"structure", "members":{ - "Arn":{"shape":"String"}, + "Arn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of a layer.

" + }, "StackId":{ "shape":"String", "documentation":"

The layer stack ID.

" @@ -4013,7 +4025,7 @@ }, "Level":{ "shape":"String", - "documentation":"

The user's permission level, which must be set to one of the following strings. You cannot set your own permissions level.

  • deny

  • show

  • deploy

  • manage

  • iam_only

For more information on the permissions associated with these levels, see Managing User Permissions.

" + "documentation":"

The user's permission level, which must be set to one of the following strings. You cannot set your own permissions level.

  • deny

  • show

  • deploy

  • manage

  • iam_only

For more information about the permissions associated with these levels, see Managing User Permissions.

" } } }, @@ -4282,7 +4294,10 @@ "shape":"String", "documentation":"

The instance ID.

" }, - "Force":{"shape":"Boolean"} + "Force":{ + "shape":"Boolean", + "documentation":"

Specifies whether to force an instance to stop.

" + } } }, "StopStackRequest":{ @@ -4462,7 +4477,7 @@ "members":{ "ElasticIp":{ "shape":"String", - "documentation":"

The address.

" + "documentation":"

The IP address for which you want to update the name.

" }, "Name":{ "shape":"String", @@ -4496,7 +4511,7 @@ }, "Os":{ "shape":"String", - "documentation":"

The instance's operating system, which must be set to one of the following. You cannot update an instance that is using a custom AMI.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

For more information on the supported operating systems, see AWS OpsWorks Stacks Operating Systems.

The default option is the current Amazon Linux version. If you set this parameter to Custom, you must use the AmiId parameter to specify the custom AMI that you want to use. For more information on the supported operating systems, see Operating Systems. For more information on how to use custom AMIs with OpsWorks, see Using Custom AMIs.

You can specify a different Linux operating system for the updated stack, but you cannot change from Linux to Windows or Windows to Linux.

" + "documentation":"

The instance's operating system, which must be set to one of the following. You cannot update an instance that is using a custom AMI.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

For more information about supported operating systems, see AWS OpsWorks Stacks Operating Systems.

The default option is the current Amazon Linux version. If you set this parameter to Custom, you must use the AmiId parameter to specify the custom AMI that you want to use. For more information about supported operating systems, see Operating Systems. For more information about how to use custom AMIs with OpsWorks, see Using Custom AMIs.

You can specify a different Linux operating system for the updated stack, but you cannot change from Linux to Windows or Windows to Linux.

" }, "AmiId":{ "shape":"String", @@ -4651,7 +4666,7 @@ }, "DefaultOs":{ "shape":"String", - "documentation":"

The stack's operating system, which must be set to one of the following:

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information on how to use custom AMIs with OpsWorks, see Using Custom AMIs.

The default option is the stack's current operating system. For more information on the supported operating systems, see AWS OpsWorks Stacks Operating Systems.

" + "documentation":"

The stack's operating system, which must be set to one of the following:

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

  • CentOS Linux 7

  • Red Hat Enterprise Linux 7

  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

  • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information about how to use custom AMIs with OpsWorks, see Using Custom AMIs.

The default option is the stack's current operating system. For more information about supported operating systems, see AWS OpsWorks Stacks Operating Systems.

" }, "HostnameTheme":{ "shape":"String", @@ -4667,11 +4682,11 @@ }, "CustomJson":{ "shape":"String", - "documentation":"

A string that contains user-defined, custom JSON. It can be used to override the corresponding default stack configuration JSON values or to pass data to recipes. The string should be in the following format:

\"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"

For more information on custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes.

" + "documentation":"

A string that contains user-defined, custom JSON. It can be used to override the corresponding default stack configuration JSON values or to pass data to recipes. The string should be in the following format:

\"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"

For more information about custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes.

" }, "ConfigurationManager":{ "shape":"StackConfigurationManager", - "documentation":"

The configuration manager. When you update a stack, we recommend that you use the configuration manager to specify the Chef version: 12, 11.10, or 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for Linux stacks is currently 11.4.

" + "documentation":"

The configuration manager. When you update a stack, we recommend that you use the configuration manager to specify the Chef version: 12, 11.10, or 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for Linux stacks is currently 12.

" }, "ChefConfiguration":{ "shape":"ChefConfiguration", @@ -4843,13 +4858,16 @@ }, "VolumeType":{ "shape":"String", - "documentation":"

The volume type, standard or PIOPS.

" + "documentation":"

The volume type. For more information, see Amazon EBS Volume Types.

  • standard - Magnetic. Magnetic volumes must have a minimum size of 1 GiB and a maximum size of 1024 GiB.

  • io1 - Provisioned IOPS (SSD). PIOPS volumes must have a minimum size of 4 GiB and a maximum size of 16384 GiB.

  • gp2 - General Purpose (SSD). General purpose volumes must have a minimum size of 1 GiB and a maximum size of 16384 GiB.

  • st1 - Throughput Optimized hard disk drive (HDD). Throughput optimized HDD volumes must have a minimum size of 500 GiB and a maximum size of 16384 GiB.

  • sc1 - Cold HDD. Cold HDD volumes must have a minimum size of 500 GiB and a maximum size of 16384 GiB.

" }, "Iops":{ "shape":"Integer", "documentation":"

For PIOPS volumes, the IOPS per disk.

" }, - "Encrypted":{"shape":"Boolean"} + "Encrypted":{ + "shape":"Boolean", + "documentation":"

Specifies whether an Amazon EBS volume is encrypted. For more information, see Amazon EBS Encryption.

" + } }, "documentation":"

Describes an instance's Amazon EBS volume.

" }, @@ -4879,7 +4897,7 @@ }, "VolumeType":{ "shape":"String", - "documentation":"

The volume type. For more information, see Amazon EBS Volume Types.

  • standard - Magnetic

  • io1 - Provisioned IOPS (SSD)

  • gp2 - General Purpose (SSD)

  • st1 - Throughput Optimized hard disk drive (HDD)

  • sc1 - Cold HDD

" + "documentation":"

The volume type. For more information, see Amazon EBS Volume Types.

  • standard - Magnetic. Magnetic volumes must have a minimum size of 1 GiB and a maximum size of 1024 GiB.

  • io1 - Provisioned IOPS (SSD). PIOPS volumes must have a minimum size of 4 GiB and a maximum size of 16384 GiB.

  • gp2 - General Purpose (SSD). General purpose volumes must have a minimum size of 1 GiB and a maximum size of 16384 GiB.

  • st1 - Throughput Optimized hard disk drive (HDD). Throughput optimized HDD volumes must have a minimum size of 500 GiB and a maximum size of 16384 GiB.

  • sc1 - Cold HDD. Cold HDD volumes must have a minimum size of 500 GiB and a maximum size of 16384 GiB.

" }, "Iops":{ "shape":"Integer", diff --git a/botocore/data/organizations/2016-11-28/service-2.json b/botocore/data/organizations/2016-11-28/service-2.json index a0ca18e0..9adbd76f 100644 --- a/botocore/data/organizations/2016-11-28/service-2.json +++ b/botocore/data/organizations/2016-11-28/service-2.json @@ -57,7 +57,7 @@ {"shape":"TargetNotFoundException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Attaches a policy to a root, an organizational unit, or an individual account. How the policy affects accounts depends on the type of policy:

  • Service control policy (SCP) - An SCP specifies what permissions can be delegated to users in affected member accounts. The scope of influence for a policy depends on what you attach the policy to:

    • If you attach an SCP to a root, it affects all accounts in the organization.

    • If you attach an SCP to an OU, it affects all accounts in that OU and in any child OUs.

    • If you attach the policy directly to an account, then it affects only that account.

    SCPs essentially are permission \"filters\". When you attach one SCP to a higher level root or OU, and you also attach a different SCP to a child OU or to an account, the child policy can further restrict only the permissions that pass through the parent filter and are available to the child. An SCP that is attached to a child cannot grant a permission that is not already granted by the parent. For example, imagine that the parent SCP allows permissions A, B, C, D, and E. The child SCP allows C, D, E, F, and G. The result is that the accounts affected by the child SCP are allowed to use only C, D, and E. They cannot use A or B because they were filtered out by the child OU. They also cannot use F and G because they were filtered out by the parent OU. They cannot be granted back by the child SCP; child SCPs can only filter the permissions they receive from the parent SCP.

    AWS Organizations attaches a default SCP named \"FullAWSAccess to every root, OU, and account. This default SCP allows all services and actions, enabling any new child OU or account to inherit the permissions of the parent root or OU. If you detach the default policy, you must replace it with a policy that specifies the permissions that you want to allow in that OU or account.

    For more information about how Organizations policies permissions work, see Using Service Control Policies in the AWS Organizations User Guide.

This operation can be called only from the organization's master account.

" + "documentation":"

Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy:

  • Service control policy (SCP) - An SCP specifies what permissions can be delegated to users in affected member accounts. The scope of influence for a policy depends on what you attach the policy to:

    • If you attach an SCP to a root, it affects all accounts in the organization.

    • If you attach an SCP to an OU, it affects all accounts in that OU and in any child OUs.

    • If you attach the policy directly to an account, then it affects only that account.

    SCPs essentially are permission \"filters\". When you attach one SCP to a higher level root or OU, and you also attach a different SCP to a child OU or to an account, the child policy can further restrict only the permissions that pass through the parent filter and are available to the child. An SCP that is attached to a child cannot grant a permission that is not already granted by the parent. For example, imagine that the parent SCP allows permissions A, B, C, D, and E. The child SCP allows C, D, E, F, and G. The result is that the accounts affected by the child SCP are allowed to use only C, D, and E. They cannot use A or B because they were filtered out by the child OU. They also cannot use F and G because they were filtered out by the parent OU. They cannot be granted back by the child SCP; child SCPs can only filter the permissions they receive from the parent SCP.

    AWS Organizations attaches a default SCP named \"FullAWSAccess\" to every root, OU, and account. This default SCP allows all services and actions, enabling any new child OU or account to inherit the permissions of the parent root or OU. If you detach the default policy, you must replace it with a policy that specifies the permissions that you want to allow in that OU or account.

    For more information about how Organizations policies permissions work, see Using Service Control Policies in the AWS Organizations User Guide.

This operation can be called only from the organization's master account.

" }, "CancelHandshake":{ "name":"CancelHandshake", @@ -97,7 +97,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Creates an AWS account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that AWS performs in the background. If you want to check the status of the request later, you need the OperationId response element from this operation to provide as a parameter to the DescribeCreateAccountStatus operation.

The user who calls the API for an invitation to join must have the organizations:CreateAccount permission. If you enabled all features in the organization, then the user must also have the iam:CreateServiceLinkedRole permission so that Organizations can create the required service-linked role named OrgsServiceLinkedRoleName. For more information, see AWS Organizations and Service-Linked Roles in the AWS Organizations User Guide.

The user in the master account who calls this API must also have the iam:CreateRole permission because AWS Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the master account administrator permissions in the new member account. Principals in the master account can assume the role. AWS Organizations clones the company name and address information for the new account from the organization's master account.

This operation can be called only from the organization's master account.

For more information about creating accounts, see Creating an AWS Account in Your Organization in the AWS Organizations User Guide.

When you create an account in an organization using the AWS Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method and signing the End User Licence Agreement (EULA) is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable this, then only the account root user can access billing information. For information about how to disable this for an account, see Granting Access to Your Billing Information and Tools.

This operation can be called only from the organization's master account.

If you get an exception that indicates that you exceeded your account limits for the organization or that you can\"t add an account because your organization is still initializing, please contact AWS Customer Support.

" + "documentation":"

Creates an AWS account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that AWS performs in the background. If you want to check the status of the request later, you need the OperationId response element from this operation to provide as a parameter to the DescribeCreateAccountStatus operation.

The user who calls the API for an invitation to join must have the organizations:CreateAccount permission. If you enabled all features in the organization, then the user must also have the iam:CreateServiceLinkedRole permission so that Organizations can create the required service-linked role named OrgsServiceLinkedRoleName. For more information, see AWS Organizations and Service-Linked Roles in the AWS Organizations User Guide.

The user in the master account who calls this API must also have the iam:CreateRole permission because AWS Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the master account administrator permissions in the new member account. Principals in the master account can assume the role. AWS Organizations clones the company name and address information for the new account from the organization's master account.

This operation can be called only from the organization's master account.

For more information about creating accounts, see Creating an AWS Account in Your Organization in the AWS Organizations User Guide.

  • When you create an account in an organization using the AWS Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method and signing the End User License Agreement (EULA) is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • If you get an exception that indicates that you exceeded your account limits for the organization or that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists after an hour, then contact AWS Customer Support.

  • Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account.

When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable this, then only the account root user can access billing information. For information about how to disable this for an account, see Granting Access to Your Billing Information and Tools.

" }, "CreateOrganization":{ "name":"CreateOrganization", @@ -197,7 +197,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Deletes the organization. You can delete an organization only by using credentials from the master account. The organization must be empty of member accounts, OUs, and policies.

" + "documentation":"

Deletes the organization. You can delete an organization only by using credentials from the master account. The organization must be empty of member accounts, organizational units (OUs), and policies.

" }, "DeleteOrganizationalUnit":{ "name":"DeleteOrganizationalUnit", @@ -216,7 +216,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Deletes an organizational unit from a root or another OU. You must first remove all accounts and child OUs from the OU that you want to delete.

This operation can be called only from the organization's master account.

" + "documentation":"

Deletes an organizational unit (OU) from a root or another OU. You must first remove all accounts and child OUs from the OU that you want to delete.

This operation can be called only from the organization's master account.

" }, "DeletePolicy":{ "name":"DeletePolicy", @@ -235,7 +235,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Deletes the specified policy from your organization. Before you perform this operation, you must first detach the policy from all OUs, roots, and accounts.

This operation can be called only from the organization's master account.

" + "documentation":"

Deletes the specified policy from your organization. Before you perform this operation, you must first detach the policy from all organizational units (OUs), roots, and accounts.

This operation can be called only from the organization's master account.

" }, "DescribeAccount":{ "name":"DescribeAccount", @@ -362,7 +362,7 @@ {"shape":"TargetNotFoundException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Detaches a policy from a target root, organizational unit, or account. If the policy being detached is a service control policy (SCP), the changes to permissions for IAM users and roles in affected accounts are immediate.

Note: Every root, OU, and account must have at least one SCP attached. If you want to replace the default FullAWSAccess policy with one that limits the permissions that can be delegated, then you must attach the replacement policy before you can remove the default one. This is the authorization strategy of whitelisting. If you instead attach a second SCP and leave the FullAWSAccess SCP still attached, and specify \"Effect\": \"Deny\" in the second SCP to override the \"Effect\": \"Allow\" in the FullAWSAccess policy (or any other attached SCP), then you are using the authorization strategy of blacklisting.

This operation can be called only from the organization's master account.

" + "documentation":"

Detaches a policy from a target root, organizational unit (OU), or account. If the policy being detached is a service control policy (SCP), the changes to permissions for IAM users and roles in affected accounts are immediate.

Note: Every root, OU, and account must have at least one SCP attached. If you want to replace the default FullAWSAccess policy with one that limits the permissions that can be delegated, then you must attach the replacement policy before you can remove the default one. This is the authorization strategy of whitelisting. If you instead attach a second SCP and leave the FullAWSAccess SCP still attached, and specify \"Effect\": \"Deny\" in the second SCP to override the \"Effect\": \"Allow\" in the FullAWSAccess policy (or any other attached SCP), then you are using the authorization strategy of blacklisting.

This operation can be called only from the organization's master account.

" }, "DisableAWSServiceAccess":{ "name":"DisableAWSServiceAccess", @@ -401,7 +401,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Disables an organizational control policy type in a root. A policy of a certain type can be attached to entities in a root only if that type is enabled in the root. After you perform this operation, you no longer can attach policies of the specified type to that root or to any OU or account in that root. You can undo this by using the EnablePolicyType operation.

This operation can be called only from the organization's master account.

If you disable a policy type for a root, it still shows as enabled for the organization if all features are enabled in that organization. Use ListRoots to see the status of policy types for a specified root. Use DescribeOrganization to see the status of policy types in the organization.

" + "documentation":"

Disables an organizational control policy type in a root. A policy of a certain type can be attached to entities in a root only if that type is enabled in the root. After you perform this operation, you no longer can attach policies of the specified type to that root or to any organizational unit (OU) or account in that root. You can undo this by using the EnablePolicyType operation.

This operation can be called only from the organization's master account.

If you disable a policy type for a root, it still shows as enabled for the organization if all features are enabled in that organization. Use ListRoots to see the status of policy types for a specified root. Use DescribeOrganization to see the status of policy types in the organization.

" }, "EnableAWSServiceAccess":{ "name":"EnableAWSServiceAccess", @@ -438,7 +438,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Enables all features in an organization. This enables the use of organization policies that can restrict the services and actions that can be called in each account. Until you enable all features, you have access only to consolidated billing, and you can't use any of the advanced account administration features that AWS Organizations supports. For more information, see Enabling All Features in Your Organization in the AWS Organizations User Guide.

This operation is required only for organizations that were created explicitly with only the consolidated billing features enabled, or that were migrated from a Consolidated Billing account family to Organizations. Calling this operation sends a handshake to every invited account in the organization. The feature set change can be finalized and the additional features enabled only after all administrators in the invited accounts approve the change by accepting the handshake.

After you enable all features, you can separately enable or disable individual policy types in a root using EnablePolicyType and DisablePolicyType. To see the status of policy types in a root, use ListRoots.

After all invited member accounts accept the handshake, you finalize the feature set change by accepting the handshake that contains \"Action\": \"ENABLE_ALL_FEATURES\". This completes the change.

After you enable all features in your organization, the master account in the organization can apply policies on all member accounts. These policies can restrict what users and even administrators in those accounts can do. The master account can apply policies that prevent accounts from leaving the organization. Ensure that your account administrators are aware of this.

This operation can be called only from the organization's master account.

" + "documentation":"

Enables all features in an organization. This enables the use of organization policies that can restrict the services and actions that can be called in each account. Until you enable all features, you have access only to consolidated billing, and you can't use any of the advanced account administration features that AWS Organizations supports. For more information, see Enabling All Features in Your Organization in the AWS Organizations User Guide.

This operation is required only for organizations that were created explicitly with only the consolidated billing features enabled. Calling this operation sends a handshake to every invited account in the organization. The feature set change can be finalized and the additional features enabled only after all administrators in the invited accounts approve the change by accepting the handshake.

After you enable all features, you can separately enable or disable individual policy types in a root using EnablePolicyType and DisablePolicyType. To see the status of policy types in a root, use ListRoots.

After all invited member accounts accept the handshake, you finalize the feature set change by accepting the handshake that contains \"Action\": \"ENABLE_ALL_FEATURES\". This completes the change.

After you enable all features in your organization, the master account in the organization can apply policies on all member accounts. These policies can restrict what users and even administrators in those accounts can do. The master account can apply policies that prevent accounts from leaving the organization. Ensure that your account administrators are aware of this.

This operation can be called only from the organization's master account.

" }, "EnablePolicyType":{ "name":"EnablePolicyType", @@ -460,7 +460,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"PolicyTypeNotAvailableForOrganizationException"} ], - "documentation":"

Enables a policy type in a root. After you enable a policy type in a root, you can attach policies of that type to the root, any OU, or account in that root. You can undo this by using the DisablePolicyType operation.

This operation can be called only from the organization's master account.

You can enable a policy type in a root only if that policy type is available in the organization. Use DescribeOrganization to view the status of available policy types in the organization.

To view the status of policy type in a root, use ListRoots.

" + "documentation":"

Enables a policy type in a root. After you enable a policy type in a root, you can attach policies of that type to the root, any organizational unit (OU), or account in that root. You can undo this by using the DisablePolicyType operation.

This operation can be called only from the organization's master account.

You can enable a policy type in a root only if that policy type is available in the organization. Use DescribeOrganization to view the status of available policy types in the organization.

To view the status of policy types in a root, use ListRoots.

" }, "InviteAccountToOrganization":{ "name":"InviteAccountToOrganization", @@ -481,7 +481,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Sends an invitation to another account to join your organization as a member account. Organizations sends email on your behalf to the email address that is associated with the other account's owner. The invitation is implemented as a Handshake whose details are in the response.

You can invite AWS accounts only from the same seller as the master account. For example, if your organization's master account was created by Amazon Internet Services Pvt. Ltd (AISPL), an AWS seller in India, then you can only invite other AISPL accounts to your organization. You can't combine accounts from AISPL and AWS, or any other AWS seller. For more information, see Consolidated Billing in India.

This operation can be called only from the organization's master account.

If you get an exception that indicates that you exceeded your account limits for the organization or that you can\"t add an account because your organization is still initializing, please contact AWS Customer Support.

" + "documentation":"

Sends an invitation to another account to join your organization as a member account. Organizations sends email on your behalf to the email address that is associated with the other account's owner. The invitation is implemented as a Handshake whose details are in the response.

  • You can invite AWS accounts only from the same seller as the master account. For example, if your organization's master account was created by Amazon Internet Services Pvt. Ltd (AISPL), an AWS seller in India, then you can only invite other AISPL accounts to your organization. You can't combine accounts from AISPL and AWS, or any other AWS seller. For more information, see Consolidated Billing in India.

  • If you receive an exception that indicates that you exceeded your account limits for the organization or that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists after an hour, then contact AWS Customer Support.

This operation can be called only from the organization's master account.

" }, "LeaveOrganization":{ "name":"LeaveOrganization", @@ -535,7 +535,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Lists all the accounts in the organization. To request only the accounts in a specified root or OU, use the ListAccountsForParent operation instead.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's master account.

" + "documentation":"

Lists all the accounts in the organization. To request only the accounts in a specified root or organizational unit (OU), use the ListAccountsForParent operation instead.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's master account.

" }, "ListAccountsForParent":{ "name":"ListAccountsForParent", @@ -571,7 +571,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Lists all of the OUs or accounts that are contained in the specified parent OU or root. This operation, along with ListParents enables you to traverse the tree structure that makes up this root.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's master account.

" + "documentation":"

Lists all of the organizational units (OUs) or accounts that are contained in the specified parent OU or root. This operation, along with ListParents, enables you to traverse the tree structure that makes up this root.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's master account.

" }, "ListCreateAccountStatus":{ "name":"ListCreateAccountStatus", @@ -729,7 +729,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Lists all the roots, OUs, and accounts to which the specified policy is attached.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's master account.

" + "documentation":"

Lists all the roots, organizational units (OUs), and accounts to which the specified policy is attached.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's master account.

" }, "MoveAccount":{ "name":"MoveAccount", @@ -750,7 +750,7 @@ {"shape":"AWSOrganizationsNotInUseException"}, {"shape":"ServiceException"} ], - "documentation":"

Moves an account from its current source parent root or OU to the specified destination parent root or OU.

This operation can be called only from the organization's master account.

" + "documentation":"

Moves an account from its current source parent root or organizational unit (OU) to the specified destination parent root or OU.

This operation can be called only from the organization's master account.

" }, "RemoveAccountFromOrganization":{ "name":"RemoveAccountFromOrganization", @@ -770,7 +770,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Removes the specified account from the organization.

The removed account becomes a stand-alone account that is not a member of any organization. It is no longer subject to any policies and is responsible for its own bill payments. The organization's master account is no longer charged for any expenses accrued by the member account after it is removed from the organization.

This operation can be called only from the organization's master account. Member accounts can remove themselves with LeaveOrganization instead.

  • You can remove an account from your organization only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the AWS Organizations console, API, or CLI commands, the information required of standalone accounts is not automatically collected. For an account that you want to make standalone, you must accept the End User License Agreement (EULA), choose a support plan, provide and verify the required contact information, and provide a current payment method. AWS uses the payment method to charge for any billable (not free tier) AWS activity that occurs while the account is not attached to an organization. To remove an account that does not yet have this information, you must sign in as the member account and follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • You can remove a member account only after you enable IAM user access to billing in the member account. For more information, see Activating Access to the Billing and Cost Management Console in the AWS Billing and Cost Management User Guide.

" + "documentation":"

Removes the specified account from the organization.

The removed account becomes a stand-alone account that is not a member of any organization. It is no longer subject to any policies and is responsible for its own bill payments. The organization's master account is no longer charged for any expenses accrued by the member account after it is removed from the organization.

This operation can be called only from the organization's master account. Member accounts can remove themselves with LeaveOrganization instead.

You can remove an account from your organization only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the AWS Organizations console, API, or CLI commands, the information required of standalone accounts is not automatically collected. For an account that you want to make standalone, you must accept the End User License Agreement (EULA), choose a support plan, provide and verify the required contact information, and provide a current payment method. AWS uses the payment method to charge for any billable (not free tier) AWS activity that occurs while the account is not attached to an organization. To remove an account that does not yet have this information, you must sign in as the member account and follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

" }, "UpdateOrganizationalUnit":{ "name":"UpdateOrganizationalUnit", @@ -1043,7 +1043,7 @@ "Message":{"shape":"ExceptionMessage"}, "Reason":{"shape":"ConstraintViolationExceptionReason"} }, - "documentation":"

Performing this operation violates a minimum or maximum value limit. For example, attempting to removing the last SCP from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact AWS Support to request an increase in your limit.

    Or, The number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations, or contact AWS Support to request an increase in the number of accounts.

    Note: deleted and closed accounts still count toward your limit.

    If you get an exception that indicates that you exceeded your account limits for the organization or that you can\"t add an account because your organization is still initializing, please contact AWS Customer Support.

  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes you can send in one day.

  • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of organizational units you can have in an organization.

  • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an organizational unit tree that is too many levels deep.

  • POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of policies that you can have in an organization.

  • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

  • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account from the organization that does not yet have enough information to exist as a stand-alone account. This account requires you to first agree to the AWS Customer Agreement. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that does not yet have enough information to exist as a stand-alone account. This account requires you to first complete phone verification. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this account, you first must associate a payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

  • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's master account to the marketplace that corresponds to the master account's address. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be associated with the same marketplace.

  • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the master account. Then try the operation again.

", + "documentation":"

Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last SCP from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact AWS Support to request an increase in your limit.

    Or, The number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations, or contact AWS Support to request an increase in the number of accounts.

    Note: deleted and closed accounts still count toward your limit.

    If you receive this exception when running a command immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact AWS Customer Support.

  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes you can send in one day.

  • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of organizational units you can have in an organization.

  • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an organizational unit tree that is too many levels deep.

  • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports consolidated billing features only cannot perform this operation.

  • POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.

  • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

  • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account from the organization that does not yet have enough information to exist as a stand-alone account. This account requires you to first agree to the AWS Customer Agreement. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that does not yet have enough information to exist as a stand-alone account. This account requires you to first complete phone verification. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this account, you first must associate a payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

  • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's master account to the marketplace that corresponds to the master account's address. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be associated with the same marketplace.

  • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the master account. Then try the operation again.

", "exception":true }, "ConstraintViolationExceptionReason":{ @@ -1580,7 +1580,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

AWS Organizations could not finalize the creation of your organization. Try again later. If this persists, contact AWS customer support.

", + "documentation":"

AWS Organizations could not perform the operation because your organization has not finished initializing. This can take up to an hour. Try again later. If after one hour you continue to receive this error, contact AWS Customer Support.

", "exception":true }, "GenericArn":{ @@ -1643,7 +1643,7 @@ "Message":{"shape":"ExceptionMessage"}, "Reason":{"shape":"HandshakeConstraintViolationExceptionReason"} }, - "documentation":"

The requested operation would violate the constraint identified in the reason code.

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. Note: deleted and closed accounts still count toward your limit.

    If you get an exception that indicates that you exceeded your account limits for the organization or that you can't add an account because your organization is still initializing, please contact AWS Customer Support.

  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes you can send in one day.

  • ALREADY_IN_AN_ORGANIZATION: The handshake request is invalid because the invited account is already a member of an organization.

  • ORGANIZATION_ALREADY_HAS_ALL_FEATURES: The handshake request is invalid because the organization has already enabled all features.

  • INVITE_DISABLED_DURING_ENABLE_ALL_FEATURES: You cannot issue new invitations to join an organization while it is in the process of enabling all features. You can resume inviting accounts after you finalize the process when all accounts have agreed to the change.

  • PAYMENT_INSTRUMENT_REQUIRED: You cannot complete the operation with an account that does not have a payment instrument, such as a credit card, associated with it.

  • ORGANIZATION_FROM_DIFFERENT_SELLER_OF_RECORD: The request failed because the account is from a different marketplace than the accounts in the organization. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be from the same marketplace.

  • ORGANIZATION_MEMBERSHIP_CHANGE_RATE_LIMIT_EXCEEDED: You attempted to change the membership of an account too quickly after its previous change.

", + "documentation":"

The requested operation would violate the constraint identified in the reason code.

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. Note: deleted and closed accounts still count toward your limit.

    If you get this exception immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact AWS Customer Support.

  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes you can send in one day.

  • ALREADY_IN_AN_ORGANIZATION: The handshake request is invalid because the invited account is already a member of an organization.

  • ORGANIZATION_ALREADY_HAS_ALL_FEATURES: The handshake request is invalid because the organization has already enabled all features.

  • INVITE_DISABLED_DURING_ENABLE_ALL_FEATURES: You cannot issue new invitations to join an organization while it is in the process of enabling all features. You can resume inviting accounts after you finalize the process when all accounts have agreed to the change.

  • PAYMENT_INSTRUMENT_REQUIRED: You cannot complete the operation with an account that does not have a payment instrument, such as a credit card, associated with it.

  • ORGANIZATION_FROM_DIFFERENT_SELLER_OF_RECORD: The request failed because the account is from a different marketplace than the accounts in the organization. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be from the same marketplace.

  • ORGANIZATION_MEMBERSHIP_CHANGE_RATE_LIMIT_EXCEEDED: You attempted to change the membership of an account too quickly after its previous change.

", "exception":true }, "HandshakeConstraintViolationExceptionReason":{ @@ -2787,5 +2787,5 @@ } } }, - "documentation":"AWS Organizations API Reference

AWS Organizations is a web service that enables you to consolidate your multiple AWS accounts into an organization and centrally manage your accounts and their resources.

This guide provides descriptions of the Organizations API. For more information about using this service, see the AWS Organizations User Guide.

API Version

This version of the Organizations API Reference documents the Organizations API version 2016-11-28.

As an alternative to using the API directly, you can use one of the AWS SDKs, which consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .NET, iOS, Android, and more). The SDKs provide a convenient way to create programmatic access to AWS Organizations. For example, the SDKs take care of cryptographically signing requests, managing errors, and retrying requests automatically. For more information about the AWS SDKs, including how to download and install them, see Tools for Amazon Web Services.

We recommend that you use the AWS SDKs to make programmatic API calls to Organizations. However, you also can use the Organizations Query API to make direct calls to the Organizations web service. To learn more about the Organizations Query API, see Making Query Requests in the AWS Organizations User Guide. Organizations supports GET and POST requests for all actions. That is, the API does not require you to use GET for some actions and POST for others. However, GET requests are subject to the limitation size of a URL. Therefore, for operations that require larger sizes, use a POST request.

Signing Requests

When you send HTTP requests to AWS, you must sign the requests so that AWS can identify who sent them. You sign requests with your AWS access key, which consists of an access key ID and a secret access key. We strongly recommend that you do not create an access key for your root account. Anyone who has the access key for your root account has unrestricted access to all the resources in your account. Instead, create an access key for an IAM user account that has administrative privileges. As another option, use AWS Security Token Service to generate temporary security credentials, and use those credentials to sign requests.

To sign requests, we recommend that you use Signature Version 4. If you have an existing application that uses Signature Version 2, you do not have to update it to use Signature Version 4. However, some operations now require Signature Version 4. The documentation for operations that require version 4 indicate this requirement.

When you use the AWS Command Line Interface (AWS CLI) or one of the AWS SDKs to make requests to AWS, these tools automatically sign the requests for you with the access key that you specify when you configure the tools.

In this release, each organization can have only one root. In a future release, a single organization will support multiple roots.

Support and Feedback for AWS Organizations

We welcome your feedback. Send your comments to feedback-awsorganizations@amazon.com or post your feedback and questions in the AWS Organizations support forum. For more information about the AWS support forums, see Forums Help.

Endpoint to Call When Using the CLI or the AWS API

For the current release of Organizations, you must specify the us-east-1 region for all AWS API and CLI calls. You can do this in the CLI by using these parameters and commands:

  • Use the following parameter with each command to specify both the endpoint and its region:

    --endpoint-url https://organizations.us-east-1.amazonaws.com

  • Use the default endpoint, but configure your default region with this command:

    aws configure set default.region us-east-1

  • Use the following parameter with each command to specify the endpoint:

    --region us-east-1

For the various SDKs used to call the APIs, see the documentation for the SDK of interest to learn how to direct the requests to a specific endpoint. For more information, see Regions and Endpoints in the AWS General Reference.

How examples are presented

The JSON returned by the AWS Organizations service in response to your requests is returned as a single long string without line breaks or formatting whitespace. Both line breaks and whitespace are included in the examples in this guide to improve readability. When example input parameters also would result in long strings that would extend beyond the screen, we insert line breaks to enhance readability. You should always submit the input as a single JSON text string.

Recording API Requests

AWS Organizations supports AWS CloudTrail, a service that records AWS API calls for your AWS account and delivers log files to an Amazon S3 bucket. By using information collected by AWS CloudTrail, you can determine which requests were successfully made to Organizations, who made the request, when it was made, and so on. For more about AWS Organizations and its support for AWS CloudTrail, see Logging AWS Organizations Events with AWS CloudTrail in the AWS Organizations User Guide. To learn more about CloudTrail, including how to turn it on and find your log files, see the AWS CloudTrail User Guide.

" + "documentation":"AWS Organizations API Reference

AWS Organizations is a web service that enables you to consolidate your multiple AWS accounts into an organization and centrally manage your accounts and their resources.

This guide provides descriptions of the Organizations API. For more information about using this service, see the AWS Organizations User Guide.

API Version

This version of the Organizations API Reference documents the Organizations API version 2016-11-28.

As an alternative to using the API directly, you can use one of the AWS SDKs, which consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .NET, iOS, Android, and more). The SDKs provide a convenient way to create programmatic access to AWS Organizations. For example, the SDKs take care of cryptographically signing requests, managing errors, and retrying requests automatically. For more information about the AWS SDKs, including how to download and install them, see Tools for Amazon Web Services.

We recommend that you use the AWS SDKs to make programmatic API calls to Organizations. However, you also can use the Organizations Query API to make direct calls to the Organizations web service. To learn more about the Organizations Query API, see Making Query Requests in the AWS Organizations User Guide. Organizations supports GET and POST requests for all actions. That is, the API does not require you to use GET for some actions and POST for others. However, GET requests are subject to the limitation size of a URL. Therefore, for operations that require larger sizes, use a POST request.

Signing Requests

When you send HTTP requests to AWS, you must sign the requests so that AWS can identify who sent them. You sign requests with your AWS access key, which consists of an access key ID and a secret access key. We strongly recommend that you do not create an access key for your root account. Anyone who has the access key for your root account has unrestricted access to all the resources in your account. Instead, create an access key for an IAM user account that has administrative privileges. As another option, use AWS Security Token Service to generate temporary security credentials, and use those credentials to sign requests.

To sign requests, we recommend that you use Signature Version 4. If you have an existing application that uses Signature Version 2, you do not have to update it to use Signature Version 4. However, some operations now require Signature Version 4. The documentation for operations that require version 4 indicate this requirement.

When you use the AWS Command Line Interface (AWS CLI) or one of the AWS SDKs to make requests to AWS, these tools automatically sign the requests for you with the access key that you specify when you configure the tools.

In this release, each organization can have only one root. In a future release, a single organization will support multiple roots.

Support and Feedback for AWS Organizations

We welcome your feedback. Send your comments to feedback-awsorganizations@amazon.com or post your feedback and questions in the AWS Organizations support forum. For more information about the AWS support forums, see Forums Help.

Endpoint to Call When Using the CLI or the AWS API

For the current release of Organizations, you must specify the us-east-1 region for all AWS API and CLI calls. You can do this in the CLI by using these parameters and commands:

  • Use the following parameter with each command to specify both the endpoint and its region:

    --endpoint-url https://organizations.us-east-1.amazonaws.com

  • Use the default endpoint, but configure your default region with this command:

    aws configure set default.region us-east-1

  • Use the following parameter with each command to specify the endpoint:

    --region us-east-1

For the various SDKs used to call the APIs, see the documentation for the SDK of interest to learn how to direct the requests to a specific endpoint. For more information, see Regions and Endpoints in the AWS General Reference.

How examples are presented

The JSON returned by the AWS Organizations service in response to your requests is returned as a single long string without line breaks or formatting whitespace. Both line breaks and whitespace are included in the examples in this guide to improve readability. When example input parameters also would result in long strings that would extend beyond the screen, we insert line breaks to enhance readability. You should always submit the input as a single JSON text string.

Recording API Requests

AWS Organizations supports AWS CloudTrail, a service that records AWS API calls for your AWS account and delivers log files to an Amazon S3 bucket. By using information collected by AWS CloudTrail, you can determine which requests were successfully made to Organizations, who made the request, when it was made, and so on. For more about AWS Organizations and its support for AWS CloudTrail, see Logging AWS Organizations Events with AWS CloudTrail in the AWS Organizations User Guide. To learn more about CloudTrail, including how to turn it on and find your log files, see the AWS CloudTrail User Guide.

" } diff --git a/botocore/data/pi/2018-02-27/paginators-1.json b/botocore/data/pi/2018-02-27/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/pi/2018-02-27/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/pi/2018-02-27/service-2.json b/botocore/data/pi/2018-02-27/service-2.json new file mode 100644 index 00000000..3253917f --- /dev/null +++ b/botocore/data/pi/2018-02-27/service-2.json @@ -0,0 +1,401 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-02-27", + "endpointPrefix":"pi", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"AWS PI", + "serviceFullName":"AWS Performance Insights", + "serviceId":"PI", + "signatureVersion":"v4", + "signingName":"pi", + "targetPrefix":"PerformanceInsightsv20180227", + "uid":"pi-2018-02-27" + }, + "operations":{ + "DescribeDimensionKeys":{ + "name":"DescribeDimensionKeys", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDimensionKeysRequest"}, + "output":{"shape":"DescribeDimensionKeysResponse"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"InternalServiceError"}, + {"shape":"NotAuthorizedException"} + ], + "documentation":"

For a specific time period, retrieve the top N dimension keys for a metric.

" + }, + "GetResourceMetrics":{ + "name":"GetResourceMetrics", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetResourceMetricsRequest"}, + "output":{"shape":"GetResourceMetricsResponse"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"InternalServiceError"}, + {"shape":"NotAuthorizedException"} + ], + "documentation":"

Retrieve Performance Insights metrics for a set of data sources, over a time period. You can provide specific dimension groups and dimensions, and provide aggregation and filtering criteria for each group.

" + } + }, + "shapes":{ + "DataPoint":{ + "type":"structure", + "required":[ + "Timestamp", + "Value" + ], + "members":{ + "Timestamp":{ + "shape":"ISOTimestamp", + "documentation":"

The time, in epoch format, associated with a particular Value.

" + }, + "Value":{ + "shape":"Double", + "documentation":"

The actual value associated with a particular Timestamp.

" + } + }, + "documentation":"

A timestamp, and a single numerical value, which together represent a measurement at a particular point in time.

" + }, + "DataPointsList":{ + "type":"list", + "member":{"shape":"DataPoint"} + }, + "DescribeDimensionKeysRequest":{ + "type":"structure", + "required":[ + "ServiceType", + "Identifier", + "StartTime", + "EndTime", + "Metric", + "GroupBy" + ], + "members":{ + "ServiceType":{ + "shape":"ServiceType", + "documentation":"

The AWS service for which Performance Insights will return metrics. The only valid value for ServiceType is: RDS

" + }, + "Identifier":{ + "shape":"String", + "documentation":"

An immutable, AWS Region-unique identifier for a data source. Performance Insights gathers metrics from this data source.

To use an Amazon RDS instance as a data source, you specify its DbiResourceId value - for example: db-FAIHNTYBKTGAUSUZQYPDS2GW4A

" + }, + "StartTime":{ + "shape":"ISOTimestamp", + "documentation":"

The date and time specifying the beginning of the requested time series data. You can't specify a StartTime that's earlier than 7 days ago. The value specified is inclusive - data points equal to or greater than StartTime will be returned.

The value for StartTime must be earlier than the value for EndTime.

" + }, + "EndTime":{ + "shape":"ISOTimestamp", + "documentation":"

The date and time specifying the end of the requested time series data. The value specified is exclusive - data points less than (but not equal to) EndTime will be returned.

The value for EndTime must be later than the value for StartTime.

" + }, + "Metric":{ + "shape":"String", + "documentation":"

The name of a Performance Insights metric to be measured.

Valid values for Metric are:

  • db.load.avg - a scaled representation of the number of active sessions for the database engine.

  • db.sampledload.avg - the raw number of active sessions for the database engine.

" + }, + "PeriodInSeconds":{ + "shape":"Integer", + "documentation":"

The granularity, in seconds, of the data points returned from Performance Insights. A period can be as short as one second, or as long as one day (86400 seconds). Valid values are:

  • 1 (one second)

  • 60 (one minute)

  • 300 (five minutes)

  • 3600 (one hour)

  • 86400 (twenty-four hours)

If you don't specify PeriodInSeconds, then Performance Insights will choose a value for you, with a goal of returning roughly 100-200 data points in the response.

" + }, + "GroupBy":{ + "shape":"DimensionGroup", + "documentation":"

A specification for how to aggregate the data points from a query result. You must specify a valid dimension group. Performance Insights will return all of the dimensions within that group, unless you provide the names of specific dimensions within that group. You can also request that Performance Insights return a limited number of values for a dimension.

" + }, + "PartitionBy":{ + "shape":"DimensionGroup", + "documentation":"

For each dimension specified in GroupBy, specify a secondary dimension to further subdivide the partition keys in the response.

" + }, + "Filter":{ + "shape":"MetricQueryFilterMap", + "documentation":"

One or more filters to apply in the request. Restrictions:

  • Any number of filters by the same dimension, as specified in the GroupBy or Partition parameters.

  • A single filter for any other dimension in this dimension group.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of items to return in the response. If more items exist than the specified MaxRecords value, a pagination token is included in the response so that the remaining results can be retrieved.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the token, up to the value specified by MaxRecords.

" + } + } + }, + "DescribeDimensionKeysResponse":{ + "type":"structure", + "members":{ + "AlignedStartTime":{ + "shape":"ISOTimestamp", + "documentation":"

The start time for the returned dimension keys, after alignment to a granular boundary (as specified by PeriodInSeconds). AlignedStartTime will be less than or equal to the value of the user-specified StartTime.

" + }, + "AlignedEndTime":{ + "shape":"ISOTimestamp", + "documentation":"

The end time for the returned dimension keys, after alignment to a granular boundary (as specified by PeriodInSeconds). AlignedEndTime will be greater than or equal to the value of the user-specified EndTime.

" + }, + "PartitionKeys":{ + "shape":"ResponsePartitionKeyList", + "documentation":"

If PartitionBy was present in the request, PartitionKeys contains the breakdown of dimension keys by the specified partitions.

" + }, + "Keys":{ + "shape":"DimensionKeyDescriptionList", + "documentation":"

The dimension keys that were requested.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the token, up to the value specified by MaxRecords.

" + } + } + }, + "DimensionGroup":{ + "type":"structure", + "required":["Group"], + "members":{ + "Group":{ + "shape":"String", + "documentation":"

The name of the dimension group. Valid values are:

  • db.user

  • db.host

  • db.sql

  • db.sql_tokenized

  • db.wait_event

  • db.wait_event_type

" + }, + "Dimensions":{ + "shape":"StringList", + "documentation":"

A list of specific dimensions from a dimension group. If this parameter is not present, then it signifies that all of the dimensions in the group were requested, or are present in the response.

Valid values for elements in the Dimensions array are:

  • db.user.id

  • db.user.name

  • db.host.id

  • db.host.name

  • db.sql.id

  • db.sql.db_id

  • db.sql.statement

  • db.sql.tokenized_id

  • db.sql_tokenized.id

  • db.sql_tokenized.db_id

  • db.sql_tokenized.statement

  • db.wait_event.name

  • db.wait_event.type

  • db.wait_event_type.name

" + }, + "Limit":{ + "shape":"Limit", + "documentation":"

The maximum number of items to fetch for this dimension group.

" + } + }, + "documentation":"

A logical grouping of Performance Insights metrics for a related subject area. For example, the db.sql dimension group consists of the following dimensions: db.sql.id, db.sql.db_id, db.sql.statement, and db.sql.tokenized_id.

" + }, + "DimensionKeyDescription":{ + "type":"structure", + "members":{ + "Dimensions":{ + "shape":"DimensionMap", + "documentation":"

A map of name-value pairs for the dimensions in the group.

" + }, + "Total":{ + "shape":"Double", + "documentation":"

The aggregated metric value for the dimension(s), over the requested time range.

" + }, + "Partitions":{ + "shape":"MetricValuesList", + "documentation":"

If PartitionBy was specified, PartitionKeys contains the dimensions that were requested.

" + } + }, + "documentation":"

An array of descriptions and aggregated values for each dimension within a dimension group.

" + }, + "DimensionKeyDescriptionList":{ + "type":"list", + "member":{"shape":"DimensionKeyDescription"} + }, + "DimensionMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "Double":{"type":"double"}, + "GetResourceMetricsRequest":{ + "type":"structure", + "required":[ + "ServiceType", + "Identifier", + "MetricQueries", + "StartTime", + "EndTime" + ], + "members":{ + "ServiceType":{ + "shape":"ServiceType", + "documentation":"

The AWS service for which Performance Insights will return metrics. The only valid value for ServiceType is: RDS

" + }, + "Identifier":{ + "shape":"String", + "documentation":"

An immutable, AWS Region-unique identifier for a data source. Performance Insights gathers metrics from this data source.

To use an Amazon RDS instance as a data source, you specify its DbiResourceId value - for example: db-FAIHNTYBKTGAUSUZQYPDS2GW4A

" + }, + "MetricQueries":{ + "shape":"MetricQueryList", + "documentation":"

An array of one or more queries to perform. Each query must specify a Performance Insights metric, and can optionally specify aggregation and filtering criteria.

" + }, + "StartTime":{ + "shape":"ISOTimestamp", + "documentation":"

The date and time specifying the beginning of the requested time series data. You can't specify a StartTime that's earlier than 7 days ago. The value specified is inclusive - data points equal to or greater than StartTime will be returned.

The value for StartTime must be earlier than the value for EndTime.

" + }, + "EndTime":{ + "shape":"ISOTimestamp", + "documentation":"

The date and time specifying the end of the requested time series data. The value specified is exclusive - data points less than (but not equal to) EndTime will be returned.

The value for EndTime must be later than the value for StartTime.

" + }, + "PeriodInSeconds":{ + "shape":"Integer", + "documentation":"

The granularity, in seconds, of the data points returned from Performance Insights. A period can be as short as one second, or as long as one day (86400 seconds). Valid values are:

  • 1 (one second)

  • 60 (one minute)

  • 300 (five minutes)

  • 3600 (one hour)

  • 86400 (twenty-four hours)

If you don't specify PeriodInSeconds, then Performance Insights will choose a value for you, with a goal of returning roughly 100-200 data points in the response.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of items to return in the response. If more items exist than the specified MaxRecords value, a pagination token is included in the response so that the remaining results can be retrieved.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the token, up to the value specified by MaxRecords.

" + } + } + }, + "GetResourceMetricsResponse":{ + "type":"structure", + "members":{ + "AlignedStartTime":{ + "shape":"ISOTimestamp", + "documentation":"

The start time for the returned metrics, after alignment to a granular boundary (as specified by PeriodInSeconds). AlignedStartTime will be less than or equal to the value of the user-specified StartTime.

" + }, + "AlignedEndTime":{ + "shape":"ISOTimestamp", + "documentation":"

The end time for the returned metrics, after alignment to a granular boundary (as specified by PeriodInSeconds). AlignedEndTime will be greater than or equal to the value of the user-specified EndTime.

" + }, + "Identifier":{ + "shape":"String", + "documentation":"

An immutable, AWS Region-unique identifier for a data source. Performance Insights gathers metrics from this data source.

To use an Amazon RDS instance as a data source, you specify its DbiResourceId value - for example: db-FAIHNTYBKTGAUSUZQYPDS2GW4A

" + }, + "MetricList":{ + "shape":"MetricKeyDataPointsList", + "documentation":"

An array of metric results, where each array element contains all of the data points for a particular dimension.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the token, up to the value specified by MaxRecords.

" + } + } + }, + "ISOTimestamp":{"type":"timestamp"}, + "Integer":{"type":"integer"}, + "InternalServiceError":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The request failed due to an unknown error.

", + "exception":true, + "fault":true + }, + "InvalidArgumentException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

One of the arguments provided is invalid for this request.

", + "exception":true + }, + "Limit":{ + "type":"integer", + "max":10, + "min":1 + }, + "MaxResults":{ + "type":"integer", + "max":20, + "min":0 + }, + "MetricKeyDataPoints":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"ResponseResourceMetricKey", + "documentation":"

The dimension(s) to which the data points apply.

" + }, + "DataPoints":{ + "shape":"DataPointsList", + "documentation":"

An array of timestamp-value pairs, representing measurements over a period of time.

" + } + }, + "documentation":"

A time-ordered series of data points, corresponding to a dimension of a Performance Insights metric.

" + }, + "MetricKeyDataPointsList":{ + "type":"list", + "member":{"shape":"MetricKeyDataPoints"} + }, + "MetricQuery":{ + "type":"structure", + "required":["Metric"], + "members":{ + "Metric":{ + "shape":"String", + "documentation":"

The name of a Performance Insights metric to be measured.

Valid values for Metric are:

  • db.load.avg - a scaled representation of the number of active sessions for the database engine.

  • db.sampledload.avg - the raw number of active sessions for the database engine.

" + }, + "GroupBy":{ + "shape":"DimensionGroup", + "documentation":"

A specification for how to aggregate the data points from a query result. You must specify a valid dimension group. Performance Insights will return all of the dimensions within that group, unless you provide the names of specific dimensions within that group. You can also request that Performance Insights return a limited number of values for a dimension.

" + }, + "Filter":{ + "shape":"MetricQueryFilterMap", + "documentation":"

One or more filters to apply in the request. Restrictions:

  • Any number of filters by the same dimension, as specified in the GroupBy parameter.

  • A single filter for any other dimension in this dimension group.

" + } + }, + "documentation":"

A single query to be processed. You must provide the metric to query. If no other parameters are specified, Performance Insights returns all of the data points for that metric. You can optionally request that the data points be aggregated by dimension group ( GroupBy), and return only those data points that match your criteria (Filter).

" + }, + "MetricQueryFilterMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "MetricQueryList":{ + "type":"list", + "member":{"shape":"MetricQuery"}, + "max":15, + "min":1 + }, + "MetricValuesList":{ + "type":"list", + "member":{"shape":"Double"} + }, + "NotAuthorizedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The user is not authorized to perform this request.

", + "exception":true + }, + "ResponsePartitionKey":{ + "type":"structure", + "required":["Dimensions"], + "members":{ + "Dimensions":{ + "shape":"DimensionMap", + "documentation":"

A dimension map that contains the dimension(s) for this partition.

" + } + }, + "documentation":"

If PartitionBy was specified in a DescribeDimensionKeys request, the dimensions are returned in an array. Each element in the array specifies one dimension.

" + }, + "ResponsePartitionKeyList":{ + "type":"list", + "member":{"shape":"ResponsePartitionKey"} + }, + "ResponseResourceMetricKey":{ + "type":"structure", + "required":["Metric"], + "members":{ + "Metric":{ + "shape":"String", + "documentation":"

The name of a Performance Insights metric to be measured.

Valid values for Metric are:

  • db.load.avg - a scaled representation of the number of active sessions for the database engine.

  • db.sampledload.avg - the raw number of active sessions for the database engine.

" + }, + "Dimensions":{ + "shape":"DimensionMap", + "documentation":"

The valid dimensions for the metric.

" + } + }, + "documentation":"

An object describing a Performance Insights metric and one or more dimensions for that metric.

" + }, + "ServiceType":{ + "type":"string", + "enum":["RDS"] + }, + "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"}, + "max":10, + "min":1 + } + }, + "documentation":"

AWS Performance Insights enables you to monitor and explore different dimensions of database load based on data captured from a running RDS instance. The guide provides detailed information about Performance Insights data types, parameters and errors. For more information about Performance Insights capabilities see Using Amazon RDS Performance Insights in the Amazon RDS User Guide.

The AWS Performance Insights API provides visibility into the performance of your RDS instance, when Performance Insights is enabled for supported engine types. While Amazon CloudWatch provides the authoritative source for AWS service vended monitoring metrics, AWS Performance Insights offers a domain-specific view of database load measured as Average Active Sessions and provided to API consumers as a 2-dimensional time-series dataset. The time dimension of the data provides DB load data for each time point in the queried time range, and each time point decomposes overall load in relation to the requested dimensions, such as SQL, Wait-event, User or Host, measured at that time point.

" +} diff --git a/botocore/data/pinpoint/2016-12-01/service-2.json b/botocore/data/pinpoint/2016-12-01/service-2.json index 8975aabf..1ac19d12 100644 --- a/botocore/data/pinpoint/2016-12-01/service-2.json +++ b/botocore/data/pinpoint/2016-12-01/service-2.json @@ -4,11 +4,13 @@ "endpointPrefix" : "pinpoint", "signingName" : "mobiletargeting", "serviceFullName" : "Amazon Pinpoint", + "serviceId" : "Pinpoint", "protocol" : "rest-json", "jsonVersion" : "1.1", "uid" : "pinpoint-2016-12-01", "signatureVersion" : "v4" }, + "documentation" : "Amazon Pinpoint", "operations" : { "CreateApp" : { "name" : "CreateApp", @@ -21,20 +23,27 @@ "shape" : "CreateAppRequest" }, "output" : { - "shape" : "CreateAppResponse" + "shape" : "CreateAppResponse", + "documentation" : "201 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Creates or updates an app." 
}, @@ -49,20 +58,27 @@ "shape" : "CreateCampaignRequest" }, "output" : { - "shape" : "CreateCampaignResponse" + "shape" : "CreateCampaignResponse", + "documentation" : "201 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Creates or updates a campaign." }, @@ -77,20 +93,27 @@ "shape" : "CreateExportJobRequest" }, "output" : { - "shape" : "CreateExportJobResponse" + "shape" : "CreateExportJobResponse", + "documentation" : "202 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Creates an export job." 
}, @@ -105,20 +128,27 @@ "shape" : "CreateImportJobRequest" }, "output" : { - "shape" : "CreateImportJobResponse" + "shape" : "CreateImportJobResponse", + "documentation" : "201 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Creates or updates an import job." }, @@ -133,20 +163,27 @@ "shape" : "CreateSegmentRequest" }, "output" : { - "shape" : "CreateSegmentResponse" + "shape" : "CreateSegmentResponse", + "documentation" : "201 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Used to create or update a segment." 
}, @@ -161,22 +198,29 @@ "shape" : "DeleteAdmChannelRequest" }, "output" : { - "shape" : "DeleteAdmChannelResponse" + "shape" : "DeleteAdmChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], - "documentation" : "Delete an ADM channel" + "documentation" : "Delete an ADM channel." }, "DeleteApnsChannel" : { "name" : "DeleteApnsChannel", @@ -189,20 +233,27 @@ "shape" : "DeleteApnsChannelRequest" }, "output" : { - "shape" : "DeleteApnsChannelResponse" + "shape" : "DeleteApnsChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Deletes the APNs channel for an app." 
}, @@ -217,22 +268,29 @@ "shape" : "DeleteApnsSandboxChannelRequest" }, "output" : { - "shape" : "DeleteApnsSandboxChannelResponse" + "shape" : "DeleteApnsSandboxChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], - "documentation" : "Delete an APNS sandbox channel" + "documentation" : "Delete an APNS sandbox channel." 
}, "DeleteApnsVoipChannel" : { "name" : "DeleteApnsVoipChannel", @@ -245,20 +303,27 @@ "shape" : "DeleteApnsVoipChannelRequest" }, "output" : { - "shape" : "DeleteApnsVoipChannelResponse" + "shape" : "DeleteApnsVoipChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Delete an APNS VoIP channel" }, @@ -273,20 +338,27 @@ "shape" : "DeleteApnsVoipSandboxChannelRequest" }, "output" : { - "shape" : "DeleteApnsVoipSandboxChannelResponse" + "shape" : "DeleteApnsVoipSandboxChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Delete an APNS VoIP 
sandbox channel" }, @@ -301,20 +373,27 @@ "shape" : "DeleteAppRequest" }, "output" : { - "shape" : "DeleteAppResponse" + "shape" : "DeleteAppResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Deletes an app." }, @@ -329,20 +408,27 @@ "shape" : "DeleteBaiduChannelRequest" }, "output" : { - "shape" : "DeleteBaiduChannelResponse" + "shape" : "DeleteBaiduChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Delete a BAIDU GCM channel" }, @@ -357,20 +443,27 @@ "shape" : "DeleteCampaignRequest" }, "output" : { - "shape" : "DeleteCampaignResponse" + 
"shape" : "DeleteCampaignResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Deletes a campaign." }, @@ -385,22 +478,29 @@ "shape" : "DeleteEmailChannelRequest" }, "output" : { - "shape" : "DeleteEmailChannelResponse" + "shape" : "DeleteEmailChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], - "documentation" : "Delete an email channel" + "documentation" : "Delete an email channel." 
}, "DeleteEndpoint" : { "name" : "DeleteEndpoint", @@ -448,20 +548,27 @@ "shape" : "DeleteEventStreamRequest" }, "output" : { - "shape" : "DeleteEventStreamResponse" + "shape" : "DeleteEventStreamResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Deletes the event stream for an app." }, @@ -476,20 +583,27 @@ "shape" : "DeleteGcmChannelRequest" }, "output" : { - "shape" : "DeleteGcmChannelResponse" + "shape" : "DeleteGcmChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Deletes the GCM channel for an app." 
}, @@ -504,20 +618,27 @@ "shape" : "DeleteSegmentRequest" }, "output" : { - "shape" : "DeleteSegmentResponse" + "shape" : "DeleteSegmentResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Deletes a segment." }, @@ -532,22 +653,64 @@ "shape" : "DeleteSmsChannelRequest" }, "output" : { - "shape" : "DeleteSmsChannelResponse" + "shape" : "DeleteSmsChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], - "documentation" : "Delete an SMS channel" + "documentation" : "Delete an SMS channel." 
+ }, + "DeleteUserEndpoints" : { + "name" : "DeleteUserEndpoints", + "http" : { + "method" : "DELETE", + "requestUri" : "/v1/apps/{application-id}/users/{user-id}", + "responseCode" : 202 + }, + "input" : { + "shape" : "DeleteUserEndpointsRequest" + }, + "output" : { + "shape" : "DeleteUserEndpointsResponse", + "documentation" : "202 response" + }, + "errors" : [ { + "shape" : "BadRequestException", + "documentation" : "400 response" + }, { + "shape" : "InternalServerErrorException", + "documentation" : "500 response" + }, { + "shape" : "ForbiddenException", + "documentation" : "403 response" + }, { + "shape" : "NotFoundException", + "documentation" : "404 response" + }, { + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" + }, { + "shape" : "TooManyRequestsException", + "documentation" : "429 response" + } ], + "documentation" : "Deletes endpoints associated with an user id." }, "GetAdmChannel" : { "name" : "GetAdmChannel", @@ -560,22 +723,29 @@ "shape" : "GetAdmChannelRequest" }, "output" : { - "shape" : "GetAdmChannelResponse" + "shape" : "GetAdmChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], - "documentation" : "Get an ADM channel" + "documentation" : "Get an ADM channel." 
}, "GetApnsChannel" : { "name" : "GetApnsChannel", @@ -588,20 +758,27 @@ "shape" : "GetApnsChannelRequest" }, "output" : { - "shape" : "GetApnsChannelResponse" + "shape" : "GetApnsChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Returns information about the APNs channel for an app." 
}, @@ -616,22 +793,29 @@ "shape" : "GetApnsSandboxChannelRequest" }, "output" : { - "shape" : "GetApnsSandboxChannelResponse" + "shape" : "GetApnsSandboxChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], - "documentation" : "Get an APNS sandbox channel" + "documentation" : "Get an APNS sandbox channel." 
}, "GetApnsVoipChannel" : { "name" : "GetApnsVoipChannel", @@ -644,20 +828,27 @@ "shape" : "GetApnsVoipChannelRequest" }, "output" : { - "shape" : "GetApnsVoipChannelResponse" + "shape" : "GetApnsVoipChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Get an APNS VoIP channel" }, @@ -672,20 +863,27 @@ "shape" : "GetApnsVoipSandboxChannelRequest" }, "output" : { - "shape" : "GetApnsVoipSandboxChannelResponse" + "shape" : "GetApnsVoipSandboxChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Get an APNS VoIPSandbox channel" }, @@ -700,20 
+898,27 @@ "shape" : "GetAppRequest" }, "output" : { - "shape" : "GetAppResponse" + "shape" : "GetAppResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Returns information about an app." }, @@ -728,20 +933,27 @@ "shape" : "GetApplicationSettingsRequest" }, "output" : { - "shape" : "GetApplicationSettingsResponse" + "shape" : "GetApplicationSettingsResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Used to request the settings for an app." 
}, @@ -756,20 +968,27 @@ "shape" : "GetAppsRequest" }, "output" : { - "shape" : "GetAppsResponse" + "shape" : "GetAppsResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Returns information about your apps." }, @@ -784,20 +1003,27 @@ "shape" : "GetBaiduChannelRequest" }, "output" : { - "shape" : "GetBaiduChannelResponse" + "shape" : "GetBaiduChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Get a BAIDU GCM channel" }, @@ -812,20 +1038,27 @@ "shape" : "GetCampaignRequest" }, "output" : { - "shape" : "GetCampaignResponse" + "shape" : 
"GetCampaignResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Returns information about a campaign." }, @@ -840,20 +1073,27 @@ "shape" : "GetCampaignActivitiesRequest" }, "output" : { - "shape" : "GetCampaignActivitiesResponse" + "shape" : "GetCampaignActivitiesResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Returns information about the activity performed by a campaign." 
}, @@ -868,20 +1108,27 @@ "shape" : "GetCampaignVersionRequest" }, "output" : { - "shape" : "GetCampaignVersionResponse" + "shape" : "GetCampaignVersionResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Returns information about a specific version of a campaign." }, @@ -896,20 +1143,27 @@ "shape" : "GetCampaignVersionsRequest" }, "output" : { - "shape" : "GetCampaignVersionsResponse" + "shape" : "GetCampaignVersionsResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Returns information about your campaign versions." 
}, @@ -924,23 +1178,65 @@ "shape" : "GetCampaignsRequest" }, "output" : { - "shape" : "GetCampaignsResponse" + "shape" : "GetCampaignsResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Returns information about your campaigns." }, + "GetChannels" : { + "name" : "GetChannels", + "http" : { + "method" : "GET", + "requestUri" : "/v1/apps/{application-id}/channels", + "responseCode" : 200 + }, + "input" : { + "shape" : "GetChannelsRequest" + }, + "output" : { + "shape" : "GetChannelsResponse", + "documentation" : "200 response" + }, + "errors" : [ { + "shape" : "BadRequestException", + "documentation" : "400 response" + }, { + "shape" : "InternalServerErrorException", + "documentation" : "500 response" + }, { + "shape" : "ForbiddenException", + "documentation" : "403 response" + }, { + "shape" : "NotFoundException", + "documentation" : "404 response" + }, { + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" + }, { + "shape" : "TooManyRequestsException", + "documentation" : "429 response" + } ], + "documentation" : "Get all channels." 
+ }, "GetEmailChannel" : { "name" : "GetEmailChannel", "http" : { @@ -952,22 +1248,29 @@ "shape" : "GetEmailChannelRequest" }, "output" : { - "shape" : "GetEmailChannelResponse" + "shape" : "GetEmailChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], - "documentation" : "Get an email channel" + "documentation" : "Get an email channel." 
}, "GetEndpoint" : { "name" : "GetEndpoint", @@ -980,20 +1283,27 @@ "shape" : "GetEndpointRequest" }, "output" : { - "shape" : "GetEndpointResponse" + "shape" : "GetEndpointResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Returns information about an endpoint." }, @@ -1008,20 +1318,27 @@ "shape" : "GetEventStreamRequest" }, "output" : { - "shape" : "GetEventStreamResponse" + "shape" : "GetEventStreamResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Returns the event stream for an app." 
}, @@ -1036,20 +1353,27 @@ "shape" : "GetExportJobRequest" }, "output" : { - "shape" : "GetExportJobResponse" + "shape" : "GetExportJobResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Returns information about an export job." }, @@ -1064,20 +1388,27 @@ "shape" : "GetExportJobsRequest" }, "output" : { - "shape" : "GetExportJobsResponse" + "shape" : "GetExportJobsResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Returns information about your export jobs." 
}, @@ -1092,20 +1423,27 @@ "shape" : "GetGcmChannelRequest" }, "output" : { - "shape" : "GetGcmChannelResponse" + "shape" : "GetGcmChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Returns information about the GCM channel for an app." }, @@ -1120,20 +1458,27 @@ "shape" : "GetImportJobRequest" }, "output" : { - "shape" : "GetImportJobResponse" + "shape" : "GetImportJobResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Returns information about an import job." 
}, @@ -1148,20 +1493,27 @@ "shape" : "GetImportJobsRequest" }, "output" : { - "shape" : "GetImportJobsResponse" + "shape" : "GetImportJobsResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Returns information about your import jobs." }, @@ -1176,20 +1528,27 @@ "shape" : "GetSegmentRequest" }, "output" : { - "shape" : "GetSegmentResponse" + "shape" : "GetSegmentResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Returns information about a segment." 
}, @@ -1204,20 +1563,27 @@ "shape" : "GetSegmentExportJobsRequest" }, "output" : { - "shape" : "GetSegmentExportJobsResponse" + "shape" : "GetSegmentExportJobsResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Returns a list of export jobs for a specific segment." }, @@ -1232,20 +1598,27 @@ "shape" : "GetSegmentImportJobsRequest" }, "output" : { - "shape" : "GetSegmentImportJobsResponse" + "shape" : "GetSegmentImportJobsResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Returns a list of import jobs for a specific segment." 
}, @@ -1260,20 +1633,27 @@ "shape" : "GetSegmentVersionRequest" }, "output" : { - "shape" : "GetSegmentVersionResponse" + "shape" : "GetSegmentVersionResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Returns information about a segment version." }, @@ -1288,20 +1668,27 @@ "shape" : "GetSegmentVersionsRequest" }, "output" : { - "shape" : "GetSegmentVersionsResponse" + "shape" : "GetSegmentVersionsResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Returns information about your segment versions." 
}, @@ -1316,20 +1703,27 @@ "shape" : "GetSegmentsRequest" }, "output" : { - "shape" : "GetSegmentsResponse" + "shape" : "GetSegmentsResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Used to get information about your segments." }, @@ -1344,22 +1738,99 @@ "shape" : "GetSmsChannelRequest" }, "output" : { - "shape" : "GetSmsChannelResponse" + "shape" : "GetSmsChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], - "documentation" : "Get an SMS channel" + "documentation" : "Get an SMS channel." 
+ }, + "GetUserEndpoints" : { + "name" : "GetUserEndpoints", + "http" : { + "method" : "GET", + "requestUri" : "/v1/apps/{application-id}/users/{user-id}", + "responseCode" : 200 + }, + "input" : { + "shape" : "GetUserEndpointsRequest" + }, + "output" : { + "shape" : "GetUserEndpointsResponse", + "documentation" : "200 response" + }, + "errors" : [ { + "shape" : "BadRequestException", + "documentation" : "400 response" + }, { + "shape" : "InternalServerErrorException", + "documentation" : "500 response" + }, { + "shape" : "ForbiddenException", + "documentation" : "403 response" + }, { + "shape" : "NotFoundException", + "documentation" : "404 response" + }, { + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" + }, { + "shape" : "TooManyRequestsException", + "documentation" : "429 response" + } ], + "documentation" : "Returns information about the endpoints associated with an user id." + }, + "PhoneNumberValidate" : { + "name" : "PhoneNumberValidate", + "http" : { + "method" : "POST", + "requestUri" : "/v1/phone/number/validate", + "responseCode" : 200 + }, + "input" : { + "shape" : "PhoneNumberValidateRequest" + }, + "output" : { + "shape" : "PhoneNumberValidateResponse", + "documentation" : "200 response" + }, + "errors" : [ { + "shape" : "BadRequestException", + "documentation" : "400 response" + }, { + "shape" : "InternalServerErrorException", + "documentation" : "500 response" + }, { + "shape" : "ForbiddenException", + "documentation" : "403 response" + }, { + "shape" : "NotFoundException", + "documentation" : "404 response" + }, { + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" + }, { + "shape" : "TooManyRequestsException", + "documentation" : "429 response" + } ], + "documentation" : "Returns information about the specified phone number." 
}, "PutEventStream" : { "name" : "PutEventStream", @@ -1372,23 +1843,65 @@ "shape" : "PutEventStreamRequest" }, "output" : { - "shape" : "PutEventStreamResponse" + "shape" : "PutEventStreamResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Use to create or update the event stream for an app." 
}, + "RemoveAttributes" : { + "name" : "RemoveAttributes", + "http" : { + "method" : "PUT", + "requestUri" : "/v1/apps/{application-id}/attributes/{attribute-type}", + "responseCode" : 200 + }, + "input" : { + "shape" : "RemoveAttributesRequest" + }, + "output" : { + "shape" : "RemoveAttributesResponse", + "documentation" : "200 response" + }, + "errors" : [ { + "shape" : "BadRequestException", + "documentation" : "400 response" + }, { + "shape" : "InternalServerErrorException", + "documentation" : "500 response" + }, { + "shape" : "ForbiddenException", + "documentation" : "403 response" + }, { + "shape" : "NotFoundException", + "documentation" : "404 response" + }, { + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" + }, { + "shape" : "TooManyRequestsException", + "documentation" : "429 response" + } ], + "documentation" : "Used to remove the attributes for an app" + }, "SendMessages" : { "name" : "SendMessages", "http" : { @@ -1400,22 +1913,29 @@ "shape" : "SendMessagesRequest" }, "output" : { - "shape" : "SendMessagesResponse" + "shape" : "SendMessagesResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], - "documentation" : "Send a batch of messages" + "documentation" : "Use this resource to send a direct message, which is a one time message that you 
send to a limited audience without creating a campaign. \n\nYou can send the message to up to 100 recipients. You cannot use the message to engage a segment. When you send the message, Amazon Pinpoint delivers it immediately, and you cannot schedule the delivery. To engage a user segment, and to schedule the message delivery, create a campaign instead of sending a direct message.\n\nYou can send a direct message as a push notification to your mobile app or as an SMS message to SMS-enabled devices." }, "SendUsersMessages" : { "name" : "SendUsersMessages", @@ -1428,22 +1948,29 @@ "shape" : "SendUsersMessagesRequest" }, "output" : { - "shape" : "SendUsersMessagesResponse" + "shape" : "SendUsersMessagesResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], - "documentation" : "Send a batch of messages to users" + "documentation" : "Use this resource to message a list of users. Amazon Pinpoint sends the message to all of the endpoints that are associated with each user.\n\nA user represents an individual who is assigned a unique user ID, and this ID is assigned to one or more endpoints. 
For example, if an individual uses your app on multiple devices, your app could assign that person's user ID to the endpoint for each device.\n\nWith the users-messages resource, you specify the message recipients as user IDs. For each user ID, Amazon Pinpoint delivers the message to all of the user's endpoints. Within the body of your request, you can specify a default message, and you can tailor your message for different channels, including those for mobile push and SMS.\n\nWith this resource, you send a direct message, which is a one time message that you send to a limited audience without creating a campaign. You can send the message to up to 100 users per request. You cannot use the message to engage a segment. When you send the message, Amazon Pinpoint delivers it immediately, and you cannot schedule the delivery. To engage a user segment, and to schedule the message delivery, create a campaign instead of using the users-messages resource." }, "UpdateAdmChannel" : { "name" : "UpdateAdmChannel", @@ -1456,22 +1983,29 @@ "shape" : "UpdateAdmChannelRequest" }, "output" : { - "shape" : "UpdateAdmChannelResponse" + "shape" : "UpdateAdmChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], - "documentation" : "Update an ADM channel" + "documentation" : "Update an ADM 
channel." }, "UpdateApnsChannel" : { "name" : "UpdateApnsChannel", @@ -1484,20 +2018,27 @@ "shape" : "UpdateApnsChannelRequest" }, "output" : { - "shape" : "UpdateApnsChannelResponse" + "shape" : "UpdateApnsChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Use to update the APNs channel for an app." 
}, @@ -1512,22 +2053,29 @@ "shape" : "UpdateApnsSandboxChannelRequest" }, "output" : { - "shape" : "UpdateApnsSandboxChannelResponse" + "shape" : "UpdateApnsSandboxChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], - "documentation" : "Update an APNS sandbox channel" + "documentation" : "Update an APNS sandbox channel." 
}, "UpdateApnsVoipChannel" : { "name" : "UpdateApnsVoipChannel", @@ -1540,20 +2088,27 @@ "shape" : "UpdateApnsVoipChannelRequest" }, "output" : { - "shape" : "UpdateApnsVoipChannelResponse" + "shape" : "UpdateApnsVoipChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Update an APNS VoIP channel" }, @@ -1568,20 +2123,27 @@ "shape" : "UpdateApnsVoipSandboxChannelRequest" }, "output" : { - "shape" : "UpdateApnsVoipSandboxChannelResponse" + "shape" : "UpdateApnsVoipSandboxChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Update an APNS VoIP 
sandbox channel" }, @@ -1596,20 +2158,27 @@ "shape" : "UpdateApplicationSettingsRequest" }, "output" : { - "shape" : "UpdateApplicationSettingsResponse" + "shape" : "UpdateApplicationSettingsResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Used to update the settings for an app." 
}, @@ -1624,20 +2193,27 @@ "shape" : "UpdateBaiduChannelRequest" }, "output" : { - "shape" : "UpdateBaiduChannelResponse" + "shape" : "UpdateBaiduChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Update a BAIDU GCM channel" }, @@ -1652,20 +2228,27 @@ "shape" : "UpdateCampaignRequest" }, "output" : { - "shape" : "UpdateCampaignResponse" + "shape" : "UpdateCampaignResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Use to update a campaign." 
}, @@ -1680,22 +2263,29 @@ "shape" : "UpdateEmailChannelRequest" }, "output" : { - "shape" : "UpdateEmailChannelResponse" + "shape" : "UpdateEmailChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], - "documentation" : "Update an email channel" + "documentation" : "Update an email channel." }, "UpdateEndpoint" : { "name" : "UpdateEndpoint", @@ -1708,22 +2298,29 @@ "shape" : "UpdateEndpointRequest" }, "output" : { - "shape" : "UpdateEndpointResponse" + "shape" : "UpdateEndpointResponse", + "documentation" : "202 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], - "documentation" : "Use to update an endpoint." 
+ "documentation" : "Creates or updates an endpoint." }, "UpdateEndpointsBatch" : { "name" : "UpdateEndpointsBatch", @@ -1736,20 +2333,27 @@ "shape" : "UpdateEndpointsBatchRequest" }, "output" : { - "shape" : "UpdateEndpointsBatchResponse" + "shape" : "UpdateEndpointsBatchResponse", + "documentation" : "202 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Use to update a batch of endpoints." 
}, @@ -1764,20 +2368,27 @@ "shape" : "UpdateGcmChannelRequest" }, "output" : { - "shape" : "UpdateGcmChannelResponse" + "shape" : "UpdateGcmChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Use to update the GCM channel for an app." }, @@ -1792,20 +2403,27 @@ "shape" : "UpdateSegmentRequest" }, "output" : { - "shape" : "UpdateSegmentResponse" + "shape" : "UpdateSegmentResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], "documentation" : "Use to update a segment." 
}, @@ -1820,22 +2438,29 @@ "shape" : "UpdateSmsChannelRequest" }, "output" : { - "shape" : "UpdateSmsChannelResponse" + "shape" : "UpdateSmsChannelResponse", + "documentation" : "200 response" }, "errors" : [ { - "shape" : "BadRequestException" + "shape" : "BadRequestException", + "documentation" : "400 response" }, { - "shape" : "InternalServerErrorException" + "shape" : "InternalServerErrorException", + "documentation" : "500 response" }, { - "shape" : "ForbiddenException" + "shape" : "ForbiddenException", + "documentation" : "403 response" }, { - "shape" : "NotFoundException" + "shape" : "NotFoundException", + "documentation" : "404 response" }, { - "shape" : "MethodNotAllowedException" + "shape" : "MethodNotAllowedException", + "documentation" : "405 response" }, { - "shape" : "TooManyRequestsException" + "shape" : "TooManyRequestsException", + "documentation" : "429 response" } ], - "documentation" : "Update an SMS channel" + "documentation" : "Update an SMS channel." } }, "shapes" : { @@ -1855,7 +2480,8 @@ "documentation" : "If the channel is enabled for sending messages." } }, - "documentation" : "Amazon Device Messaging channel definition." + "documentation" : "Amazon Device Messaging channel definition.", + "required" : [ ] }, "ADMChannelResponse" : { "type" : "structure", @@ -1874,7 +2500,7 @@ }, "HasCredential" : { "shape" : "__boolean", - "documentation" : "Indicates whether the channel is configured with ADM credentials. Amazon Pinpoint uses your credentials to authenticate push notifications with ADM. Provide your credentials by setting the ClientId and ClientSecret attributes." + "documentation" : "Not used. Retained for backwards compatibility." }, "Id" : { "shape" : "__string", @@ -1901,7 +2527,8 @@ "documentation" : "Version of channel" } }, - "documentation" : "Amazon Device Messaging channel definition." 
+ "documentation" : "Amazon Device Messaging channel definition.", + "required" : [ ] }, "ADMMessage" : { "type" : "structure", @@ -2009,7 +2636,8 @@ "documentation" : "The token key used for APNs Tokens." } }, - "documentation" : "Apple Push Notification Service channel definition." + "documentation" : "Apple Push Notification Service channel definition.", + "required" : [ ] }, "APNSChannelResponse" : { "type" : "structure", @@ -2032,7 +2660,7 @@ }, "HasCredential" : { "shape" : "__boolean", - "documentation" : "Indicates whether the channel is configured with APNs credentials. Amazon Pinpoint uses your credentials to authenticate push notifications with APNs. To use APNs token authentication, set the BundleId, TeamId, TokenKey, and TokenKeyId attributes. To use certificate authentication, set the Certificate and PrivateKey attributes." + "documentation" : "Not used. Retained for backwards compatibility." }, "HasTokenKey" : { "shape" : "__boolean", @@ -2063,7 +2691,8 @@ "documentation" : "Version of channel" } }, - "documentation" : "Apple Distribution Push Notification Service channel definition." + "documentation" : "Apple Distribution Push Notification Service channel definition.", + "required" : [ ] }, "APNSMessage" : { "type" : "structure", @@ -2175,7 +2804,8 @@ "documentation" : "The token key used for APNs Tokens." } }, - "documentation" : "Apple Development Push Notification Service channel definition." + "documentation" : "Apple Development Push Notification Service channel definition.", + "required" : [ ] }, "APNSSandboxChannelResponse" : { "type" : "structure", @@ -2198,7 +2828,7 @@ }, "HasCredential" : { "shape" : "__boolean", - "documentation" : "Indicates whether the channel is configured with APNs credentials. Amazon Pinpoint uses your credentials to authenticate push notifications with APNs. To use APNs token authentication, set the BundleId, TeamId, TokenKey, and TokenKeyId attributes. 
To use certificate authentication, set the Certificate and PrivateKey attributes." + "documentation" : "Not used. Retained for backwards compatibility." }, "HasTokenKey" : { "shape" : "__boolean", @@ -2229,7 +2859,8 @@ "documentation" : "Version of channel" } }, - "documentation" : "Apple Development Push Notification Service channel definition." + "documentation" : "Apple Development Push Notification Service channel definition.", + "required" : [ ] }, "APNSVoipChannelRequest" : { "type" : "structure", @@ -2267,7 +2898,8 @@ "documentation" : "The token key used for APNs Tokens." } }, - "documentation" : "Apple VoIP Push Notification Service channel definition." + "documentation" : "Apple VoIP Push Notification Service channel definition.", + "required" : [ ] }, "APNSVoipChannelResponse" : { "type" : "structure", @@ -2290,7 +2922,7 @@ }, "HasCredential" : { "shape" : "__boolean", - "documentation" : "If the channel is registered with a credential for authentication." + "documentation" : "Not used. Retained for backwards compatibility." }, "HasTokenKey" : { "shape" : "__boolean", @@ -2321,7 +2953,8 @@ "documentation" : "Version of channel" } }, - "documentation" : "Apple VoIP Push Notification Service channel definition." + "documentation" : "Apple VoIP Push Notification Service channel definition.", + "required" : [ ] }, "APNSVoipSandboxChannelRequest" : { "type" : "structure", @@ -2359,7 +2992,8 @@ "documentation" : "The token key used for APNs Tokens." } }, - "documentation" : "Apple VoIP Developer Push Notification Service channel definition." + "documentation" : "Apple VoIP Developer Push Notification Service channel definition.", + "required" : [ ] }, "APNSVoipSandboxChannelResponse" : { "type" : "structure", @@ -2382,7 +3016,7 @@ }, "HasCredential" : { "shape" : "__boolean", - "documentation" : "If the channel is registered with a credential for authentication." + "documentation" : "Not used. Retained for backwards compatibility." 
}, "HasTokenKey" : { "shape" : "__boolean", @@ -2413,7 +3047,8 @@ "documentation" : "Version of channel" } }, - "documentation" : "Apple VoIP Developer Push Notification Service channel definition." + "documentation" : "Apple VoIP Developer Push Notification Service channel definition.", + "required" : [ ] }, "Action" : { "type" : "string", @@ -2427,7 +3062,8 @@ "documentation" : "List of campaign activities" } }, - "documentation" : "Activities for campaign." + "documentation" : "Activities for campaign.", + "required" : [ ] }, "ActivityResponse" : { "type" : "structure", @@ -2485,7 +3121,8 @@ "documentation" : "The ID of a variation of the campaign used for A/B testing." } }, - "documentation" : "Activity definition" + "documentation" : "Activity definition", + "required" : [ ] }, "AddressConfiguration" : { "type" : "structure", @@ -2529,7 +3166,8 @@ "documentation" : "The display name of the application." } }, - "documentation" : "Application Response." + "documentation" : "Application Response.", + "required" : [ ] }, "ApplicationSettingsResource" : { "type" : "structure", @@ -2555,7 +3193,8 @@ "documentation" : "The default quiet time for the app. Each campaign for this app sends no messages during this time unless the campaign overrides the default with a quiet time of its own." } }, - "documentation" : "Application settings." + "documentation" : "Application settings.", + "required" : [ ] }, "ApplicationsResponse" : { "type" : "structure", @@ -2583,18 +3222,38 @@ "documentation" : "The criteria values for the segment dimension. Endpoints with matching attribute values are included or excluded from the segment, depending on the setting for Type." 
} }, - "documentation" : "Custom attibute dimension" + "documentation" : "Custom attibute dimension", + "required" : [ ] }, "AttributeType" : { "type" : "string", "enum" : [ "INCLUSIVE", "EXCLUSIVE" ] }, + "AttributesResource" : { + "type" : "structure", + "members" : { + "ApplicationId" : { + "shape" : "__string", + "documentation" : "The unique ID for the application." + }, + "AttributeType" : { + "shape" : "__string", + "documentation" : "The attribute type for the application." + }, + "Attributes" : { + "shape" : "ListOf__string", + "documentation" : "The attributes for the application." + } + }, + "documentation" : "Attributes.", + "required" : [ ] + }, "BadRequestException" : { "type" : "structure", "members" : { "Message" : { "shape" : "__string", - "documentation" : "The error message returned from the API." + "documentation" : "The error message that's returned from the API." }, "RequestID" : { "shape" : "__string", @@ -2623,7 +3282,8 @@ "documentation" : "Platform credential Secret key from Baidu." } }, - "documentation" : "Baidu Cloud Push credentials" + "documentation" : "Baidu Cloud Push credentials", + "required" : [ ] }, "BaiduChannelResponse" : { "type" : "structure", @@ -2646,7 +3306,7 @@ }, "HasCredential" : { "shape" : "__boolean", - "documentation" : "Indicates whether the channel is configured with Baidu Cloud Push credentials. Amazon Pinpoint uses your credentials to authenticate push notifications with Baidu Cloud Push. Provide your credentials by setting the ApiKey and SecretKey attributes." + "documentation" : "Not used. Retained for backwards compatibility." 
}, "Id" : { "shape" : "__string", @@ -2673,7 +3333,8 @@ "documentation" : "Version of channel" } }, - "documentation" : "Baidu Cloud Messaging channel definition" + "documentation" : "Baidu Cloud Messaging channel definition", + "required" : [ ] }, "BaiduMessage" : { "type" : "structure", @@ -2722,6 +3383,10 @@ "shape" : "MapOfListOf__string", "documentation" : "Default message substitutions. Can be overridden by individual address substitutions." }, + "TimeToLive" : { + "shape" : "__integer", + "documentation" : "This parameter specifies how long (in seconds) the message should be kept in Baidu storage if the device is offline. The and the default value and the maximum time to live supported is 7 days (604800 seconds)" + }, "Title" : { "shape" : "__string", "documentation" : "The message title that displays above the message on the user's device." @@ -2753,7 +3418,8 @@ "documentation" : "The email title (Or subject)." } }, - "documentation" : "The email message configuration." + "documentation" : "The email message configuration.", + "required" : [ ] }, "CampaignHook" : { "type" : "structure", @@ -2770,7 +3436,8 @@ "shape" : "__string", "documentation" : "Web URL to call for hook. If the URL has authentication specified it will be added as authentication to the request" } - } + }, + "documentation" : "Campaign hook information." }, "CampaignLimits" : { "type" : "structure", @@ -2878,7 +3545,8 @@ "documentation" : "The campaign version number." 
} }, - "documentation" : "Campaign definition" + "documentation" : "Campaign definition", + "required" : [ ] }, "CampaignSmsMessage" : { "type" : "structure", @@ -2910,7 +3578,7 @@ }, "CampaignStatus" : { "type" : "string", - "enum" : [ "SCHEDULED", "EXECUTING", "PENDING_NEXT_RUN", "COMPLETED", "PAUSED" ] + "enum" : [ "SCHEDULED", "EXECUTING", "PENDING_NEXT_RUN", "COMPLETED", "PAUSED", "DELETED" ] }, "CampaignsResponse" : { "type" : "structure", @@ -2924,12 +3592,66 @@ "documentation" : "The string that you use in a subsequent request to get the next page of results in a paginated response." } }, - "documentation" : "List of available campaigns." + "documentation" : "List of available campaigns.", + "required" : [ ] + }, + "ChannelResponse" : { + "type" : "structure", + "members" : { + "ApplicationId" : { + "shape" : "__string", + "documentation" : "Application id" + }, + "CreationDate" : { + "shape" : "__string", + "documentation" : "When was this segment created" + }, + "Enabled" : { + "shape" : "__boolean", + "documentation" : "If the channel is enabled for sending messages." + }, + "HasCredential" : { + "shape" : "__boolean", + "documentation" : "Not used. Retained for backwards compatibility." + }, + "Id" : { + "shape" : "__string", + "documentation" : "Channel ID. Not used, only for backwards compatibility." + }, + "IsArchived" : { + "shape" : "__boolean", + "documentation" : "Is this channel archived" + }, + "LastModifiedBy" : { + "shape" : "__string", + "documentation" : "Who made the last change" + }, + "LastModifiedDate" : { + "shape" : "__string", + "documentation" : "Last date this was updated" + }, + "Version" : { + "shape" : "__integer", + "documentation" : "Version of channel" + } + }, + "documentation" : "Base definition for channel response." 
}, "ChannelType" : { "type" : "string", "enum" : [ "GCM", "APNS", "APNS_SANDBOX", "APNS_VOIP", "APNS_VOIP_SANDBOX", "ADM", "SMS", "EMAIL", "BAIDU", "CUSTOM" ] }, + "ChannelsResponse" : { + "type" : "structure", + "members" : { + "Channels" : { + "shape" : "MapOfChannelResponse", + "documentation" : "A map of channels, with the ChannelType as the key and the Channel as the value." + } + }, + "documentation" : "Get channels definition", + "required" : [ ] + }, "CreateAppRequest" : { "type" : "structure", "members" : { @@ -2958,7 +3680,8 @@ "documentation" : "The display name of the application. Used in the Amazon Pinpoint console." } }, - "documentation" : "Application Request." + "documentation" : "Application Request.", + "required" : [ ] }, "CreateCampaignRequest" : { "type" : "structure", @@ -2966,7 +3689,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "WriteCampaignRequest" : { "shape" : "WriteCampaignRequest" @@ -2991,7 +3715,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "ExportJobRequest" : { "shape" : "ExportJobRequest" @@ -3016,7 +3741,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "ImportJobRequest" : { "shape" : "ImportJobRequest" @@ -3041,7 +3767,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." 
}, "WriteSegmentRequest" : { "shape" : "WriteSegmentRequest" @@ -3114,7 +3841,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." } }, "required" : [ "ApplicationId" ] @@ -3135,7 +3863,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." } }, "required" : [ "ApplicationId" ] @@ -3156,7 +3885,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." } }, "required" : [ "ApplicationId" ] @@ -3177,7 +3907,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." } }, "required" : [ "ApplicationId" ] @@ -3198,7 +3929,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." } }, "required" : [ "ApplicationId" ] @@ -3219,7 +3951,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." } }, "required" : [ "ApplicationId" ] @@ -3240,7 +3973,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." 
} }, "required" : [ "ApplicationId" ] @@ -3261,12 +3995,14 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "CampaignId" : { "shape" : "__string", "location" : "uri", - "locationName" : "campaign-id" + "locationName" : "campaign-id", + "documentation" : "The unique ID of the campaign." } }, "required" : [ "CampaignId", "ApplicationId" ] @@ -3287,7 +4023,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." } }, "required" : [ "ApplicationId" ] @@ -3302,31 +4039,20 @@ "required" : [ "EmailChannelResponse" ], "payload" : "EmailChannelResponse" }, - "DeleteEventStreamRequest" : { - "type" : "structure", - "members" : { - "ApplicationId" : { - "shape" : "__string", - "location" : "uri", - "locationName" : "application-id", - "documentation" : "ApplicationId" - } - }, - "required" : [ "ApplicationId" ], - "documentation" : "DeleteEventStream Request" - }, "DeleteEndpointRequest" : { "type" : "structure", "members" : { "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "EndpointId" : { "shape" : "__string", "location" : "uri", - "locationName" : "endpoint-id" + "locationName" : "endpoint-id", + "documentation" : "The unique ID of the endpoint." 
} }, "required" : [ "ApplicationId", "EndpointId" ] @@ -3341,6 +4067,18 @@ "required" : [ "EndpointResponse" ], "payload" : "EndpointResponse" }, + "DeleteEventStreamRequest" : { + "type" : "structure", + "members" : { + "ApplicationId" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." + } + }, + "required" : [ "ApplicationId" ] + }, "DeleteEventStreamResponse" : { "type" : "structure", "members" : { @@ -3357,7 +4095,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." } }, "required" : [ "ApplicationId" ] @@ -3378,12 +4117,14 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "SegmentId" : { "shape" : "__string", "location" : "uri", - "locationName" : "segment-id" + "locationName" : "segment-id", + "documentation" : "The unique ID of the segment." } }, "required" : [ "SegmentId", "ApplicationId" ] @@ -3404,7 +4145,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." } }, "required" : [ "ApplicationId" ] @@ -3419,6 +4161,34 @@ "required" : [ "SMSChannelResponse" ], "payload" : "SMSChannelResponse" }, + "DeleteUserEndpointsRequest" : { + "type" : "structure", + "members" : { + "ApplicationId" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." 
+ }, + "UserId" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "user-id", + "documentation" : "The unique ID of the user." + } + }, + "required" : [ "ApplicationId", "UserId" ] + }, + "DeleteUserEndpointsResponse" : { + "type" : "structure", + "members" : { + "EndpointsResponse" : { + "shape" : "EndpointsResponse" + } + }, + "required" : [ "EndpointsResponse" ], + "payload" : "EndpointsResponse" + }, "DeliveryStatus" : { "type" : "string", "enum" : [ "SUCCESSFUL", "THROTTLED", "TEMPORARY_FAILURE", "PERMANENT_FAILURE", "UNKNOWN_FAILURE", "OPT_OUT", "DUPLICATE" ] @@ -3459,7 +4229,8 @@ "documentation" : "The message to SMS channels. Overrides the default message." } }, - "documentation" : "The message configuration." + "documentation" : "Message definitions for the default message and any messages that are tailored for specific channels.", + "required" : [ ] }, "Duration" : { "type" : "string", @@ -3485,7 +4256,8 @@ "documentation" : "The ARN of an IAM Role used to submit events to Mobile Analytics' event ingestion service" } }, - "documentation" : "Email Channel Request" + "documentation" : "Email Channel Request", + "required" : [ ] }, "EmailChannelResponse" : { "type" : "structure", @@ -3508,7 +4280,7 @@ }, "HasCredential" : { "shape" : "__boolean", - "documentation" : "If the channel is registered with a credential for authentication." + "documentation" : "Not used. Retained for backwards compatibility." }, "Id" : { "shape" : "__string", @@ -3530,6 +4302,10 @@ "shape" : "__string", "documentation" : "Last date this was updated" }, + "MessagesPerSecond" : { + "shape" : "__integer", + "documentation" : "Messages per second that can be sent" + }, "Platform" : { "shape" : "__string", "documentation" : "Platform type. Will be \"EMAIL\"" @@ -3543,14 +4319,15 @@ "documentation" : "Version of channel" } }, - "documentation" : "Email Channel Response." 
+ "documentation" : "Email Channel Response.", + "required" : [ ] }, "EndpointBatchItem" : { "type" : "structure", "members" : { "Address" : { "shape" : "__string", - "documentation" : "The address or token of the endpoint as provided by your push provider (e.g. DeviceToken or RegistrationId)." + "documentation" : "The destination for messages that you send to this endpoint. The address varies by channel. For mobile push channels, use the token provided by the push notification service, such as the APNs device token or the FCM registration token. For the SMS channel, use a phone number in E.164 format, such as +1206XXX5550100. For the email channel, use an email address." }, "Attributes" : { "shape" : "MapOfListOf__string", @@ -3570,7 +4347,7 @@ }, "EndpointStatus" : { "shape" : "__string", - "documentation" : "The endpoint status. Can be either ACTIVE or INACTIVE. Will be set to INACTIVE if a delivery fails. Will be set to ACTIVE if the address is updated." + "documentation" : "Unused." }, "Id" : { "shape" : "__string", @@ -3607,7 +4384,8 @@ "documentation" : "List of items to update. Maximum 100 items" } }, - "documentation" : "Endpoint batch update request." + "documentation" : "Endpoint batch update request.", + "required" : [ ] }, "EndpointDemographic" : { "type" : "structure", @@ -3688,6 +4466,10 @@ "shape" : "DeliveryStatus", "documentation" : "Delivery status of message." }, + "MessageId" : { + "shape" : "__string", + "documentation" : "Unique message identifier associated with the message that was sent." + }, "StatusCode" : { "shape" : "__integer", "documentation" : "Downstream service status code." @@ -3701,14 +4483,15 @@ "documentation" : "If token was updated as part of delivery. (This is GCM Specific)" } }, - "documentation" : "The result from sending a message to an endpoint." 
+ "documentation" : "The result from sending a message to an endpoint.", + "required" : [ ] }, "EndpointRequest" : { "type" : "structure", "members" : { "Address" : { "shape" : "__string", - "documentation" : "The address or token of the endpoint as provided by your push provider (e.g. DeviceToken or RegistrationId)." + "documentation" : "The destination for messages that you send to this endpoint. The address varies by channel. For mobile push channels, use the token provided by the push notification service, such as the APNs device token or the FCM registration token. For the SMS channel, use a phone number in E.164 format, such as +1206XXX5550100. For the email channel, use an email address." }, "Attributes" : { "shape" : "MapOfListOf__string", @@ -3728,7 +4511,7 @@ }, "EndpointStatus" : { "shape" : "__string", - "documentation" : "The endpoint status. Can be either ACTIVE or INACTIVE. Will be set to INACTIVE if a delivery fails. Will be set to ACTIVE if the address is updated." + "documentation" : "Unused." }, "Location" : { "shape" : "EndpointLocation", @@ -3790,7 +4573,7 @@ }, "EndpointStatus" : { "shape" : "__string", - "documentation" : "The endpoint status. Can be either ACTIVE or INACTIVE. Will be set to INACTIVE if a delivery fails. Will be set to ACTIVE if the address is updated." + "documentation" : "Unused." }, "Id" : { "shape" : "__string", @@ -3859,6 +4642,17 @@ }, "documentation" : "Endpoint user specific custom userAttributes" }, + "EndpointsResponse" : { + "type" : "structure", + "members" : { + "Item" : { + "shape" : "ListOfEndpointResponse", + "documentation" : "The list of endpoints." + } + }, + "documentation" : "List of endpoints", + "required" : [ ] + }, "EventStream" : { "type" : "structure", "members" : { @@ -3887,7 +4681,8 @@ "documentation" : "The IAM role that authorizes Amazon Pinpoint to publish events to the stream in your account." } }, - "documentation" : "Model for an event publishing subscription export." 
+ "documentation" : "Model for an event publishing subscription export.", + "required" : [ ] }, "ExportJobRequest" : { "type" : "structure", @@ -3898,13 +4693,19 @@ }, "S3UrlPrefix" : { "shape" : "__string", - "documentation" : "A URL that points to the location within an Amazon S3 bucket that will receive the export. The location is typically a folder with multiple files.\nThe URL should follow this format: s3://bucket-name/folder-name/\n\nAmazon Pinpoint will export endpoints to this location." + "documentation" : "A URL that points to the location within an Amazon S3 bucket that will receive the export. The location is typically a folder with multiple files.\n\nThe URL should follow this format: s3://bucket-name/folder-name/\n\nAmazon Pinpoint will export endpoints to this location." }, "SegmentId" : { "shape" : "__string", - "documentation" : "The ID of the segment to export endpoints from. If not present all endpoints will be exported." + "documentation" : "The ID of the segment to export endpoints from. If not present, Amazon Pinpoint exports all of the endpoints that belong to the application." + }, + "SegmentVersion" : { + "shape" : "__integer", + "documentation" : "The version of the segment to export if specified." } - } + }, + "documentation" : "Export job request.", + "required" : [ ] }, "ExportJobResource" : { "type" : "structure", @@ -3915,13 +4716,19 @@ }, "S3UrlPrefix" : { "shape" : "__string", - "documentation" : "A URL that points to the location within an Amazon S3 bucket that will receive the export. The location is typically a folder with multiple files.\nThe URL should follow this format: s3://bucket-name/folder-name/\n\nAmazon Pinpoint will export endpoints to this location." + "documentation" : "A URL that points to the location within an Amazon S3 bucket that will receive the export. 
The location is typically a folder with multiple files.\n\nThe URL should follow this format: s3://bucket-name/folder-name/\n\nAmazon Pinpoint will export endpoints to this location." }, "SegmentId" : { "shape" : "__string", - "documentation" : "The ID of the segment to export endpoints from. If not present, all endpoints are exported." + "documentation" : "The ID of the segment to export endpoints from. If not present, Amazon Pinpoint exports all of the endpoints that belong to the application." + }, + "SegmentVersion" : { + "shape" : "__integer", + "documentation" : "The version of the segment to export if specified." } - } + }, + "documentation" : "Export job resource.", + "required" : [ ] }, "ExportJobResponse" : { "type" : "structure", @@ -3978,7 +4785,9 @@ "shape" : "__string", "documentation" : "The job type. Will be 'EXPORT'." } - } + }, + "documentation" : "Export job response.", + "required" : [ ] }, "ExportJobsResponse" : { "type" : "structure", @@ -3992,14 +4801,15 @@ "documentation" : "The string that you use in a subsequent request to get the next page of results in a paginated response." } }, - "documentation" : "Export job list." + "documentation" : "Export job list.", + "required" : [ ] }, "ForbiddenException" : { "type" : "structure", "members" : { "Message" : { "shape" : "__string", - "documentation" : "The error message returned from the API." + "documentation" : "The error message that's returned from the API." }, "RequestID" : { "shape" : "__string", @@ -4032,7 +4842,8 @@ "documentation" : "If the channel is enabled for sending messages." } }, - "documentation" : "Google Cloud Messaging credentials" + "documentation" : "Google Cloud Messaging credentials", + "required" : [ ] }, "GCMChannelResponse" : { "type" : "structure", @@ -4055,7 +4866,7 @@ }, "HasCredential" : { "shape" : "__boolean", - "documentation" : "Indicates whether the channel is configured with FCM or GCM credentials. 
Amazon Pinpoint uses your credentials to authenticate push notifications with FCM or GCM. Provide your credentials by setting the ApiKey attribute." + "documentation" : "Not used. Retained for backwards compatibility." }, "Id" : { "shape" : "__string", @@ -4082,7 +4893,8 @@ "documentation" : "Version of channel" } }, - "documentation" : "Google Cloud Messaging channel definition" + "documentation" : "Google Cloud Messaging channel definition", + "required" : [ ] }, "GCMMessage" : { "type" : "structure", @@ -4158,13 +4970,44 @@ }, "documentation" : "GCM Message." }, + "GPSCoordinates" : { + "type" : "structure", + "members" : { + "Latitude" : { + "shape" : "__double", + "documentation" : "Latitude" + }, + "Longitude" : { + "shape" : "__double", + "documentation" : "Longitude" + } + }, + "documentation" : "GPS coordinates", + "required" : [ ] + }, + "GPSPointDimension" : { + "type" : "structure", + "members" : { + "Coordinates" : { + "shape" : "GPSCoordinates", + "documentation" : "Coordinate to measure distance from." + }, + "RangeInKilometers" : { + "shape" : "__double", + "documentation" : "Range in kilometers from the coordinate." + } + }, + "documentation" : "GPS point location dimension", + "required" : [ ] + }, "GetAdmChannelRequest" : { "type" : "structure", "members" : { "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." } }, "required" : [ "ApplicationId" ] @@ -4185,7 +5028,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." 
} }, "required" : [ "ApplicationId" ] @@ -4206,7 +5050,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." } }, "required" : [ "ApplicationId" ] @@ -4227,7 +5072,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." } }, "required" : [ "ApplicationId" ] @@ -4248,7 +5094,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." } }, "required" : [ "ApplicationId" ] @@ -4269,7 +5116,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." } }, "required" : [ "ApplicationId" ] @@ -4290,7 +5138,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." } }, "required" : [ "ApplicationId" ] @@ -4311,12 +5160,14 @@ "PageSize" : { "shape" : "__string", "location" : "querystring", - "locationName" : "page-size" + "locationName" : "page-size", + "documentation" : "The number of entries you want on each page in the response." }, "Token" : { "shape" : "__string", "location" : "querystring", - "locationName" : "token" + "locationName" : "token", + "documentation" : "The NextToken string returned on a previous page that you use to get the next page of results in a paginated response." 
} } }, @@ -4336,7 +5187,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." } }, "required" : [ "ApplicationId" ] @@ -4357,12 +5209,14 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "CampaignId" : { "shape" : "__string", "location" : "uri", - "locationName" : "campaign-id" + "locationName" : "campaign-id", + "documentation" : "The unique ID of the campaign." }, "PageSize" : { "shape" : "__string", @@ -4395,12 +5249,14 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "CampaignId" : { "shape" : "__string", "location" : "uri", - "locationName" : "campaign-id" + "locationName" : "campaign-id", + "documentation" : "The unique ID of the campaign." } }, "required" : [ "CampaignId", "ApplicationId" ] @@ -4421,17 +5277,20 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "CampaignId" : { "shape" : "__string", "location" : "uri", - "locationName" : "campaign-id" + "locationName" : "campaign-id", + "documentation" : "The unique ID of the campaign." }, "Version" : { "shape" : "__string", "location" : "uri", - "locationName" : "version" + "locationName" : "version", + "documentation" : "The version of the campaign." 
} }, "required" : [ "Version", "ApplicationId", "CampaignId" ] @@ -4452,12 +5311,14 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "CampaignId" : { "shape" : "__string", "location" : "uri", - "locationName" : "campaign-id" + "locationName" : "campaign-id", + "documentation" : "The unique ID of the campaign." }, "PageSize" : { "shape" : "__string", @@ -4490,7 +5351,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "PageSize" : { "shape" : "__string", @@ -4517,13 +5379,36 @@ "required" : [ "CampaignsResponse" ], "payload" : "CampaignsResponse" }, + "GetChannelsRequest" : { + "type" : "structure", + "members" : { + "ApplicationId" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." + } + }, + "required" : [ "ApplicationId" ] + }, + "GetChannelsResponse" : { + "type" : "structure", + "members" : { + "ChannelsResponse" : { + "shape" : "ChannelsResponse" + } + }, + "required" : [ "ChannelsResponse" ], + "payload" : "ChannelsResponse" + }, "GetEmailChannelRequest" : { "type" : "structure", "members" : { "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." } }, "required" : [ "ApplicationId" ] @@ -4544,12 +5429,14 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." 
}, "EndpointId" : { "shape" : "__string", "location" : "uri", - "locationName" : "endpoint-id" + "locationName" : "endpoint-id", + "documentation" : "The unique ID of the endpoint." } }, "required" : [ "ApplicationId", "EndpointId" ] @@ -4571,11 +5458,10 @@ "shape" : "__string", "location" : "uri", "locationName" : "application-id", - "documentation" : "ApplicationId" + "documentation" : "The unique ID of your Amazon Pinpoint application." } }, - "required" : [ "ApplicationId" ], - "documentation" : "GetEventStreamRequest" + "required" : [ "ApplicationId" ] }, "GetEventStreamResponse" : { "type" : "structure", @@ -4593,12 +5479,14 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "JobId" : { "shape" : "__string", "location" : "uri", - "locationName" : "job-id" + "locationName" : "job-id", + "documentation" : "The unique ID of the job." } }, "required" : [ "ApplicationId", "JobId" ] @@ -4619,7 +5507,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "PageSize" : { "shape" : "__string", @@ -4652,7 +5541,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." } }, "required" : [ "ApplicationId" ] @@ -4673,12 +5563,14 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "JobId" : { "shape" : "__string", "location" : "uri", - "locationName" : "job-id" + "locationName" : "job-id", + "documentation" : "The unique ID of the job." 
} }, "required" : [ "ApplicationId", "JobId" ] @@ -4699,7 +5591,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "PageSize" : { "shape" : "__string", @@ -4732,7 +5625,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "PageSize" : { "shape" : "__string", @@ -4743,7 +5637,8 @@ "SegmentId" : { "shape" : "__string", "location" : "uri", - "locationName" : "segment-id" + "locationName" : "segment-id", + "documentation" : "The unique ID of the segment." }, "Token" : { "shape" : "__string", @@ -4770,7 +5665,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "PageSize" : { "shape" : "__string", @@ -4781,7 +5677,8 @@ "SegmentId" : { "shape" : "__string", "location" : "uri", - "locationName" : "segment-id" + "locationName" : "segment-id", + "documentation" : "The unique ID of the segment." }, "Token" : { "shape" : "__string", @@ -4808,12 +5705,14 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "SegmentId" : { "shape" : "__string", "location" : "uri", - "locationName" : "segment-id" + "locationName" : "segment-id", + "documentation" : "The unique ID of the segment." 
} }, "required" : [ "SegmentId", "ApplicationId" ] @@ -4834,17 +5733,20 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "SegmentId" : { "shape" : "__string", "location" : "uri", - "locationName" : "segment-id" + "locationName" : "segment-id", + "documentation" : "The unique ID of the segment." }, "Version" : { "shape" : "__string", "location" : "uri", - "locationName" : "version" + "locationName" : "version", + "documentation" : "The segment version." } }, "required" : [ "SegmentId", "Version", "ApplicationId" ] @@ -4865,7 +5767,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "PageSize" : { "shape" : "__string", @@ -4876,7 +5779,8 @@ "SegmentId" : { "shape" : "__string", "location" : "uri", - "locationName" : "segment-id" + "locationName" : "segment-id", + "documentation" : "The unique ID of the segment." }, "Token" : { "shape" : "__string", @@ -4903,7 +5807,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "PageSize" : { "shape" : "__string", @@ -4936,7 +5841,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." 
} }, "required" : [ "ApplicationId" ] @@ -4951,6 +5857,34 @@ "required" : [ "SMSChannelResponse" ], "payload" : "SMSChannelResponse" }, + "GetUserEndpointsRequest" : { + "type" : "structure", + "members" : { + "ApplicationId" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." + }, + "UserId" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "user-id", + "documentation" : "The unique ID of the user." + } + }, + "required" : [ "ApplicationId", "UserId" ] + }, + "GetUserEndpointsResponse" : { + "type" : "structure", + "members" : { + "EndpointsResponse" : { + "shape" : "EndpointsResponse" + } + }, + "required" : [ "EndpointsResponse" ], + "payload" : "EndpointsResponse" + }, "ImportJobRequest" : { "type" : "structure", "members" : { @@ -4986,7 +5920,9 @@ "shape" : "__string", "documentation" : "A custom name for the segment created by the import job. Use if DefineSegment is true." } - } + }, + "documentation" : "Import job request.", + "required" : [ ] }, "ImportJobResource" : { "type" : "structure", @@ -5023,7 +5959,9 @@ "shape" : "__string", "documentation" : "A custom name for the segment created by the import job. Use if DefineSegment is true." } - } + }, + "documentation" : "Import job resource", + "required" : [ ] }, "ImportJobResponse" : { "type" : "structure", @@ -5080,7 +6018,9 @@ "shape" : "__string", "documentation" : "The job type. Will be Import." } - } + }, + "documentation" : "Import job response.", + "required" : [ ] }, "ImportJobsResponse" : { "type" : "structure", @@ -5094,14 +6034,19 @@ "documentation" : "The string that you use in a subsequent request to get the next page of results in a paginated response." } }, - "documentation" : "Import job list." 
+ "documentation" : "Import job list.", + "required" : [ ] + }, + "Include" : { + "type" : "string", + "enum" : [ "ALL", "ANY", "NONE" ] }, "InternalServerErrorException" : { "type" : "structure", "members" : { "Message" : { "shape" : "__string", - "documentation" : "The error message returned from the API." + "documentation" : "The error message that's returned from the API." }, "RequestID" : { "shape" : "__string", @@ -5118,156 +6063,6 @@ "type" : "string", "enum" : [ "CREATED", "INITIALIZING", "PROCESSING", "COMPLETING", "COMPLETED", "FAILING", "FAILED" ] }, - "ListOfActivityResponse" : { - "type" : "list", - "member" : { - "shape" : "ActivityResponse" - } - }, - "ListOfApplicationResponse" : { - "type" : "list", - "member" : { - "shape" : "ApplicationResponse" - } - }, - "ListOfCampaignResponse" : { - "type" : "list", - "member" : { - "shape" : "CampaignResponse" - } - }, - "ListOfEndpointBatchItem" : { - "type" : "list", - "member" : { - "shape" : "EndpointBatchItem" - } - }, - "ListOfExportJobResponse" : { - "type" : "list", - "member" : { - "shape" : "ExportJobResponse" - } - }, - "ListOfImportJobResponse" : { - "type" : "list", - "member" : { - "shape" : "ImportJobResponse" - } - }, - "ListOfSegmentResponse" : { - "type" : "list", - "member" : { - "shape" : "SegmentResponse" - } - }, - "ListOfTreatmentResource" : { - "type" : "list", - "member" : { - "shape" : "TreatmentResource" - } - }, - "ListOfWriteTreatmentResource" : { - "type" : "list", - "member" : { - "shape" : "WriteTreatmentResource" - } - }, - "ListOf__string" : { - "type" : "list", - "member" : { - "shape" : "__string" - } - }, - "MapOfAddressConfiguration" : { - "type" : "map", - "key" : { - "shape" : "__string" - }, - "value" : { - "shape" : "AddressConfiguration" - } - }, - "MapOfAttributeDimension" : { - "type" : "map", - "key" : { - "shape" : "__string" - }, - "value" : { - "shape" : "AttributeDimension" - } - }, - "MapOfEndpointMessageResult" : { - "type" : "map", - "key" : { - "shape" : 
"__string" - }, - "value" : { - "shape" : "EndpointMessageResult" - } - }, - "MapOfEndpointSendConfiguration" : { - "type" : "map", - "key" : { - "shape" : "__string" - }, - "value" : { - "shape" : "EndpointSendConfiguration" - } - }, - "MapOfListOf__string" : { - "type" : "map", - "key" : { - "shape" : "__string" - }, - "value" : { - "shape" : "ListOf__string" - } - }, - "MapOfMapOfEndpointMessageResult" : { - "type" : "map", - "key" : { - "shape" : "__string" - }, - "value" : { - "shape" : "MapOfEndpointMessageResult" - } - }, - "MapOfMessageResult" : { - "type" : "map", - "key" : { - "shape" : "__string" - }, - "value" : { - "shape" : "MessageResult" - } - }, - "MapOf__double" : { - "type" : "map", - "key" : { - "shape" : "__string" - }, - "value" : { - "shape" : "__double" - } - }, - "MapOf__integer" : { - "type" : "map", - "key" : { - "shape" : "__string" - }, - "value" : { - "shape" : "__integer" - } - }, - "MapOf__string" : { - "type" : "map", - "key" : { - "shape" : "__string" - }, - "value" : { - "shape" : "__string" - } - }, "Message" : { "type" : "structure", "members" : { @@ -5307,6 +6102,10 @@ "shape" : "__boolean", "documentation" : "Indicates if the message should display on the users device.\n\nSilent pushes can be used for Remote Configuration and Phone Home use cases. " }, + "TimeToLive" : { + "shape" : "__integer", + "documentation" : "This parameter specifies how long (in seconds) the message should be kept if the service is unable to deliver the notification the first time. If the value is 0, it treats the notification as if it expires immediately and does not store the notification or attempt to redeliver it. This value is converted to the expiration field when sent to the service. It only applies to APNs and GCM" + }, "Title" : { "shape" : "__string", "documentation" : "The message title that displays above the message on the user's device." 
@@ -5315,14 +6114,16 @@ "shape" : "__string", "documentation" : "The URL to open in the user's mobile browser. Used if the value for Action is URL." } - } + }, + "documentation" : "Message to send", + "required" : [ ] }, "MessageBody" : { "type" : "structure", "members" : { "Message" : { "shape" : "__string", - "documentation" : "The error message returned from the API." + "documentation" : "The error message that's returned from the API." }, "RequestID" : { "shape" : "__string", @@ -5370,7 +6171,7 @@ "members" : { "Addresses" : { "shape" : "MapOfAddressConfiguration", - "documentation" : "A map of destination addresses, with the address as the key(Email address, phone number or push token) and the Address Configuration as the value." + "documentation" : "A map of key-value pairs, where each key is an address and each value is an AddressConfiguration object. An address can be a push notification token, a phone number, or an email address." }, "Context" : { "shape" : "MapOf__string", @@ -5378,14 +6179,15 @@ }, "Endpoints" : { "shape" : "MapOfEndpointSendConfiguration", - "documentation" : "A map of destination addresses, with the address as the key(Email address, phone number or push token) and the Address Configuration as the value." + "documentation" : "A map of key-value pairs, where each key is an endpoint ID and each value is an EndpointSendConfiguration object. Within an EndpointSendConfiguration object, you can tailor the message for an endpoint by specifying message overrides or substitutions." }, "MessageConfiguration" : { "shape" : "DirectMessageConfiguration", "documentation" : "Message configuration." } }, - "documentation" : "Send message request." + "documentation" : "Send message request.", + "required" : [ ] }, "MessageResponse" : { "type" : "structure", @@ -5407,7 +6209,8 @@ "documentation" : "A map containing a multi part response for each address, with the address as the key(Email address, phone number or push token) and the result as the value." 
} }, - "documentation" : "Send message response." + "documentation" : "Send message response.", + "required" : [ ] }, "MessageResult" : { "type" : "structure", @@ -5416,6 +6219,10 @@ "shape" : "DeliveryStatus", "documentation" : "Delivery status of message." }, + "MessageId" : { + "shape" : "__string", + "documentation" : "Unique message identifier associated with the message that was sent." + }, "StatusCode" : { "shape" : "__integer", "documentation" : "Downstream service status code." @@ -5429,7 +6236,8 @@ "documentation" : "If token was updated as part of delivery. (This is GCM Specific)" } }, - "documentation" : "The result from sending a message to an address." + "documentation" : "The result from sending a message to an address.", + "required" : [ ] }, "MessageType" : { "type" : "string", @@ -5440,7 +6248,7 @@ "members" : { "Message" : { "shape" : "__string", - "documentation" : "The error message returned from the API." + "documentation" : "The error message that's returned from the API." }, "RequestID" : { "shape" : "__string", @@ -5453,6 +6261,20 @@ "httpStatusCode" : 405 } }, + "MetricDimension" : { + "type" : "structure", + "members" : { + "ComparisonOperator" : { + "shape" : "__string", + "documentation" : "GREATER_THAN | LESS_THAN | GREATER_THAN_OR_EQUAL | LESS_THAN_OR_EQUAL | EQUAL" + }, + "Value" : { + "shape" : "__double", + "documentation" : "Value to be compared." + } + }, + "documentation" : "Custom metric dimension" + }, "Mode" : { "type" : "string", "enum" : [ "DELIVERY", "FILTER" ] @@ -5462,7 +6284,7 @@ "members" : { "Message" : { "shape" : "__string", - "documentation" : "The error message returned from the API." + "documentation" : "The error message that's returned from the API." 
}, "RequestID" : { "shape" : "__string", @@ -5475,6 +6297,102 @@ "httpStatusCode" : 404 } }, + "NumberValidateRequest" : { + "type" : "structure", + "members" : { + "IsoCountryCode" : { + "shape" : "__string", + "documentation" : "(Optional) The two-character ISO country code for the country where the phone number was originally registered." + }, + "PhoneNumber" : { + "shape" : "__string", + "documentation" : "The phone number to get information about." + } + }, + "documentation" : "Phone Number Information request." + }, + "NumberValidateResponse" : { + "type" : "structure", + "members" : { + "Carrier" : { + "shape" : "__string", + "documentation" : "The carrier that the phone number is registered with." + }, + "City" : { + "shape" : "__string", + "documentation" : "The city where the phone number was originally registered." + }, + "CleansedPhoneNumberE164" : { + "shape" : "__string", + "documentation" : "The cleansed (standardized) phone number in E.164 format." + }, + "CleansedPhoneNumberNational" : { + "shape" : "__string", + "documentation" : "The cleansed phone number in national format." + }, + "Country" : { + "shape" : "__string", + "documentation" : "The country where the phone number was originally registered." + }, + "CountryCodeIso2" : { + "shape" : "__string", + "documentation" : "The two-character ISO country code for the country where the phone number was originally registered." + }, + "CountryCodeNumeric" : { + "shape" : "__string", + "documentation" : "The numeric country code for the country where the phone number was originally registered." + }, + "County" : { + "shape" : "__string", + "documentation" : "The county where the phone number was originally registered." + }, + "OriginalCountryCodeIso2" : { + "shape" : "__string", + "documentation" : "The two-character ISO country code that was included in the request body." 
+ }, + "OriginalPhoneNumber" : { + "shape" : "__string", + "documentation" : "The phone number that you included in the request body." + }, + "PhoneType" : { + "shape" : "__string", + "documentation" : "A description of the phone type. Possible values include MOBILE, LANDLINE, VOIP, INVALID, and OTHER." + }, + "PhoneTypeCode" : { + "shape" : "__integer", + "documentation" : "The phone type as an integer. Possible values include 0 (MOBILE), 1 (LANDLINE), 2 (VOIP), 3 (INVALID), and 4 (OTHER)." + }, + "Timezone" : { + "shape" : "__string", + "documentation" : "The time zone for the location where the phone number was originally registered." + }, + "ZipCode" : { + "shape" : "__string", + "documentation" : "The zip code for the location where the phone number was originally registered." + } + }, + "documentation" : "Phone Number Information response." + }, + "PhoneNumberValidateRequest" : { + "type" : "structure", + "members" : { + "NumberValidateRequest" : { + "shape" : "NumberValidateRequest" + } + }, + "required" : [ "NumberValidateRequest" ], + "payload" : "NumberValidateRequest" + }, + "PhoneNumberValidateResponse" : { + "type" : "structure", + "members" : { + "NumberValidateResponse" : { + "shape" : "NumberValidateResponse" + } + }, + "required" : [ "NumberValidateResponse" ], + "payload" : "NumberValidateResponse" + }, "PutEventStreamRequest" : { "type" : "structure", "members" : { @@ -5482,11 +6400,10 @@ "shape" : "__string", "location" : "uri", "locationName" : "application-id", - "documentation" : "ApplicationId" + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "WriteEventStream" : { - "shape" : "WriteEventStream", - "documentation" : "EventStream to write." 
+ "shape" : "WriteEventStream" } }, "required" : [ "ApplicationId", "WriteEventStream" ], @@ -5528,12 +6445,45 @@ "documentation" : "The recency dimension type:\nACTIVE - Users who have used your app within the specified duration are included in the segment.\nINACTIVE - Users who have not used your app within the specified duration are included in the segment." } }, - "documentation" : "Define how a segment based on recency of use." + "documentation" : "Define how a segment based on recency of use.", + "required" : [ ] }, "RecencyType" : { "type" : "string", "enum" : [ "ACTIVE", "INACTIVE" ] }, + "RemoveAttributesRequest" : { + "type" : "structure", + "members" : { + "ApplicationId" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." + }, + "AttributeType" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "attribute-type", + "documentation" : "Type of attribute. Can be endpoint-custom-attributes, endpoint-custom-metrics, endpoint-user-attributes." + }, + "UpdateAttributesRequest" : { + "shape" : "UpdateAttributesRequest" + } + }, + "required" : [ "AttributeType", "ApplicationId", "UpdateAttributesRequest" ], + "payload" : "UpdateAttributesRequest" + }, + "RemoveAttributesResponse" : { + "type" : "structure", + "members" : { + "AttributesResource" : { + "shape" : "AttributesResource" + } + }, + "required" : [ "AttributesResource" ], + "payload" : "AttributesResource" + }, "SMSChannelRequest" : { "type" : "structure", "members" : { @@ -5550,7 +6500,8 @@ "documentation" : "ShortCode registered with phone provider." } }, - "documentation" : "SMS Channel Request" + "documentation" : "SMS Channel Request", + "required" : [ ] }, "SMSChannelResponse" : { "type" : "structure", @@ -5569,7 +6520,7 @@ }, "HasCredential" : { "shape" : "__boolean", - "documentation" : "If the channel is registered with a credential for authentication." 
+ "documentation" : "Not used. Retained for backwards compatibility." }, "Id" : { "shape" : "__string", @@ -5591,6 +6542,10 @@ "shape" : "__string", "documentation" : "Platform type. Will be \"SMS\"" }, + "PromotionalMessagesPerSecond" : { + "shape" : "__integer", + "documentation" : "Promotional messages per second that can be sent" + }, "SenderId" : { "shape" : "__string", "documentation" : "Sender identifier of your messages." @@ -5599,19 +6554,28 @@ "shape" : "__string", "documentation" : "The short code registered with the phone provider." }, + "TransactionalMessagesPerSecond" : { + "shape" : "__integer", + "documentation" : "Transactional messages per second that can be sent" + }, "Version" : { "shape" : "__integer", "documentation" : "Version of channel" } }, - "documentation" : "SMS Channel Response." + "documentation" : "SMS Channel Response.", + "required" : [ ] }, "SMSMessage" : { "type" : "structure", "members" : { "Body" : { "shape" : "__string", - "documentation" : "The message body of the notification, the email body or the text message." + "documentation" : "The body of the SMS message." + }, + "Keyword" : { + "shape" : "__string", + "documentation" : "The SMS program name that you provided to AWS Support when you requested your dedicated number." }, "MessageType" : { "shape" : "MessageType", @@ -5660,7 +6624,8 @@ "documentation" : "The starting UTC offset for the schedule if the value for isLocalTime is true\n\nValid values: \nUTC\nUTC+01\nUTC+02\nUTC+03\nUTC+03:30\nUTC+04\nUTC+04:30\nUTC+05\nUTC+05:30\nUTC+05:45\nUTC+06\nUTC+06:30\nUTC+07\nUTC+08\nUTC+09\nUTC+09:30\nUTC+10\nUTC+10:30\nUTC+11\nUTC+12\nUTC+13\nUTC-02\nUTC-03\nUTC-04\nUTC-05\nUTC-06\nUTC-07\nUTC-08\nUTC-09\nUTC-10\nUTC-11" } }, - "documentation" : "Shcedule that defines when a campaign is run." 
+ "documentation" : "Shcedule that defines when a campaign is run.", + "required" : [ ] }, "SegmentBehaviors" : { "type" : "structure", @@ -5721,6 +6686,10 @@ "shape" : "SegmentLocation", "documentation" : "The segment location attributes." }, + "Metrics" : { + "shape" : "MapOfMetricDimension", + "documentation" : "Custom segment metrics." + }, "UserAttributes" : { "shape" : "MapOfAttributeDimension", "documentation" : "Custom segment user attributes." @@ -5728,6 +6697,44 @@ }, "documentation" : "Segment dimensions" }, + "SegmentGroup" : { + "type" : "structure", + "members" : { + "Dimensions" : { + "shape" : "ListOfSegmentDimensions", + "documentation" : "List of dimensions to include or exclude." + }, + "SourceSegments" : { + "shape" : "ListOfSegmentReference", + "documentation" : "Segments that define the source of this segment. Currently a maximum of 1 import segment is supported." + }, + "SourceType" : { + "shape" : "SourceType", + "documentation" : "Include or exclude the source." + }, + "Type" : { + "shape" : "Type", + "documentation" : "How should the dimensions be applied for the result" + } + }, + "documentation" : "Segment group definition.", + "required" : [ ] + }, + "SegmentGroupList" : { + "type" : "structure", + "members" : { + "Groups" : { + "shape" : "ListOfSegmentGroup", + "documentation" : "List of dimension groups to evaluate." + }, + "Include" : { + "shape" : "Include", + "documentation" : "How should the groups be applied for the result" + } + }, + "documentation" : "Segment group definition.", + "required" : [ ] + }, "SegmentImportResource" : { "type" : "structure", "members" : { @@ -5756,7 +6763,8 @@ "documentation" : "The number of endpoints that were successfully imported to create this segment." } }, - "documentation" : "Segment import definition." 
+ "documentation" : "Segment import definition.", + "required" : [ ] }, "SegmentLocation" : { "type" : "structure", @@ -5764,10 +6772,28 @@ "Country" : { "shape" : "SetDimension", "documentation" : "The country filter according to ISO 3166-1 Alpha-2 codes." + }, + "GPSPoint" : { + "shape" : "GPSPointDimension", + "documentation" : "The GPS Point dimension." } }, "documentation" : "Segment location dimensions" }, + "SegmentReference" : { + "type" : "structure", + "members" : { + "Id" : { + "shape" : "__string", + "documentation" : "Segment Id." + }, + "Version" : { + "shape" : "__integer", + "documentation" : "If specified contains a specific version of the segment included." + } + }, + "documentation" : "Segment reference." + }, "SegmentResponse" : { "type" : "structure", "members" : { @@ -5799,6 +6825,10 @@ "shape" : "__string", "documentation" : "The name of segment" }, + "SegmentGroups" : { + "shape" : "SegmentGroupList", + "documentation" : "Segment definition groups. We currently only support one. If specified Dimensions must be empty." + }, "SegmentType" : { "shape" : "SegmentType", "documentation" : "The segment type:\nDIMENSIONAL - A dynamic segment built from selection criteria based on endpoint data reported by your app. You create this type of segment by using the segment builder in the Amazon Pinpoint console or by making a POST request to the segments resource.\nIMPORT - A static segment built from an imported set of endpoint definitions. You create this type of segment by importing a segment in the Amazon Pinpoint console or by making a POST request to the jobs/import resource." @@ -5808,7 +6838,8 @@ "documentation" : "The segment version number." } }, - "documentation" : "Segment definition." + "documentation" : "Segment definition.", + "required" : [ ] }, "SegmentType" : { "type" : "string", @@ -5826,7 +6857,8 @@ "documentation" : "An identifier used to retrieve the next page of results. The token is null if no additional pages exist." 
} }, - "documentation" : "Segments in your account." + "documentation" : "Segments in your account.", + "required" : [ ] }, "SendMessagesRequest" : { "type" : "structure", @@ -5834,7 +6866,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "MessageRequest" : { "shape" : "MessageRequest" @@ -5858,36 +6891,38 @@ "members" : { "Context" : { "shape" : "MapOf__string", - "documentation" : "A map of custom attributes to attributes to be attached to the message. This payload is added to the push notification's 'data.pinpoint' object or added to the email/sms delivery receipt event attributes." + "documentation" : "A map of custom attribute-value pairs. Amazon Pinpoint adds these attributes to the data.pinpoint object in the body of the push notification payload. Amazon Pinpoint also provides these attributes in the events that it generates for users-messages deliveries." }, "MessageConfiguration" : { "shape" : "DirectMessageConfiguration", - "documentation" : "Message configuration." + "documentation" : "Message definitions for the default message and any messages that are tailored for specific channels." }, "Users" : { "shape" : "MapOfEndpointSendConfiguration", - "documentation" : "A map of destination endpoints, with the EndpointId as the key Endpoint Message Configuration as the value." + "documentation" : "A map that associates user IDs with EndpointSendConfiguration objects. Within an EndpointSendConfiguration object, you can tailor the message for a user by specifying message overrides or substitutions." } }, - "documentation" : "Send message request." + "documentation" : "Send message request.", + "required" : [ ] }, "SendUsersMessageResponse" : { "type" : "structure", "members" : { "ApplicationId" : { "shape" : "__string", - "documentation" : "Application id of the message." 
+ "documentation" : "The unique ID of the Amazon Pinpoint project used to send the message." }, "RequestId" : { "shape" : "__string", - "documentation" : "Original request Id for which this message was delivered." + "documentation" : "The unique ID assigned to the users-messages request." }, "Result" : { "shape" : "MapOfMapOfEndpointMessageResult", - "documentation" : "A map containing of UserId to Map of EndpointId to Endpoint Message Result." + "documentation" : "An object that shows the endpoints that were messaged for each user. The object provides a list of user IDs. For each user ID, it provides the endpoint IDs that were messaged. For each endpoint ID, it provides an EndpointMessageResult object." } }, - "documentation" : "User send message response." + "documentation" : "User send message response.", + "required" : [ ] }, "SendUsersMessagesRequest" : { "type" : "structure", @@ -5895,7 +6930,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "SendUsersMessageRequest" : { "shape" : "SendUsersMessageRequest" @@ -5926,14 +6962,19 @@ "documentation" : "The criteria values for the segment dimension. Endpoints with matching attribute values are included or excluded from the segment, depending on the setting for Type." } }, - "documentation" : "Dimension specification of a segment." + "documentation" : "Dimension specification of a segment.", + "required" : [ ] + }, + "SourceType" : { + "type" : "string", + "enum" : [ "ALL", "ANY" ] }, "TooManyRequestsException" : { "type" : "structure", "members" : { "Message" : { "shape" : "__string", - "documentation" : "The error message returned from the API." + "documentation" : "The error message that's returned from the API." 
}, "RequestID" : { "shape" : "__string", @@ -5978,7 +7019,12 @@ "documentation" : "The custom name of a variation of the campaign used for A/B testing." } }, - "documentation" : "Treatment resource" + "documentation" : "Treatment resource", + "required" : [ ] + }, + "Type" : { + "type" : "string", + "enum" : [ "ALL", "ANY", "NONE" ] }, "UpdateAdmChannelRequest" : { "type" : "structure", @@ -5989,7 +7035,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." } }, "required" : [ "ApplicationId", "ADMChannelRequest" ], @@ -6014,7 +7061,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." } }, "required" : [ "ApplicationId", "APNSChannelRequest" ], @@ -6039,7 +7087,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." } }, "required" : [ "ApplicationId", "APNSSandboxChannelRequest" ], @@ -6064,7 +7113,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." } }, "required" : [ "ApplicationId", "APNSVoipChannelRequest" ], @@ -6089,7 +7139,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." 
} }, "required" : [ "ApplicationId", "APNSVoipSandboxChannelRequest" ], @@ -6111,7 +7162,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "WriteApplicationSettingsRequest" : { "shape" : "WriteApplicationSettingsRequest" @@ -6130,13 +7182,24 @@ "required" : [ "ApplicationSettingsResource" ], "payload" : "ApplicationSettingsResource" }, + "UpdateAttributesRequest" : { + "type" : "structure", + "members" : { + "Blacklist" : { + "shape" : "ListOf__string", + "documentation" : "The GLOB wildcard for removing the attributes in the application" + } + }, + "documentation" : "Update attributes request" + }, "UpdateBaiduChannelRequest" : { "type" : "structure", "members" : { "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "BaiduChannelRequest" : { "shape" : "BaiduChannelRequest" @@ -6161,12 +7224,14 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "CampaignId" : { "shape" : "__string", "location" : "uri", - "locationName" : "campaign-id" + "locationName" : "campaign-id", + "documentation" : "The unique ID of the campaign." }, "WriteCampaignRequest" : { "shape" : "WriteCampaignRequest" @@ -6191,7 +7256,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." 
}, "EmailChannelRequest" : { "shape" : "EmailChannelRequest" @@ -6216,12 +7282,14 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "EndpointId" : { "shape" : "__string", "location" : "uri", - "locationName" : "endpoint-id" + "locationName" : "endpoint-id", + "documentation" : "The unique ID of the endpoint." }, "EndpointRequest" : { "shape" : "EndpointRequest" @@ -6246,7 +7314,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "EndpointBatchRequest" : { "shape" : "EndpointBatchRequest" @@ -6271,7 +7340,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "GCMChannelRequest" : { "shape" : "GCMChannelRequest" @@ -6296,12 +7366,14 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "SegmentId" : { "shape" : "__string", "location" : "uri", - "locationName" : "segment-id" + "locationName" : "segment-id", + "documentation" : "The unique ID of the segment." }, "WriteSegmentRequest" : { "shape" : "WriteSegmentRequest" @@ -6326,7 +7398,8 @@ "ApplicationId" : { "shape" : "__string", "location" : "uri", - "locationName" : "application-id" + "locationName" : "application-id", + "documentation" : "The unique ID of your Amazon Pinpoint application." }, "SMSChannelRequest" : { "shape" : "SMSChannelRequest" @@ -6352,6 +7425,10 @@ "shape" : "CampaignHook", "documentation" : "Default campaign hook information." 
}, + "CloudWatchMetricsEnabled" : { + "shape" : "__boolean", + "documentation" : "The CloudWatchMetrics settings for the app." + }, "Limits" : { "shape" : "CampaignLimits", "documentation" : "The default campaign limits for the app. These limits apply to each campaign for the app, unless the campaign overrides the default with limits of its own." @@ -6433,7 +7510,8 @@ "documentation" : "The IAM role that authorizes Amazon Pinpoint to publish events to the stream in your account." } }, - "documentation" : "Request to save an EventStream." + "documentation" : "Request to save an EventStream.", + "required" : [ ] }, "WriteSegmentRequest" : { "type" : "structure", @@ -6445,9 +7523,14 @@ "Name" : { "shape" : "__string", "documentation" : "The name of segment" + }, + "SegmentGroups" : { + "shape" : "SegmentGroupList", + "documentation" : "Segment definition groups. We currently only support one. If specified Dimensions must be empty." } }, - "documentation" : "Segment definition." + "documentation" : "Segment definition.", + "required" : [ ] }, "WriteTreatmentResource" : { "type" : "structure", @@ -6473,7 +7556,8 @@ "documentation" : "The custom name of a variation of the campaign used for A/B testing." } }, - "documentation" : "Used to create a campaign treatment." 
+ "documentation" : "Used to create a campaign treatment.", + "required" : [ ] }, "__boolean" : { "type" : "boolean" @@ -6484,11 +7568,211 @@ "__integer" : { "type" : "integer" }, + "ListOfActivityResponse" : { + "type" : "list", + "member" : { + "shape" : "ActivityResponse" + } + }, + "ListOfApplicationResponse" : { + "type" : "list", + "member" : { + "shape" : "ApplicationResponse" + } + }, + "ListOfCampaignResponse" : { + "type" : "list", + "member" : { + "shape" : "CampaignResponse" + } + }, + "ListOfEndpointBatchItem" : { + "type" : "list", + "member" : { + "shape" : "EndpointBatchItem" + } + }, + "ListOfEndpointResponse" : { + "type" : "list", + "member" : { + "shape" : "EndpointResponse" + } + }, + "ListOfExportJobResponse" : { + "type" : "list", + "member" : { + "shape" : "ExportJobResponse" + } + }, + "ListOfImportJobResponse" : { + "type" : "list", + "member" : { + "shape" : "ImportJobResponse" + } + }, + "ListOfSegmentDimensions" : { + "type" : "list", + "member" : { + "shape" : "SegmentDimensions" + } + }, + "ListOfSegmentGroup" : { + "type" : "list", + "member" : { + "shape" : "SegmentGroup" + } + }, + "ListOfSegmentReference" : { + "type" : "list", + "member" : { + "shape" : "SegmentReference" + } + }, + "ListOfSegmentResponse" : { + "type" : "list", + "member" : { + "shape" : "SegmentResponse" + } + }, + "ListOfTreatmentResource" : { + "type" : "list", + "member" : { + "shape" : "TreatmentResource" + } + }, + "ListOfWriteTreatmentResource" : { + "type" : "list", + "member" : { + "shape" : "WriteTreatmentResource" + } + }, + "ListOf__string" : { + "type" : "list", + "member" : { + "shape" : "__string" + } + }, + "__long" : { + "type" : "long" + }, + "MapOfAddressConfiguration" : { + "type" : "map", + "key" : { + "shape" : "__string" + }, + "value" : { + "shape" : "AddressConfiguration" + } + }, + "MapOfAttributeDimension" : { + "type" : "map", + "key" : { + "shape" : "__string" + }, + "value" : { + "shape" : "AttributeDimension" + } + }, + 
"MapOfChannelResponse" : { + "type" : "map", + "key" : { + "shape" : "__string" + }, + "value" : { + "shape" : "ChannelResponse" + } + }, + "MapOfEndpointMessageResult" : { + "type" : "map", + "key" : { + "shape" : "__string" + }, + "value" : { + "shape" : "EndpointMessageResult" + } + }, + "MapOfEndpointSendConfiguration" : { + "type" : "map", + "key" : { + "shape" : "__string" + }, + "value" : { + "shape" : "EndpointSendConfiguration" + } + }, + "MapOfMessageResult" : { + "type" : "map", + "key" : { + "shape" : "__string" + }, + "value" : { + "shape" : "MessageResult" + } + }, + "MapOfMetricDimension" : { + "type" : "map", + "key" : { + "shape" : "__string" + }, + "value" : { + "shape" : "MetricDimension" + } + }, + "MapOf__double" : { + "type" : "map", + "key" : { + "shape" : "__string" + }, + "value" : { + "shape" : "__double" + } + }, + "MapOf__integer" : { + "type" : "map", + "key" : { + "shape" : "__string" + }, + "value" : { + "shape" : "__integer" + } + }, + "MapOfListOf__string" : { + "type" : "map", + "key" : { + "shape" : "__string" + }, + "value" : { + "shape" : "ListOf__string" + } + }, + "MapOfMapOfEndpointMessageResult" : { + "type" : "map", + "key" : { + "shape" : "__string" + }, + "value" : { + "shape" : "MapOfEndpointMessageResult" + } + }, + "MapOf__string" : { + "type" : "map", + "key" : { + "shape" : "__string" + }, + "value" : { + "shape" : "__string" + } + }, "__string" : { "type" : "string" }, - "__timestamp" : { - "type" : "timestamp" + "__timestampIso8601" : { + "type" : "timestamp", + "timestampFormat" : "iso8601" + }, + "__timestampUnix" : { + "type" : "timestamp", + "timestampFormat" : "unixTimestamp" } } -} +} \ No newline at end of file diff --git a/botocore/data/polly/2016-06-10/service-2.json b/botocore/data/polly/2016-06-10/service-2.json index dd534e34..22ef5905 100644 --- a/botocore/data/polly/2016-06-10/service-2.json +++ b/botocore/data/polly/2016-06-10/service-2.json @@ -546,7 +546,7 @@ "members":{ 
"message":{"shape":"ErrorMessage"} }, - "documentation":"

The value of the \"Text\" parameter is longer than the accepted limits. The limit for input text is a maximum of 3000 characters total, of which no more than 1500 can be billed characters. SSML tags are not counted as billed characters.

", + "documentation":"

The value of the \"Text\" parameter is longer than the accepted limits. The limit for input text is a maximum of 6000 characters total, of which no more than 3000 can be billed characters. SSML tags are not counted as billed characters.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -630,6 +630,7 @@ "Penelope", "Chantal", "Celine", + "Lea", "Mathieu", "Dora", "Karl", diff --git a/botocore/data/pricing/2017-10-15/service-2.json b/botocore/data/pricing/2017-10-15/service-2.json index 918c6670..bc77bc67 100644 --- a/botocore/data/pricing/2017-10-15/service-2.json +++ b/botocore/data/pricing/2017-10-15/service-2.json @@ -7,6 +7,7 @@ "protocol":"json", "serviceAbbreviation":"AWS Pricing", "serviceFullName":"AWS Price List Service", + "serviceId":"Pricing", "signatureVersion":"v4", "signingName":"pricing", "targetPrefix":"AWSPriceListService", diff --git a/botocore/data/rds/2014-10-31/service-2.json b/botocore/data/rds/2014-10-31/service-2.json index 2f64d3c1..cbae544b 100644 --- a/botocore/data/rds/2014-10-31/service-2.json +++ b/botocore/data/rds/2014-10-31/service-2.json @@ -93,6 +93,23 @@ ], "documentation":"

Enables ingress to a DBSecurityGroup using one of two forms of authorization. First, EC2 or VPC security groups can be added to the DBSecurityGroup if the application using the database is running on EC2 or VPC instances. Second, IP ranges are available if the application accessing your database is running on the Internet. Required parameters for this API are one of CIDR range, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId for non-VPC).

You can't authorize ingress from an EC2 security group in one AWS Region to an Amazon RDS DB instance in another. You can't authorize ingress from a VPC security group in one VPC to an Amazon RDS DB instance in another.

For an overview of CIDR ranges, go to the Wikipedia Tutorial.

" }, + "BacktrackDBCluster":{ + "name":"BacktrackDBCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BacktrackDBClusterMessage"}, + "output":{ + "shape":"DBClusterBacktrack", + "resultWrapper":"BacktrackDBClusterResult" + }, + "errors":[ + {"shape":"DBClusterNotFoundFault"}, + {"shape":"InvalidDBClusterStateFault"} + ], + "documentation":"

Backtracks a DB cluster to a specific time, without creating a new DB cluster.

For more information on backtracking, see Backtracking an Aurora DB Cluster in the Amazon RDS User Guide.

" + }, "CopyDBClusterParameterGroup":{ "name":"CopyDBClusterParameterGroup", "http":{ @@ -130,7 +147,7 @@ {"shape":"SnapshotQuotaExceededFault"}, {"shape":"KMSKeyNotAccessibleFault"} ], - "documentation":"

Copies a snapshot of a DB cluster.

To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.

You can copy an encrypted DB cluster snapshot from another AWS Region. In that case, the AWS Region where you call the CopyDBClusterSnapshot action is the destination AWS Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another AWS Region, you must provide the following values:

  • KmsKeyId - The AWS Key Management System (AWS KMS) key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region.

  • PreSignedUrl - A URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot action to be called in the source AWS Region where the DB cluster snapshot is copied from. The pre-signed URL must be a valid request for the CopyDBClusterSnapshot API action that can be executed in the source AWS Region that contains the encrypted DB cluster snapshot to be copied.

    The pre-signed URL request must contain the following parameter values:

    • KmsKeyId - The KMS key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region. This is the same identifier for both the CopyDBClusterSnapshot action that is called in the destination AWS Region, and the action contained in the pre-signed URL.

    • DestinationRegion - The name of the AWS Region that the DB cluster snapshot will be created in.

    • SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region. For example, if you are copying an encrypted DB cluster snapshot from the us-west-2 AWS Region, then your SourceDBClusterSnapshotIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115.

    To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.

  • TargetDBClusterSnapshotIdentifier - The identifier for the new copy of the DB cluster snapshot in the destination AWS Region.

  • SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the ARN format for the source AWS Region and is the same value as the SourceDBClusterSnapshotIdentifier in the pre-signed URL.

To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified by TargetDBClusterSnapshotIdentifier while that DB cluster snapshot is in \"copying\" status.

For more information on copying encrypted DB cluster snapshots from one AWS Region to another, see Copying a DB Cluster Snapshot in the Same Account, Either in the Same Region or Across Regions in the Amazon RDS User Guide.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

" + "documentation":"

Copies a snapshot of a DB cluster.

To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.

You can copy an encrypted DB cluster snapshot from another AWS Region. In that case, the AWS Region where you call the CopyDBClusterSnapshot action is the destination AWS Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another AWS Region, you must provide the following values:

  • KmsKeyId - The AWS Key Management System (AWS KMS) key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region.

  • PreSignedUrl - A URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot action to be called in the source AWS Region where the DB cluster snapshot is copied from. The pre-signed URL must be a valid request for the CopyDBClusterSnapshot API action that can be executed in the source AWS Region that contains the encrypted DB cluster snapshot to be copied.

    The pre-signed URL request must contain the following parameter values:

    • KmsKeyId - The KMS key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region. This is the same identifier for both the CopyDBClusterSnapshot action that is called in the destination AWS Region, and the action contained in the pre-signed URL.

    • DestinationRegion - The name of the AWS Region that the DB cluster snapshot will be created in.

    • SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region. For example, if you are copying an encrypted DB cluster snapshot from the us-west-2 AWS Region, then your SourceDBClusterSnapshotIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115.

    To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.

  • TargetDBClusterSnapshotIdentifier - The identifier for the new copy of the DB cluster snapshot in the destination AWS Region.

  • SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the ARN format for the source AWS Region and is the same value as the SourceDBClusterSnapshotIdentifier in the pre-signed URL.

To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified by TargetDBClusterSnapshotIdentifier while that DB cluster snapshot is in \"copying\" status.

For more information on copying encrypted DB cluster snapshots from one AWS Region to another, see Copying a DB Cluster Snapshot in the Same Account, Either in the Same Region or Across Regions in the Amazon RDS User Guide.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

" }, "CopyDBParameterGroup":{ "name":"CopyDBParameterGroup", @@ -168,7 +185,7 @@ {"shape":"SnapshotQuotaExceededFault"}, {"shape":"KMSKeyNotAccessibleFault"} ], - "documentation":"

Copies the specified DB snapshot. The source DB snapshot must be in the \"available\" state.

You can copy a snapshot from one AWS Region to another. In that case, the AWS Region where you call the CopyDBSnapshot action is the destination AWS Region for the DB snapshot copy.

For more information about copying snapshots, see Copying a DB Snapshot in the Amazon RDS User Guide.

" + "documentation":"

Copies the specified DB snapshot. The source DB snapshot must be in the \"available\" state.

You can copy a snapshot from one AWS Region to another. In that case, the AWS Region where you call the CopyDBSnapshot action is the destination AWS Region for the DB snapshot copy.

For more information about copying snapshots, see Copying a DB Snapshot in the Amazon RDS User Guide.

" }, "CopyOptionGroup":{ "name":"CopyOptionGroup", @@ -354,7 +371,7 @@ {"shape":"DBSecurityGroupQuotaExceededFault"}, {"shape":"DBSecurityGroupNotSupportedFault"} ], - "documentation":"

Creates a new DB security group. DB security groups control access to a DB instance.

" + "documentation":"

Creates a new DB security group. DB security groups control access to a DB instance.

A DB security group controls access to EC2-Classic DB instances that are not in a VPC.

" }, "CreateDBSnapshot":{ "name":"CreateDBSnapshot", @@ -620,6 +637,23 @@ ], "documentation":"

Lists the set of CA certificates provided by Amazon RDS for this AWS account.

" }, + "DescribeDBClusterBacktracks":{ + "name":"DescribeDBClusterBacktracks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBClusterBacktracksMessage"}, + "output":{ + "shape":"DBClusterBacktrackMessage", + "resultWrapper":"DescribeDBClusterBacktracksResult" + }, + "errors":[ + {"shape":"DBClusterNotFoundFault"}, + {"shape":"DBClusterBacktrackNotFoundFault"} + ], + "documentation":"

Returns information about backtracks for a DB cluster.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

" + }, "DescribeDBClusterParameterGroups":{ "name":"DescribeDBClusterParameterGroups", "http":{ @@ -1487,7 +1521,8 @@ {"shape":"DBSubnetGroupNotFoundFault"}, {"shape":"InvalidSubnet"}, {"shape":"OptionGroupNotFoundFault"}, - {"shape":"KMSKeyNotAccessibleFault"} + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"DBClusterParameterGroupNotFoundFault"} ], "documentation":"

Creates a new DB cluster from a DB snapshot or DB cluster snapshot.

If a DB snapshot is specified, the target DB cluster is created from the source DB snapshot with a default configuration and default security group.

If a DB cluster snapshot is specified, the target DB cluster is created from the source DB cluster restore point with the same configuration as the original source DB cluster, except that the new DB cluster is created with the default security group.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

" }, @@ -1518,7 +1553,8 @@ {"shape":"InvalidVPCNetworkStateFault"}, {"shape":"KMSKeyNotAccessibleFault"}, {"shape":"OptionGroupNotFoundFault"}, - {"shape":"StorageQuotaExceededFault"} + {"shape":"StorageQuotaExceededFault"}, + {"shape":"DBClusterParameterGroupNotFoundFault"} ], "documentation":"

Restores a DB cluster to an arbitrary point in time. Users can restore to any point in time before LatestRestorableTime for up to BackupRetentionPeriod days. The target DB cluster is created from the source DB cluster with the same configuration as the original DB cluster, except that the new DB cluster is created with the default DB security group.

This action only restores the DB cluster, not the DB instances for that DB cluster. You must invoke the CreateDBInstance action to create DB instances for the restored DB cluster, specifying the identifier of the restored DB cluster in DBClusterIdentifier. You can create DB instances only after the RestoreDBClusterToPointInTime action has completed and the DB cluster is available.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

" }, @@ -1551,7 +1587,8 @@ {"shape":"AuthorizationNotFoundFault"}, {"shape":"KMSKeyNotAccessibleFault"}, {"shape":"DBSecurityGroupNotFoundFault"}, - {"shape":"DomainNotFoundFault"} + {"shape":"DomainNotFoundFault"}, + {"shape":"DBParameterGroupNotFoundFault"} ], "documentation":"

Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with the most of original configuration with the default security group and the default DB parameter group. By default, the new DB instance is created as a single-AZ deployment except when the instance is a SQL Server instance that has an option group that is associated with mirroring; in this case, the instance becomes a mirrored AZ deployment and not a single-AZ deployment.

If your intent is to replace your original DB instance with the new, restored DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot action. RDS doesn't allow two DB instances with the same name. Once you have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as the DBInstanceIdentifier in the call to the RestoreDBInstanceFromDBSnapshot action. The result is that you will replace the original DB instance with the DB instance created from the snapshot.

If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB snapshot.

This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterFromSnapshot.

" }, @@ -1616,9 +1653,10 @@ {"shape":"AuthorizationNotFoundFault"}, {"shape":"KMSKeyNotAccessibleFault"}, {"shape":"DBSecurityGroupNotFoundFault"}, - {"shape":"DomainNotFoundFault"} + {"shape":"DomainNotFoundFault"}, + {"shape":"DBParameterGroupNotFoundFault"} ], - "documentation":"

Restores a DB instance to an arbitrary point in time. You can restore to any point in time before the time identified by the LatestRestorableTime property. You can restore to a point up to the number of days specified by the BackupRetentionPeriod property.

The target database is created with most of the original configuration, but in a system-selected availability zone, with the default security group, the default subnet group, and the default DB parameter group. By default, the new DB instance is created as a single-AZ deployment except when the instance is a SQL Server instance that has an option group that is associated with mirroring; in this case, the instance becomes a mirrored deployment and not a single-AZ deployment.

This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterToPointInTime.

" + "documentation":"

Restores a DB instance to an arbitrary point in time. You can restore to any point in time before the time identified by the LatestRestorableTime property. You can restore to a point up to the number of days specified by the BackupRetentionPeriod property.

The target database is created with most of the original configuration, but in a system-selected Availability Zone, with the default security group, the default subnet group, and the default DB parameter group. By default, the new DB instance is created as a single-AZ deployment except when the instance is a SQL Server instance that has an option group that is associated with mirroring; in this case, the instance becomes a mirrored deployment and not a single-AZ deployment.

This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterToPointInTime.

" }, "RevokeDBSecurityGroupIngress":{ "name":"RevokeDBSecurityGroupIngress", @@ -1828,7 +1866,7 @@ "type":"structure", "members":{ }, - "documentation":"

The specified CIDRIP or EC2 security group is already authorized for the specified DB security group.

", + "documentation":"

The specified CIDRIP or Amazon EC2 security group is already authorized for the specified DB security group.

", "error":{ "code":"AuthorizationAlreadyExists", "httpStatusCode":400, @@ -1840,7 +1878,7 @@ "type":"structure", "members":{ }, - "documentation":"

Specified CIDRIP or EC2 security group is not authorized for the specified DB security group.

RDS may not also be authorized via IAM to perform necessary actions on your behalf.

", + "documentation":"

The specified CIDRIP or Amazon EC2 security group isn't authorized for the specified DB security group.

RDS also may not be authorized by using IAM to perform necessary actions on your behalf.

", "error":{ "code":"AuthorizationNotFound", "httpStatusCode":404, @@ -1852,7 +1890,7 @@ "type":"structure", "members":{ }, - "documentation":"

DB security group authorization quota has been reached.

", + "documentation":"

The DB security group authorization quota has been reached.

", "error":{ "code":"AuthorizationQuotaExceeded", "httpStatusCode":400, @@ -1898,7 +1936,7 @@ "members":{ "Name":{ "shape":"String", - "documentation":"

The name of the availability zone.

" + "documentation":"

The name of the Availability Zone.

" } }, "documentation":"

Contains Availability Zone information.

This data type is used as an element in the following data type:

", @@ -1918,6 +1956,57 @@ "locationName":"AvailabilityZone" } }, + "AvailableProcessorFeature":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"String", + "documentation":"

The name of the processor feature. Valid names are coreCount and threadsPerCore.

" + }, + "DefaultValue":{ + "shape":"String", + "documentation":"

The default value for the processor feature of the DB instance class.

" + }, + "AllowedValues":{ + "shape":"String", + "documentation":"

The allowed values for the processor feature of the DB instance class.

" + } + }, + "documentation":"

Contains the available processor feature information for the DB instance class of a DB instance.

For more information, see Configuring the Processor of the DB Instance Class in the Amazon RDS User Guide.

" + }, + "AvailableProcessorFeatureList":{ + "type":"list", + "member":{ + "shape":"AvailableProcessorFeature", + "locationName":"AvailableProcessorFeature" + } + }, + "BacktrackDBClusterMessage":{ + "type":"structure", + "required":[ + "DBClusterIdentifier", + "BacktrackTo" + ], + "members":{ + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The DB cluster identifier of the DB cluster to be backtracked. This parameter is stored as a lowercase string.

Constraints:

  • Must contain from 1 to 63 alphanumeric characters or hyphens.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: my-cluster1

" + }, + "BacktrackTo":{ + "shape":"TStamp", + "documentation":"

The timestamp of the time to backtrack the DB cluster to, specified in ISO 8601 format. For more information about ISO 8601, see the ISO8601 Wikipedia page.

If the specified time is not a consistent time for the DB cluster, Aurora automatically chooses the nearest possible consistent time for the DB cluster.

Constraints:

  • Must contain a valid ISO 8601 timestamp.

  • Cannot contain a timestamp set in the future.

Example: 2017-07-08T18:00Z

" + }, + "Force":{ + "shape":"BooleanOptional", + "documentation":"

A value that, if specified, forces the DB cluster to backtrack when binary logging is enabled. Otherwise, an error occurs when binary logging is enabled.

" + }, + "UseEarliestTimeOnPointInTimeUnavailable":{ + "shape":"BooleanOptional", + "documentation":"

If BacktrackTo is set to a timestamp earlier than the earliest backtrack time, this value backtracks the DB cluster to the earliest possible backtrack time. Otherwise, an error occurs.

" + } + }, + "documentation":"

" + }, "Boolean":{"type":"boolean"}, "BooleanOptional":{"type":"boolean"}, "Certificate":{ @@ -1976,7 +2065,7 @@ "type":"structure", "members":{ }, - "documentation":"

CertificateIdentifier does not refer to an existing certificate.

", + "documentation":"

CertificateIdentifier doesn't refer to an existing certificate.

", "error":{ "code":"CertificateNotFound", "httpStatusCode":404, @@ -2058,7 +2147,7 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

The AWS KMS key ID for an encrypted DB cluster snapshot. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

If you copy an unencrypted DB cluster snapshot and specify a value for the KmsKeyId parameter, Amazon RDS encrypts the target DB cluster snapshot using the specified KMS encryption key.

If you copy an encrypted DB cluster snapshot from your AWS account, you can specify a value for KmsKeyId to encrypt the copy with a new KMS encryption key. If you don't specify a value for KmsKeyId, then the copy of the DB cluster snapshot is encrypted with the same KMS key as the source DB cluster snapshot.

If you copy an encrypted DB cluster snapshot that is shared from another AWS account, then you must specify a value for KmsKeyId.

To copy an encrypted DB cluster snapshot to another AWS Region, you must set KmsKeyId to the KMS key ID you want to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region. KMS encryption keys are specific to the AWS Region that they are created in, and you can't use encryption keys from one AWS Region in another AWS Region.

" + "documentation":"

The AWS KMS key ID for an encrypted DB cluster snapshot. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

If you copy an encrypted DB cluster snapshot from your AWS account, you can specify a value for KmsKeyId to encrypt the copy with a new KMS encryption key. If you don't specify a value for KmsKeyId, then the copy of the DB cluster snapshot is encrypted with the same KMS key as the source DB cluster snapshot.

If you copy an encrypted DB cluster snapshot that is shared from another AWS account, then you must specify a value for KmsKeyId.

To copy an encrypted DB cluster snapshot to another AWS Region, you must set KmsKeyId to the KMS key ID you want to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region. KMS encryption keys are specific to the AWS Region that they are created in, and you can't use encryption keys from one AWS Region in another AWS Region.

If you copy an unencrypted DB cluster snapshot and specify a value for the KmsKeyId parameter, an error is returned.

" }, "PreSignedUrl":{ "shape":"String", @@ -2270,6 +2359,14 @@ "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", "documentation":"

True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.

Default: false

" + }, + "BacktrackWindow":{ + "shape":"LongOptional", + "documentation":"

The target backtrack window, in seconds. To disable backtracking, set this value to 0.

Default: 0

Constraints:

  • If specified, this value must be set to a number from 0 to 259,200 (72 hours).

" + }, + "EnableCloudwatchLogsExports":{ + "shape":"LogTypeList", + "documentation":"

The list of log types that need to be enabled for exporting to CloudWatch Logs.

" } }, "documentation":"

" @@ -2360,7 +2457,7 @@ }, "DBInstanceClass":{ "shape":"String", - "documentation":"

The compute and memory capacity of the DB instance, for example, db.m4.large. Not all DB instance classes are available in all AWS Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.

" + "documentation":"

The compute and memory capacity of the DB instance, for example, db.m4.large. Not all DB instance classes are available in all AWS Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.

" }, "Engine":{ "shape":"String", @@ -2416,7 +2513,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

The version number of the database engine to use.

The following are the database engines and major and minor versions that are available with Amazon RDS. Not every database engine is available for every AWS Region.

Amazon Aurora

Not applicable. The version number of the database engine to be used by the DB instance is managed by the DB cluster. For more information, see CreateDBCluster.

MariaDB

  • 10.2.12 (supported in all AWS Regions)

  • 10.2.11 (supported in all AWS Regions)

  • 10.1.31 (supported in all AWS Regions)

  • 10.1.26 (supported in all AWS Regions)

  • 10.1.23 (supported in all AWS Regions)

  • 10.1.19 (supported in all AWS Regions)

  • 10.1.14 (supported in all AWS Regions except us-east-2)

  • 10.0.34 (supported in all AWS Regions)

  • 10.0.32 (supported in all AWS Regions)

  • 10.0.31 (supported in all AWS Regions)

  • 10.0.28 (supported in all AWS Regions)

  • 10.0.24 (supported in all AWS Regions)

  • 10.0.17 (supported in all AWS Regions except us-east-2, ca-central-1, eu-west-2)

Microsoft SQL Server 2017

  • 14.00.1000.169.v1 (supported for all editions, and all AWS Regions)

Microsoft SQL Server 2016

  • 13.00.4451.0.v1 (supported for all editions, and all AWS Regions)

  • 13.00.4422.0.v1 (supported for all editions, and all AWS Regions)

  • 13.00.2164.0.v1 (supported for all editions, and all AWS Regions)

Microsoft SQL Server 2014

  • 12.00.5546.0.v1 (supported for all editions, and all AWS Regions)

  • 12.00.5000.0.v1 (supported for all editions, and all AWS Regions)

  • 12.00.4422.0.v1 (supported for all editions except Enterprise Edition, and all AWS Regions except ca-central-1 and eu-west-2)

Microsoft SQL Server 2012

  • 11.00.6594.0.v1 (supported for all editions, and all AWS Regions)

  • 11.00.6020.0.v1 (supported for all editions, and all AWS Regions)

  • 11.00.5058.0.v1 (supported for all editions, and all AWS Regions except us-east-2, ca-central-1, and eu-west-2)

  • 11.00.2100.60.v1 (supported for all editions, and all AWS Regions except us-east-2, ca-central-1, and eu-west-2)

Microsoft SQL Server 2008 R2

  • 10.50.6529.0.v1 (supported for all editions, and all AWS Regions except us-east-2, ca-central-1, and eu-west-2)

  • 10.50.6000.34.v1 (supported for all editions, and all AWS Regions except us-east-2, ca-central-1, and eu-west-2)

  • 10.50.2789.0.v1 (supported for all editions, and all AWS Regions except us-east-2, ca-central-1, and eu-west-2)

MySQL

  • 5.7.21 (supported in all AWS regions)

  • 5.7.19 (supported in all AWS regions)

  • 5.7.17 (supported in all AWS regions)

  • 5.7.16 (supported in all AWS regions)

  • 5.6.39 (supported in all AWS Regions)

  • 5.6.37 (supported in all AWS Regions)

  • 5.6.35 (supported in all AWS Regions)

  • 5.6.34 (supported in all AWS Regions)

  • 5.6.29 (supported in all AWS Regions)

  • 5.6.27 (supported in all AWS Regions except us-east-2, ca-central-1, eu-west-2)

  • 5.5.59 (supported in all AWS Regions)

  • 5.5.57 (supported in all AWS Regions)

  • 5.5.54 (supported in all AWS Regions)

  • 5.5.53 (supported in all AWS Regions)

  • 5.5.46 (supported in all AWS Regions)

Oracle 12c

  • 12.1.0.2.v9 (supported for EE in all AWS regions, and SE2 in all AWS regions except us-gov-west-1)

  • 12.1.0.2.v8 (supported for EE in all AWS regions, and SE2 in all AWS regions except us-gov-west-1)

  • 12.1.0.2.v7 (supported for EE in all AWS regions, and SE2 in all AWS regions except us-gov-west-1)

  • 12.1.0.2.v6 (supported for EE in all AWS regions, and SE2 in all AWS regions except us-gov-west-1)

  • 12.1.0.2.v5 (supported for EE in all AWS regions, and SE2 in all AWS regions except us-gov-west-1)

  • 12.1.0.2.v4 (supported for EE in all AWS regions, and SE2 in all AWS regions except us-gov-west-1)

  • 12.1.0.2.v3 (supported for EE in all AWS regions, and SE2 in all AWS regions except us-gov-west-1)

  • 12.1.0.2.v2 (supported for EE in all AWS regions, and SE2 in all AWS regions except us-gov-west-1)

  • 12.1.0.2.v1 (supported for EE in all AWS regions, and SE2 in all AWS regions except us-gov-west-1)

Oracle 11g

  • 11.2.0.4.v13 (supported for EE, SE1, and SE, in all AWS regions)

  • 11.2.0.4.v12 (supported for EE, SE1, and SE, in all AWS regions)

  • 11.2.0.4.v11 (supported for EE, SE1, and SE, in all AWS regions)

  • 11.2.0.4.v10 (supported for EE, SE1, and SE, in all AWS regions)

  • 11.2.0.4.v9 (supported for EE, SE1, and SE, in all AWS regions)

  • 11.2.0.4.v8 (supported for EE, SE1, and SE, in all AWS regions)

  • 11.2.0.4.v7 (supported for EE, SE1, and SE, in all AWS regions)

  • 11.2.0.4.v6 (supported for EE, SE1, and SE, in all AWS regions)

  • 11.2.0.4.v5 (supported for EE, SE1, and SE, in all AWS regions)

  • 11.2.0.4.v4 (supported for EE, SE1, and SE, in all AWS regions)

  • 11.2.0.4.v3 (supported for EE, SE1, and SE, in all AWS regions)

  • 11.2.0.4.v1 (supported for EE, SE1, and SE, in all AWS regions)

PostgreSQL

  • Version 10.1

  • Version 9.6.x: 9.6.6 | 9.6.5 | 9.6.3 | 9.6.2 | 9.6.1

  • Version 9.5.x: 9.5.9 | 9.5.7 | 9.5.6 | 9.5.4 | 9.5.2

  • Version 9.4.x: 9.4.14 | 9.4.12 | 9.4.11 | 9.4.9 | 9.4.7

  • Version 9.3.x: 9.3.19 | 9.3.17 | 9.3.16 | 9.3.14 | 9.3.12

" + "documentation":"

The version number of the database engine to use.

For a list of valid engine versions, call DescribeDBEngineVersions.

The following are the database engines and links to information about the major and minor versions that are available with Amazon RDS. Not every database engine is available for every AWS Region.

Amazon Aurora

Not applicable. The version number of the database engine to be used by the DB instance is managed by the DB cluster. For more information, see CreateDBCluster.

MariaDB

See MariaDB on Amazon RDS Versions in the Amazon RDS User Guide.

Microsoft SQL Server

See Version and Feature Support on Amazon RDS in the Amazon RDS User Guide.

MySQL

See MySQL on Amazon RDS Versions in the Amazon RDS User Guide.

Oracle

See Oracle Database Engine Release Notes in the Amazon RDS User Guide.

PostgreSQL

See Supported PostgreSQL Database Versions in the Amazon RDS User Guide.

" }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", @@ -2501,15 +2598,23 @@ }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", - "documentation":"

True to enable Performance Insights for the DB instance, and otherwise false.

" + "documentation":"

True to enable Performance Insights for the DB instance, and otherwise false.

For more information, see Using Amazon Performance Insights in the Amazon Relational Database Service User Guide.

" }, "PerformanceInsightsKMSKeyId":{ "shape":"String", "documentation":"

The AWS KMS key identifier for encryption of Performance Insights data. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

" }, + "PerformanceInsightsRetentionPeriod":{ + "shape":"IntegerOptional", + "documentation":"

The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

" + }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", "documentation":"

The list of log types that need to be enabled for exporting to CloudWatch Logs.

" + }, + "ProcessorFeatures":{ + "shape":"ProcessorFeatureList", + "documentation":"

The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.

" } }, "documentation":"

" @@ -2531,7 +2636,7 @@ }, "DBInstanceClass":{ "shape":"String", - "documentation":"

The compute and memory capacity of the Read Replica, for example, db.m4.large. Not all DB instance classes are available in all AWS Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.

Default: Inherits from the source DB instance.

" + "documentation":"

The compute and memory capacity of the Read Replica, for example, db.m4.large. Not all DB instance classes are available in all AWS Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.

Default: Inherits from the source DB instance.

" }, "AvailabilityZone":{ "shape":"String", @@ -2596,15 +2701,27 @@ }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", - "documentation":"

True to enable Performance Insights for the read replica, and otherwise false.

" + "documentation":"

True to enable Performance Insights for the read replica, and otherwise false.

For more information, see Using Amazon Performance Insights in the Amazon Relational Database Service User Guide.

" }, "PerformanceInsightsKMSKeyId":{ "shape":"String", "documentation":"

The AWS KMS key identifier for encryption of Performance Insights data. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

" }, + "PerformanceInsightsRetentionPeriod":{ + "shape":"IntegerOptional", + "documentation":"

The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

" + }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", "documentation":"

The list of logs that the new DB instance is to export to CloudWatch Logs.

" + }, + "ProcessorFeatures":{ + "shape":"ProcessorFeatureList", + "documentation":"

The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.

" + }, + "UseDefaultProcessorFeatures":{ + "shape":"BooleanOptional", + "documentation":"

A value that specifies that the DB instance class of the DB instance uses its default processor features.

" } } }, @@ -2634,7 +2751,7 @@ }, "DBParameterGroupFamily":{ "shape":"String", - "documentation":"

The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a database engine and engine version compatible with that DB parameter group family.

" + "documentation":"

The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a database engine and engine version compatible with that DB parameter group family.

To list all of the available parameter group families, use the following command:

aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\"

The output contains duplicates.

" }, "Description":{ "shape":"String", @@ -2851,7 +2968,7 @@ }, "EarliestRestorableTime":{ "shape":"TStamp", - "documentation":"

Specifies the earliest time to which a database can be restored with point-in-time restore.

" + "documentation":"

The earliest time to which a database can be restored with point-in-time restore.

" }, "Endpoint":{ "shape":"String", @@ -2948,6 +3065,22 @@ "ClusterCreateTime":{ "shape":"TStamp", "documentation":"

Specifies the time when the DB cluster was created, in Universal Coordinated Time (UTC).

" + }, + "EarliestBacktrackTime":{ + "shape":"TStamp", + "documentation":"

The earliest time to which a DB cluster can be backtracked.

" + }, + "BacktrackWindow":{ + "shape":"LongOptional", + "documentation":"

The target backtrack window, in seconds. If this value is set to 0, backtracking is disabled for the DB cluster. Otherwise, backtracking is enabled.

" + }, + "BacktrackConsumedChangeRecords":{ + "shape":"LongOptional", + "documentation":"

The number of change records stored for Backtrack.

" + }, + "EnabledCloudwatchLogsExports":{ + "shape":"LogTypeList", + "documentation":"

A list of log types that this DB cluster is configured to export to CloudWatch Logs.

" } }, "documentation":"

Contains the details of an Amazon RDS DB cluster.

This data type is used as a response element in the DescribeDBClusters action.

", @@ -2957,7 +3090,7 @@ "type":"structure", "members":{ }, - "documentation":"

User already has a DB cluster with the given identifier.

", + "documentation":"

The user already has a DB cluster with the given identifier.

", "error":{ "code":"DBClusterAlreadyExistsFault", "httpStatusCode":400, @@ -2965,6 +3098,69 @@ }, "exception":true }, + "DBClusterBacktrack":{ + "type":"structure", + "members":{ + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

Contains a user-supplied DB cluster identifier. This identifier is the unique key that identifies a DB cluster.

" + }, + "BacktrackIdentifier":{ + "shape":"String", + "documentation":"

Contains the backtrack identifier.

" + }, + "BacktrackTo":{ + "shape":"TStamp", + "documentation":"

The timestamp of the time to which the DB cluster was backtracked.

" + }, + "BacktrackedFrom":{ + "shape":"TStamp", + "documentation":"

The timestamp of the time from which the DB cluster was backtracked.

" + }, + "BacktrackRequestCreationTime":{ + "shape":"TStamp", + "documentation":"

The timestamp of the time at which the backtrack was requested.

" + }, + "Status":{ + "shape":"String", + "documentation":"

The status of the backtrack. This property returns one of the following values:

  • applying - The backtrack is currently being applied to or rolled back from the DB cluster.

  • completed - The backtrack has successfully been applied to or rolled back from the DB cluster.

  • failed - An error occurred while the backtrack was applied to or rolled back from the DB cluster.

  • pending - The backtrack is currently pending application to or rollback from the DB cluster.

" + } + }, + "documentation":"

This data type is used as a response element in the DescribeDBClusterBacktracks action.

" + }, + "DBClusterBacktrackList":{ + "type":"list", + "member":{ + "shape":"DBClusterBacktrack", + "locationName":"DBClusterBacktrack" + } + }, + "DBClusterBacktrackMessage":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

A pagination token that can be used in a subsequent DescribeDBClusterBacktracks request.

" + }, + "DBClusterBacktracks":{ + "shape":"DBClusterBacktrackList", + "documentation":"

Contains a list of backtracks for the user.

" + } + }, + "documentation":"

Contains the result of a successful invocation of the DescribeDBClusterBacktracks action.

" + }, + "DBClusterBacktrackNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

BacktrackIdentifier doesn't refer to an existing backtrack.

", + "error":{ + "code":"DBClusterBacktrackNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, "DBClusterList":{ "type":"list", "member":{ @@ -3020,7 +3216,7 @@ "type":"structure", "members":{ }, - "documentation":"

DBClusterIdentifier does not refer to an existing DB cluster.

", + "documentation":"

DBClusterIdentifier doesn't refer to an existing DB cluster.

", "error":{ "code":"DBClusterNotFoundFault", "httpStatusCode":404, @@ -3107,7 +3303,7 @@ "type":"structure", "members":{ }, - "documentation":"

DBClusterParameterGroupName does not refer to an existing DB Cluster parameter group.

", + "documentation":"

DBClusterParameterGroupName doesn't refer to an existing DB cluster parameter group.

", "error":{ "code":"DBClusterParameterGroupNotFound", "httpStatusCode":404, @@ -3133,7 +3329,7 @@ "type":"structure", "members":{ }, - "documentation":"

User attempted to create a new DB cluster and the user has already reached the maximum allowed DB cluster quota.

", + "documentation":"

The user attempted to create a new DB cluster and the user has already reached the maximum allowed DB cluster quota.

", "error":{ "code":"DBClusterQuotaExceededFault", "httpStatusCode":403, @@ -3171,7 +3367,7 @@ "type":"structure", "members":{ }, - "documentation":"

The specified IAM role Amazon Resource Name (ARN) is not associated with the specified DB cluster.

", + "documentation":"

The specified IAM role Amazon Resource Name (ARN) isn't associated with the specified DB cluster.

", "error":{ "code":"DBClusterRoleNotFound", "httpStatusCode":404, @@ -3289,7 +3485,7 @@ "type":"structure", "members":{ }, - "documentation":"

User already has a DB cluster snapshot with the given identifier.

", + "documentation":"

The user already has a DB cluster snapshot with the given identifier.

", "error":{ "code":"DBClusterSnapshotAlreadyExistsFault", "httpStatusCode":400, @@ -3358,7 +3554,7 @@ "type":"structure", "members":{ }, - "documentation":"

DBClusterSnapshotIdentifier does not refer to an existing DB cluster snapshot.

", + "documentation":"

DBClusterSnapshotIdentifier doesn't refer to an existing DB cluster snapshot.

", "error":{ "code":"DBClusterSnapshotNotFoundFault", "httpStatusCode":404, @@ -3648,9 +3844,17 @@ "shape":"String", "documentation":"

The AWS KMS key identifier for encryption of Performance Insights data. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

" }, + "PerformanceInsightsRetentionPeriod":{ + "shape":"IntegerOptional", + "documentation":"

The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

" + }, "EnabledCloudwatchLogsExports":{ "shape":"LogTypeList", "documentation":"

A list of log types that this DB instance is configured to export to CloudWatch Logs.

" + }, + "ProcessorFeatures":{ + "shape":"ProcessorFeatureList", + "documentation":"

The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.

" } }, "documentation":"

Contains the details of an Amazon RDS DB instance.

This data type is used as a response element in the DescribeDBInstances action.

", @@ -3660,7 +3864,7 @@ "type":"structure", "members":{ }, - "documentation":"

User already has a DB instance with the given identifier.

", + "documentation":"

The user already has a DB instance with the given identifier.

", "error":{ "code":"DBInstanceAlreadyExists", "httpStatusCode":400, @@ -3693,7 +3897,7 @@ "type":"structure", "members":{ }, - "documentation":"

DBInstanceIdentifier does not refer to an existing DB instance.

", + "documentation":"

DBInstanceIdentifier doesn't refer to an existing DB instance.

", "error":{ "code":"DBInstanceNotFound", "httpStatusCode":404, @@ -3734,7 +3938,7 @@ "type":"structure", "members":{ }, - "documentation":"

LogFileName does not refer to an existing DB log file.

", + "documentation":"

LogFileName doesn't refer to an existing DB log file.

", "error":{ "code":"DBLogFileNotFoundFault", "httpStatusCode":404, @@ -3812,7 +4016,7 @@ "type":"structure", "members":{ }, - "documentation":"

DBParameterGroupName does not refer to an existing DB parameter group.

", + "documentation":"

DBParameterGroupName doesn't refer to an existing DB parameter group.

", "error":{ "code":"DBParameterGroupNotFound", "httpStatusCode":404, @@ -3824,7 +4028,7 @@ "type":"structure", "members":{ }, - "documentation":"

Request would result in user exceeding the allowed number of DB parameter groups.

", + "documentation":"

The request would result in the user exceeding the allowed number of DB parameter groups.

", "error":{ "code":"DBParameterGroupQuotaExceeded", "httpStatusCode":400, @@ -3960,7 +4164,7 @@ "type":"structure", "members":{ }, - "documentation":"

DBSecurityGroupName does not refer to an existing DB security group.

", + "documentation":"

DBSecurityGroupName doesn't refer to an existing DB security group.

", "error":{ "code":"DBSecurityGroupNotFound", "httpStatusCode":404, @@ -3972,7 +4176,7 @@ "type":"structure", "members":{ }, - "documentation":"

A DB security group is not allowed for this action.

", + "documentation":"

A DB security group isn't allowed for this action.

", "error":{ "code":"DBSecurityGroupNotSupported", "httpStatusCode":400, @@ -3984,7 +4188,7 @@ "type":"structure", "members":{ }, - "documentation":"

Request would result in user exceeding the allowed number of DB security groups.

", + "documentation":"

The request would result in the user exceeding the allowed number of DB security groups.

", "error":{ "code":"QuotaExceeded.DBSecurityGroup", "httpStatusCode":400, @@ -4105,6 +4309,10 @@ "IAMDatabaseAuthenticationEnabled":{ "shape":"Boolean", "documentation":"

True if mapping of AWS Identity and Access Management (IAM) accounts to database accounts is enabled, and otherwise false.

" + }, + "ProcessorFeatures":{ + "shape":"ProcessorFeatureList", + "documentation":"

The number of CPU cores and the number of threads per core for the DB instance class of the DB instance when the DB snapshot was created.

" } }, "documentation":"

Contains the details of an Amazon RDS DB snapshot.

This data type is used as a response element in the DescribeDBSnapshots action.

", @@ -4184,7 +4392,7 @@ "type":"structure", "members":{ }, - "documentation":"

DBSnapshotIdentifier does not refer to an existing DB snapshot.

", + "documentation":"

DBSnapshotIdentifier doesn't refer to an existing DB snapshot.

", "error":{ "code":"DBSnapshotNotFound", "httpStatusCode":404, @@ -4265,7 +4473,7 @@ "type":"structure", "members":{ }, - "documentation":"

Indicates that the DBSubnetGroup should not be specified while creating read replicas that lie in the same region as the source instance.

", + "documentation":"

The DBSubnetGroup shouldn't be specified while creating read replicas that lie in the same region as the source instance.

", "error":{ "code":"DBSubnetGroupNotAllowedFault", "httpStatusCode":400, @@ -4277,7 +4485,7 @@ "type":"structure", "members":{ }, - "documentation":"

DBSubnetGroupName does not refer to an existing DB subnet group.

", + "documentation":"

DBSubnetGroupName doesn't refer to an existing DB subnet group.

", "error":{ "code":"DBSubnetGroupNotFoundFault", "httpStatusCode":404, @@ -4289,7 +4497,7 @@ "type":"structure", "members":{ }, - "documentation":"

Request would result in user exceeding the allowed number of DB subnet groups.

", + "documentation":"

The request would result in the user exceeding the allowed number of DB subnet groups.

", "error":{ "code":"DBSubnetGroupQuotaExceeded", "httpStatusCode":400, @@ -4308,7 +4516,7 @@ "type":"structure", "members":{ }, - "documentation":"

Request would result in user exceeding the allowed number of subnets in a DB subnet groups.

", + "documentation":"

The request would result in the user exceeding the allowed number of subnets in a DB subnet groups.

", "error":{ "code":"DBSubnetQuotaExceededFault", "httpStatusCode":400, @@ -4320,7 +4528,7 @@ "type":"structure", "members":{ }, - "documentation":"

The DB upgrade failed because a resource the DB depends on could not be modified.

", + "documentation":"

The DB upgrade failed because a resource the DB depends on can't be modified.

", "error":{ "code":"DBUpgradeDependencyFailure", "httpStatusCode":400, @@ -4512,6 +4720,33 @@ }, "documentation":"

" }, + "DescribeDBClusterBacktracksMessage":{ + "type":"structure", + "required":["DBClusterIdentifier"], + "members":{ + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The DB cluster identifier of the DB cluster to be described. This parameter is stored as a lowercase string.

Constraints:

  • Must contain from 1 to 63 alphanumeric characters or hyphens.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: my-cluster1

" + }, + "BacktrackIdentifier":{ + "shape":"String", + "documentation":"

If specified, this value is the backtrack identifier of the backtrack to be described.

Constraints:

Example: 123e4567-e89b-12d3-a456-426655440000

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

A filter that specifies one or more DB clusters to describe. Supported filters include the following:

  • db-cluster-backtrack-id - Accepts backtrack identifiers. The results list includes information about only the backtracks identified by these identifiers.

  • db-cluster-backtrack-status - Accepts any of the following backtrack status values:

    • applying

    • completed

    • failed

    • pending

    The results list includes information about only the backtracks identified by these values. For more information about backtrack status values, see DBClusterBacktrack.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous DescribeDBClusterBacktracks request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + }, + "documentation":"

" + }, "DescribeDBClusterParameterGroupsMessage":{ "type":"structure", "members":{ @@ -4655,7 +4890,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

Not currently supported.

" + "documentation":"

This parameter is not currently supported.

" }, "MaxRecords":{ "shape":"IntegerOptional", @@ -4961,7 +5196,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

Not currently supported.

" + "documentation":"

This parameter is not currently supported.

" }, "MaxRecords":{ "shape":"IntegerOptional", @@ -5239,7 +5474,7 @@ }, "ProductDescription":{ "shape":"String", - "documentation":"

Product description filter value. Specify this parameter to show only the available offerings matching the specified product description.

" + "documentation":"

Product description filter value. Specify this parameter to show only the available offerings that contain the specified product description.

The results show offerings that partially match the filter value.

" }, "OfferingType":{ "shape":"String", @@ -5337,7 +5572,7 @@ "type":"structure", "members":{ }, - "documentation":"

Domain does not refer to an existing Active Directory Domain.

", + "documentation":"

Domain doesn't refer to an existing Active Directory domain.

", "error":{ "code":"DomainNotFoundFault", "httpStatusCode":404, @@ -5677,14 +5912,14 @@ "members":{ "Name":{ "shape":"String", - "documentation":"

This parameter is not currently supported.

" + "documentation":"

The name of the filter. Filter names are case-sensitive.

" }, "Values":{ "shape":"FilterValueList", - "documentation":"

This parameter is not currently supported.

" + "documentation":"

One or more filter values. Filter values are case-sensitive.

" } }, - "documentation":"

This type is not currently supported.

" + "documentation":"

A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as IDs. The filters supported by a describe operation are documented with the describe operation.

Currently, wildcards are not supported in filters.

The following actions can be filtered:

" }, "FilterList":{ "type":"list", @@ -5725,7 +5960,7 @@ "type":"structure", "members":{ }, - "documentation":"

Request would result in user exceeding the allowed number of DB instances.

", + "documentation":"

The request would result in the user exceeding the allowed number of DB instances.

", "error":{ "code":"InstanceQuotaExceeded", "httpStatusCode":400, @@ -5737,7 +5972,7 @@ "type":"structure", "members":{ }, - "documentation":"

The DB cluster does not have enough capacity for the current operation.

", + "documentation":"

The DB cluster doesn't have enough capacity for the current operation.

", "error":{ "code":"InsufficientDBClusterCapacityFault", "httpStatusCode":403, @@ -5749,7 +5984,7 @@ "type":"structure", "members":{ }, - "documentation":"

Specified DB instance class is not available in the specified Availability Zone.

", + "documentation":"

The specified DB instance class isn't available in the specified Availability Zone.

", "error":{ "code":"InsufficientDBInstanceCapacity", "httpStatusCode":400, @@ -5761,7 +5996,7 @@ "type":"structure", "members":{ }, - "documentation":"

There is insufficient storage available for the current action. You may be able to resolve this error by updating your subnet group to use different Availability Zones that have more storage available.

", + "documentation":"

There is insufficient storage available for the current action. You might be able to resolve this error by updating your subnet group to use different Availability Zones that have more storage available.

", "error":{ "code":"InsufficientStorageClusterCapacity", "httpStatusCode":400, @@ -5775,7 +6010,7 @@ "type":"structure", "members":{ }, - "documentation":"

The supplied value is not a valid DB cluster snapshot state.

", + "documentation":"

The supplied value isn't a valid DB cluster snapshot state.

", "error":{ "code":"InvalidDBClusterSnapshotStateFault", "httpStatusCode":400, @@ -5787,7 +6022,7 @@ "type":"structure", "members":{ }, - "documentation":"

The DB cluster is not in a valid state.

", + "documentation":"

The DB cluster isn't in a valid state.

", "error":{ "code":"InvalidDBClusterStateFault", "httpStatusCode":400, @@ -5799,7 +6034,7 @@ "type":"structure", "members":{ }, - "documentation":"

The specified DB instance is not in the available state.

", + "documentation":"

The specified DB instance isn't in the available state.

", "error":{ "code":"InvalidDBInstanceState", "httpStatusCode":400, @@ -5811,7 +6046,7 @@ "type":"structure", "members":{ }, - "documentation":"

The DB parameter group is in use or is in an invalid state. If you are attempting to delete the parameter group, you cannot delete it when the parameter group is in this state.

", + "documentation":"

The DB parameter group is in use or is in an invalid state. If you are attempting to delete the parameter group, you can't delete it when the parameter group is in this state.

", "error":{ "code":"InvalidDBParameterGroupState", "httpStatusCode":400, @@ -5823,7 +6058,7 @@ "type":"structure", "members":{ }, - "documentation":"

The state of the DB security group does not allow deletion.

", + "documentation":"

The state of the DB security group doesn't allow deletion.

", "error":{ "code":"InvalidDBSecurityGroupState", "httpStatusCode":400, @@ -5835,7 +6070,7 @@ "type":"structure", "members":{ }, - "documentation":"

The state of the DB snapshot does not allow deletion.

", + "documentation":"

The state of the DB snapshot doesn't allow deletion.

", "error":{ "code":"InvalidDBSnapshotState", "httpStatusCode":400, @@ -5847,7 +6082,7 @@ "type":"structure", "members":{ }, - "documentation":"

Indicates the DBSubnetGroup does not belong to the same VPC as that of an existing cross region read replica of the same source instance.

", + "documentation":"

The DBSubnetGroup doesn't belong to the same VPC as that of an existing cross-region read replica of the same source instance.

", "error":{ "code":"InvalidDBSubnetGroupFault", "httpStatusCode":400, @@ -5859,7 +6094,7 @@ "type":"structure", "members":{ }, - "documentation":"

The DB subnet group cannot be deleted because it is in use.

", + "documentation":"

The DB subnet group cannot be deleted because it's in use.

", "error":{ "code":"InvalidDBSubnetGroupStateFault", "httpStatusCode":400, @@ -5871,7 +6106,7 @@ "type":"structure", "members":{ }, - "documentation":"

The DB subnet is not in the available state.

", + "documentation":"

The DB subnet isn't in the available state.

", "error":{ "code":"InvalidDBSubnetStateFault", "httpStatusCode":400, @@ -5895,7 +6130,7 @@ "type":"structure", "members":{ }, - "documentation":"

The option group is not in the available state.

", + "documentation":"

The option group isn't in the available state.

", "error":{ "code":"InvalidOptionGroupStateFault", "httpStatusCode":400, @@ -5907,7 +6142,7 @@ "type":"structure", "members":{ }, - "documentation":"

Cannot restore from vpc backup to non-vpc DB instance.

", + "documentation":"

Cannot restore from VPC backup to non-VPC DB instance.

", "error":{ "code":"InvalidRestoreFault", "httpStatusCode":400, @@ -5919,7 +6154,7 @@ "type":"structure", "members":{ }, - "documentation":"

The specified Amazon S3 bucket name could not be found or Amazon RDS is not authorized to access the specified Amazon S3 bucket. Verify the SourceS3BucketName and S3IngestionRoleArn values and try again.

", + "documentation":"

The specified Amazon S3 bucket name can't be found or Amazon RDS isn't authorized to access the specified Amazon S3 bucket. Verify the SourceS3BucketName and S3IngestionRoleArn values and try again.

", "error":{ "code":"InvalidS3BucketFault", "httpStatusCode":400, @@ -5943,7 +6178,7 @@ "type":"structure", "members":{ }, - "documentation":"

DB subnet group does not cover all Availability Zones after it is created because users' change.

", + "documentation":"

The DB subnet group doesn't cover all Availability Zones after it's created because of users' change.

", "error":{ "code":"InvalidVPCNetworkStateFault", "httpStatusCode":400, @@ -5955,7 +6190,7 @@ "type":"structure", "members":{ }, - "documentation":"

Error accessing KMS key.

", + "documentation":"

An error occurred accessing an AWS KMS key.

", "error":{ "code":"KMSKeyNotAccessibleFault", "httpStatusCode":400, @@ -5987,6 +6222,7 @@ "member":{"shape":"String"} }, "Long":{"type":"long"}, + "LongOptional":{"type":"long"}, "ModifyDBClusterMessage":{ "type":"structure", "required":["DBClusterIdentifier"], @@ -6039,9 +6275,17 @@ "shape":"BooleanOptional", "documentation":"

True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.

Default: false

" }, + "BacktrackWindow":{ + "shape":"LongOptional", + "documentation":"

The target backtrack window, in seconds. To disable backtracking, set this value to 0.

Default: 0

Constraints:

  • If specified, this value must be set to a number from 0 to 259,200 (72 hours).

" + }, + "CloudwatchLogsExportConfiguration":{ + "shape":"CloudwatchLogsExportConfiguration", + "documentation":"

The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB cluster.

" + }, "EngineVersion":{ "shape":"String", - "documentation":"

The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true.

For a list of valid engine versions, see CreateDBInstance, or call DescribeDBEngineVersions.

" + "documentation":"

The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true.

For a list of valid engine versions, see CreateDBCluster, or call DescribeDBEngineVersions.

" } }, "documentation":"

" @@ -6116,7 +6360,7 @@ }, "DBInstanceClass":{ "shape":"String", - "documentation":"

The new compute and memory capacity of the DB instance, for example, db.m4.large. Not all DB instance classes are available in all AWS Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.

If you modify the DB instance class, an outage occurs during the change. The change is applied during the next maintenance window, unless ApplyImmediately is specified as true for this request.

Default: Uses existing setting

" + "documentation":"

The new compute and memory capacity of the DB instance, for example, db.m4.large. Not all DB instance classes are available in all AWS Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.

If you modify the DB instance class, an outage occurs during the change. The change is applied during the next maintenance window, unless ApplyImmediately is specified as true for this request.

Default: Uses existing setting

" }, "DBSubnetGroupName":{ "shape":"String", @@ -6160,7 +6404,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

For major version upgrades, if a nondefault DB parameter group is currently in use, a new DB parameter group in the DB parameter group family for the new engine version must be specified. The new DB parameter group can be the default for that DB parameter group family.

For a list of valid engine versions, see CreateDBInstance.

" + "documentation":"

The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

For major version upgrades, if a nondefault DB parameter group is currently in use, a new DB parameter group in the DB parameter group family for the new engine version must be specified. The new DB parameter group can be the default for that DB parameter group family.

For information about valid engine versions, see CreateDBInstance, or call DescribeDBEngineVersions.

" }, "AllowMajorVersionUpgrade":{ "shape":"Boolean", @@ -6240,15 +6484,27 @@ }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", - "documentation":"

True to enable Performance Insights for the DB instance, and otherwise false.

" + "documentation":"

True to enable Performance Insights for the DB instance, and otherwise false.

For more information, see Using Amazon Performance Insights in the Amazon Relational Database Service User Guide.

" }, "PerformanceInsightsKMSKeyId":{ "shape":"String", "documentation":"

The AWS KMS key identifier for encryption of Performance Insights data. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

" }, + "PerformanceInsightsRetentionPeriod":{ + "shape":"IntegerOptional", + "documentation":"

The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

" + }, "CloudwatchLogsExportConfiguration":{ "shape":"CloudwatchLogsExportConfiguration", - "documentation":"

The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB instance or DB cluster.

" + "documentation":"

The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB instance.

" + }, + "ProcessorFeatures":{ + "shape":"ProcessorFeatureList", + "documentation":"

The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.

" + }, + "UseDefaultProcessorFeatures":{ + "shape":"BooleanOptional", + "documentation":"

A value that specifies that the DB instance class of the DB instance uses its default processor features.

" } }, "documentation":"

" @@ -6930,6 +7186,10 @@ "MaxIopsPerGib":{ "shape":"DoubleOptional", "documentation":"

Maximum provisioned IOPS per GiB for a DB instance.

" + }, + "AvailableProcessorFeatures":{ + "shape":"AvailableProcessorFeatureList", + "documentation":"

A list of the available processor features for the DB instance class of a DB instance.

" } }, "documentation":"

Contains a list of available options for a DB instance.

This data type is used as a response element in the DescribeOrderableDBInstanceOptions action.

", @@ -7136,7 +7396,11 @@ "shape":"String", "documentation":"

The new DB subnet group for the DB instance.

" }, - "PendingCloudwatchLogsExports":{"shape":"PendingCloudwatchLogsExports"} + "PendingCloudwatchLogsExports":{"shape":"PendingCloudwatchLogsExports"}, + "ProcessorFeatures":{ + "shape":"ProcessorFeatureList", + "documentation":"

The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.

" + } }, "documentation":"

This data type is used as a response element in the ModifyDBInstance action.

" }, @@ -7152,6 +7416,27 @@ }, "exception":true }, + "ProcessorFeature":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"String", + "documentation":"

The name of the processor feature. Valid names are coreCount and threadsPerCore.

" + }, + "Value":{ + "shape":"String", + "documentation":"

The value of a processor feature name.

" + } + }, + "documentation":"

Contains the processor features of a DB instance class.

To specify the number of CPU cores, use the coreCount feature name for the Name parameter. To specify the number of threads per core, use the threadsPerCore feature name for the Name parameter.

You can set the processor features of the DB instance class for a DB instance when you call one of the following actions:

You can view the valid processor values for a particular instance class by calling the DescribeOrderableDBInstanceOptions action and specifying the instance class for the DBInstanceClass parameter.

In addition, you can use the following actions for DB instance class processor information:

For more information, see Configuring the Processor of the DB Instance Class in the Amazon RDS User Guide.

" + }, + "ProcessorFeatureList":{ + "type":"list", + "member":{ + "shape":"ProcessorFeature", + "locationName":"ProcessorFeature" + } + }, "PromoteReadReplicaDBClusterMessage":{ "type":"structure", "required":["DBClusterIdentifier"], @@ -7758,6 +8043,14 @@ "S3IngestionRoleArn":{ "shape":"String", "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that authorizes Amazon RDS to access the Amazon S3 bucket on your behalf.

" + }, + "BacktrackWindow":{ + "shape":"LongOptional", + "documentation":"

The target backtrack window, in seconds. To disable backtracking, set this value to 0.

Default: 0

Constraints:

  • If specified, this value must be set to a number from 0 to 259,200 (72 hours).

" + }, + "EnableCloudwatchLogsExports":{ + "shape":"LogTypeList", + "documentation":"

The list of logs that the restored DB cluster is to export to CloudWatch Logs.

" } } }, @@ -7826,6 +8119,14 @@ "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", "documentation":"

True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.

Default: false

" + }, + "BacktrackWindow":{ + "shape":"LongOptional", + "documentation":"

The target backtrack window, in seconds. To disable backtracking, set this value to 0.

Default: 0

Constraints:

  • If specified, this value must be set to a number from 0 to 259,200 (72 hours).

" + }, + "EnableCloudwatchLogsExports":{ + "shape":"LogTypeList", + "documentation":"

The list of logs that the restored DB cluster is to export to CloudWatch Logs.

" } }, "documentation":"

" @@ -7865,7 +8166,7 @@ }, "Port":{ "shape":"IntegerOptional", - "documentation":"

The port number on which the new DB cluster accepts connections.

Constraints: Value must be 1150-65535

Default: The same port as the original DB cluster.

" + "documentation":"

The port number on which the new DB cluster accepts connections.

Constraints: A value from 1150-65535.

Default: The default port for the engine.

" }, "DBSubnetGroupName":{ "shape":"String", @@ -7887,6 +8188,14 @@ "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", "documentation":"

True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.

Default: false

" + }, + "BacktrackWindow":{ + "shape":"LongOptional", + "documentation":"

The target backtrack window, in seconds. To disable backtracking, set this value to 0.

Default: 0

Constraints:

  • If specified, this value must be set to a number from 0 to 259,200 (72 hours).

" + }, + "EnableCloudwatchLogsExports":{ + "shape":"LogTypeList", + "documentation":"

The list of logs that the restored DB cluster is to export to CloudWatch Logs.

" } }, "documentation":"

" @@ -7914,7 +8223,7 @@ }, "DBInstanceClass":{ "shape":"String", - "documentation":"

The compute and memory capacity of the Amazon RDS DB instance, for example, db.m4.large. Not all DB instance classes are available in all AWS Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.

Default: The same DBInstanceClass as the original DB instance.

" + "documentation":"

The compute and memory capacity of the Amazon RDS DB instance, for example, db.m4.large. Not all DB instance classes are available in all AWS Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.

Default: The same DBInstanceClass as the original DB instance.

" }, "Port":{ "shape":"IntegerOptional", @@ -7992,6 +8301,14 @@ "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", "documentation":"

The list of logs that the restored DB instance is to export to CloudWatch Logs.

" + }, + "ProcessorFeatures":{ + "shape":"ProcessorFeatureList", + "documentation":"

The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.

" + }, + "UseDefaultProcessorFeatures":{ + "shape":"BooleanOptional", + "documentation":"

A value that specifies that the DB instance class of the DB instance uses its default processor features.

" } }, "documentation":"

" @@ -8028,7 +8345,7 @@ }, "DBInstanceClass":{ "shape":"String", - "documentation":"

The compute and memory capacity of the DB instance, for example, db.m4.large. Not all DB instance classes are available in all AWS Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.

Importing from Amazon S3 is not supported on the db.t2.micro DB instance class.

" + "documentation":"

The compute and memory capacity of the DB instance, for example, db.m4.large. Not all DB instance classes are available in all AWS Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.

Importing from Amazon S3 is not supported on the db.t2.micro DB instance class.

" }, "Engine":{ "shape":"String", @@ -8084,7 +8401,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

The version number of the database engine to use. Choose the latest minor version of your database engine as specified in CreateDBInstance.

" + "documentation":"

The version number of the database engine to use. Choose the latest minor version of your database engine. For information about engine versions, see CreateDBInstance, or call DescribeDBEngineVersions.

" }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", @@ -8160,15 +8477,27 @@ }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", - "documentation":"

True to enable Performance Insights for the DB instance, and otherwise false.

" + "documentation":"

True to enable Performance Insights for the DB instance, and otherwise false.

For more information, see Using Amazon Performance Insights in the Amazon Relational Database Service User Guide.

" }, "PerformanceInsightsKMSKeyId":{ "shape":"String", "documentation":"

The AWS KMS key identifier for encryption of Performance Insights data. The KMS key ID is the Amazon Resource Name (ARN), the KMS key identifier, or the KMS key alias for the KMS encryption key.

" }, + "PerformanceInsightsRetentionPeriod":{ + "shape":"IntegerOptional", + "documentation":"

The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

" + }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", "documentation":"

The list of logs that the restored DB instance is to export to CloudWatch Logs.

" + }, + "ProcessorFeatures":{ + "shape":"ProcessorFeatureList", + "documentation":"

The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.

" + }, + "UseDefaultProcessorFeatures":{ + "shape":"BooleanOptional", + "documentation":"

A value that specifies that the DB instance class of the DB instance uses its default processor features.

" } } }, @@ -8203,7 +8532,7 @@ }, "DBInstanceClass":{ "shape":"String", - "documentation":"

The compute and memory capacity of the Amazon RDS DB instance, for example, db.m4.large. Not all DB instance classes are available in all AWS Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.

Default: The same DBInstanceClass as the original DB instance.

" + "documentation":"

The compute and memory capacity of the Amazon RDS DB instance, for example, db.m4.large. Not all DB instance classes are available in all AWS Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.

Default: The same DBInstanceClass as the original DB instance.

" }, "Port":{ "shape":"IntegerOptional", @@ -8281,6 +8610,14 @@ "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", "documentation":"

The list of logs that the restored DB instance is to export to CloudWatch Logs.

" + }, + "ProcessorFeatures":{ + "shape":"ProcessorFeatureList", + "documentation":"

The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.

" + }, + "UseDefaultProcessorFeatures":{ + "shape":"BooleanOptional", + "documentation":"

A value that specifies that the DB instance class of the DB instance uses its default processor features.

" } }, "documentation":"

" @@ -8376,7 +8713,7 @@ "type":"structure", "members":{ }, - "documentation":"

Request would result in user exceeding the allowed number of DB snapshots.

", + "documentation":"

The request would result in the user exceeding the allowed number of DB snapshots.

", "error":{ "code":"SnapshotQuotaExceeded", "httpStatusCode":400, @@ -8493,7 +8830,7 @@ "type":"structure", "members":{ }, - "documentation":"

Request would result in user exceeding the allowed amount of storage available across all DB instances.

", + "documentation":"

The request would result in the user exceeding the allowed amount of storage available across all DB instances.

", "error":{ "code":"StorageQuotaExceeded", "httpStatusCode":400, @@ -8505,7 +8842,7 @@ "type":"structure", "members":{ }, - "documentation":"

StorageType specified cannot be associated with the DB Instance.

", + "documentation":"

Storage of the StorageType specified can't be associated with the DB instance.

", "error":{ "code":"StorageTypeNotSupported", "httpStatusCode":400, @@ -8680,6 +9017,10 @@ "Storage":{ "shape":"ValidStorageOptionsList", "documentation":"

Valid storage options for your DB instance.

" + }, + "ValidProcessorFeatures":{ + "shape":"AvailableProcessorFeatureList", + "documentation":"

Valid processor features for your DB instance.

" } }, "documentation":"

Information about valid modifications that you can make to your DB instance. Contains the result of a successful call to the DescribeValidDBInstanceModifications action. You can use this information when you call ModifyDBInstance.

", diff --git a/botocore/data/redshift/2012-12-01/service-2.json b/botocore/data/redshift/2012-12-01/service-2.json index 076bf413..f184f78a 100644 --- a/botocore/data/redshift/2012-12-01/service-2.json +++ b/botocore/data/redshift/2012-12-01/service-2.json @@ -11,6 +11,28 @@ "xmlNamespace":"http://redshift.amazonaws.com/doc/2012-12-01/" }, "operations":{ + "AcceptReservedNodeExchange":{ + "name":"AcceptReservedNodeExchange", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AcceptReservedNodeExchangeInputMessage"}, + "output":{ + "shape":"AcceptReservedNodeExchangeOutputMessage", + "resultWrapper":"AcceptReservedNodeExchangeResult" + }, + "errors":[ + {"shape":"ReservedNodeNotFoundFault"}, + {"shape":"InvalidReservedNodeStateFault"}, + {"shape":"ReservedNodeAlreadyMigratedFault"}, + {"shape":"ReservedNodeOfferingNotFoundFault"}, + {"shape":"UnsupportedOperationFault"}, + {"shape":"DependentServiceUnavailableFault"}, + {"shape":"ReservedNodeAlreadyExistsFault"} + ], + "documentation":"

Exchanges a DC1 Reserved Node for a DC2 Reserved Node with no changes to the configuration (term, payment type, or number of nodes) and no additional costs.

" + }, "AuthorizeClusterSecurityGroupIngress":{ "name":"AuthorizeClusterSecurityGroupIngress", "http":{ @@ -102,7 +124,7 @@ {"shape":"LimitExceededFault"}, {"shape":"DependentServiceRequestThrottlingFault"} ], - "documentation":"

Creates a new cluster.

To create the cluster in Virtual Private Cloud (VPC), you must provide a cluster subnet group name. The cluster subnet group identifies the subnets of your VPC that Amazon Redshift uses when creating the cluster. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

" + "documentation":"

Creates a new cluster.

To create a cluster in Virtual Private Cloud (VPC), you must provide a cluster subnet group name. The cluster subnet group identifies the subnets of your VPC that Amazon Redshift uses when creating the cluster. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

" }, "CreateClusterParameterGroup":{ "name":"CreateClusterParameterGroup", @@ -283,7 +305,7 @@ {"shape":"ResourceNotFoundFault"}, {"shape":"InvalidTagFault"} ], - "documentation":"

Adds one or more tags to a specified resource.

A resource can have up to 10 tags. If you try to create more than 10 tags for a resource, you will receive an error and the attempt will fail.

If you specify a key that already exists for the resource, the value for that key will be updated with the new value.

" + "documentation":"

Adds one or more tags to a specified resource.

A resource can have up to 50 tags. If you try to create more than 50 tags for a resource, you will receive an error and the attempt will fail.

If you specify a key that already exists for the resource, the value for that key will be updated with the new value.

" }, "DeleteCluster":{ "name":"DeleteCluster", @@ -426,6 +448,22 @@ ], "documentation":"

Deletes a tag or tags from a resource. You must provide the ARN of the resource from which you want to delete the tag or tags.

" }, + "DescribeClusterDbRevisions":{ + "name":"DescribeClusterDbRevisions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeClusterDbRevisionsMessage"}, + "output":{ + "shape":"ClusterDbRevisionsMessage", + "resultWrapper":"DescribeClusterDbRevisionsResult" + }, + "errors":[ + {"shape":"ClusterNotFoundFault"} + ], + "documentation":"

Returns an array of ClusterDbRevision objects.

" + }, "DescribeClusterParameterGroups":{ "name":"DescribeClusterParameterGroups", "http":{ @@ -860,6 +898,27 @@ ], "documentation":"

Returns a database user name and temporary password with temporary authorization to log on to an Amazon Redshift database. The action returns the database user name prefixed with IAM: if AutoCreate is False or IAMA: if AutoCreate is True. You can optionally specify one or more database user groups that the user will join at log on. By default, the temporary credentials expire in 900 seconds. You can optionally specify a duration between 900 seconds (15 minutes) and 3600 seconds (60 minutes). For more information, see Using IAM Authentication to Generate Database User Credentials in the Amazon Redshift Cluster Management Guide.

The AWS Identity and Access Management (IAM) user or role that executes GetClusterCredentials must have an IAM policy attached that allows access to all necessary actions and resources. For more information about permissions, see Resource Policies for GetClusterCredentials in the Amazon Redshift Cluster Management Guide.

If the DbGroups parameter is specified, the IAM policy must allow the redshift:JoinGroup action with access to the listed dbgroups.

In addition, if the AutoCreate parameter is set to True, then the policy must include the redshift:CreateClusterUser privilege.

If the DbName parameter is specified, the IAM policy must allow access to the resource dbname for the specified database name.

" }, + "GetReservedNodeExchangeOfferings":{ + "name":"GetReservedNodeExchangeOfferings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetReservedNodeExchangeOfferingsInputMessage"}, + "output":{ + "shape":"GetReservedNodeExchangeOfferingsOutputMessage", + "resultWrapper":"GetReservedNodeExchangeOfferingsResult" + }, + "errors":[ + {"shape":"ReservedNodeNotFoundFault"}, + {"shape":"InvalidReservedNodeStateFault"}, + {"shape":"ReservedNodeAlreadyMigratedFault"}, + {"shape":"ReservedNodeOfferingNotFoundFault"}, + {"shape":"UnsupportedOperationFault"}, + {"shape":"DependentServiceUnavailableFault"} + ], + "documentation":"

Returns an array of ReservedNodeOfferings which is filtered by payment type, term, and instance type.

" + }, "ModifyCluster":{ "name":"ModifyCluster", "http":{ @@ -887,10 +946,29 @@ {"shape":"ClusterAlreadyExistsFault"}, {"shape":"LimitExceededFault"}, {"shape":"DependentServiceRequestThrottlingFault"}, - {"shape":"InvalidElasticIpFault"} + {"shape":"InvalidElasticIpFault"}, + {"shape":"TableLimitExceededFault"} ], "documentation":"

Modifies the settings for a cluster. For example, you can add another security or parameter group, update the preferred maintenance window, or change the master user password. Resetting a cluster password or modifying the security groups associated with a cluster do not need a reboot. However, modifying a parameter group requires a reboot for parameters to take effect. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

You can also change node type and the number of nodes to scale up or down the cluster. When resizing a cluster, you must specify both the number of nodes and the node type even if one of the parameters does not change.

" }, + "ModifyClusterDbRevision":{ + "name":"ModifyClusterDbRevision", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyClusterDbRevisionMessage"}, + "output":{ + "shape":"ModifyClusterDbRevisionResult", + "resultWrapper":"ModifyClusterDbRevisionResult" + }, + "errors":[ + {"shape":"ClusterNotFoundFault"}, + {"shape":"ClusterOnLatestRevisionFault"}, + {"shape":"InvalidClusterStateFault"} + ], + "documentation":"

Modifies the database revision of a cluster. The database revision is a unique revision of the database running in a cluster.

" + }, "ModifyClusterIamRoles":{ "name":"ModifyClusterIamRoles", "http":{ @@ -1156,6 +1234,29 @@ } }, "shapes":{ + "AcceptReservedNodeExchangeInputMessage":{ + "type":"structure", + "required":[ + "ReservedNodeId", + "TargetReservedNodeOfferingId" + ], + "members":{ + "ReservedNodeId":{ + "shape":"String", + "documentation":"

A string representing the identifier of the Reserved Node to be exchanged.

" + }, + "TargetReservedNodeOfferingId":{ + "shape":"String", + "documentation":"

The unique identifier of the Reserved Node offering to be used for the exchange.

" + } + } + }, + "AcceptReservedNodeExchangeOutputMessage":{ + "type":"structure", + "members":{ + "ExchangedReservedNode":{"shape":"ReservedNode"} + } + }, "AccessToSnapshotDeniedFault":{ "type":"structure", "members":{ @@ -1449,6 +1550,10 @@ "IamRoles":{ "shape":"ClusterIamRoleList", "documentation":"

A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services.

" + }, + "PendingActions":{ + "shape":"PendingActionsList", + "documentation":"

Cluster operations that are waiting to be started.

" } }, "documentation":"

Describes a cluster.

", @@ -1484,6 +1589,48 @@ }, "documentation":"

Temporary credentials with authorization to log on to an Amazon Redshift database.

" }, + "ClusterDbRevision":{ + "type":"structure", + "members":{ + "ClusterIdentifier":{ + "shape":"String", + "documentation":"

The unique identifier of the cluster.

" + }, + "CurrentDatabaseRevision":{ + "shape":"String", + "documentation":"

A string representing the current cluster version.

" + }, + "DatabaseRevisionReleaseDate":{ + "shape":"TStamp", + "documentation":"

The date on which the database revision was released.

" + }, + "RevisionTargets":{ + "shape":"RevisionTargetsList", + "documentation":"

A list of RevisionTarget objects, where each object describes the database revision that a cluster can be updated to.

" + } + }, + "documentation":"

Describes a ClusterDbRevision.

" + }, + "ClusterDbRevisionsList":{ + "type":"list", + "member":{ + "shape":"ClusterDbRevision", + "locationName":"ClusterDbRevision" + } + }, + "ClusterDbRevisionsMessage":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

A string representing the starting point for the next set of revisions. If a value is returned in a response, you can retrieve the next set of revisions by providing the value in the marker parameter and retrying the command. If the marker field is empty, all revisions have already been returned.

" + }, + "ClusterDbRevisions":{ + "shape":"ClusterDbRevisionsList", + "documentation":"

A list of revisions.

" + } + } + }, "ClusterIamRole":{ "type":"structure", "members":{ @@ -1546,6 +1693,18 @@ }, "exception":true }, + "ClusterOnLatestRevisionFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

Cluster is already on the latest database revision.

", + "error":{ + "code":"ClusterOnLatestRevision", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "ClusterParameterGroup":{ "type":"structure", "members":{ @@ -2664,6 +2823,23 @@ }, "exception":true }, + "DescribeClusterDbRevisionsMessage":{ + "type":"structure", + "members":{ + "ClusterIdentifier":{ + "shape":"String", + "documentation":"

A unique identifier for a cluster whose ClusterDbRevisions you are requesting. This parameter is case sensitive. All clusters defined for an account are returned by default.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in the marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the marker parameter and retrying the request.

Default: 100

Constraints: minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional parameter that specifies the starting point for returning a set of response records. When the results of a DescribeClusterDbRevisions request exceed the value specified in MaxRecords, Amazon Redshift returns a value in the marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the marker parameter and retrying the request.

Constraints: You can specify either the ClusterIdentifier parameter, or the marker parameter, but not both.

" + } + } + }, "DescribeClusterParameterGroupsMessage":{ "type":"structure", "members":{ @@ -3551,6 +3727,38 @@ }, "documentation":"

The request parameters to get cluster credentials.

" }, + "GetReservedNodeExchangeOfferingsInputMessage":{ + "type":"structure", + "required":["ReservedNodeId"], + "members":{ + "ReservedNodeId":{ + "shape":"String", + "documentation":"

A string representing the node identifier for the Reserved Node to be exchanged.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

An integer setting the maximum number of ReservedNodeOfferings to retrieve.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

A value that indicates the starting point for the next set of ReservedNodeOfferings.

" + } + }, + "documentation":"

" + }, + "GetReservedNodeExchangeOfferingsOutputMessage":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

An optional parameter that specifies the starting point for returning a set of response records. When the results of a GetReservedNodeExchangeOfferings request exceed the value specified in MaxRecords, Amazon Redshift returns a value in the marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the marker parameter and retrying the request.

" + }, + "ReservedNodeOfferings":{ + "shape":"ReservedNodeOfferingList", + "documentation":"

Returns an array of ReservedNodeOffering objects.

" + } + } + }, "HsmClientCertificate":{ "type":"structure", "members":{ @@ -3931,6 +4139,18 @@ }, "exception":true }, + "InvalidReservedNodeStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

Indicates that the Reserved Node being exchanged is not in an active state.

", + "error":{ + "code":"InvalidReservedNodeState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "InvalidRestoreFault":{ "type":"structure", "members":{ @@ -4083,6 +4303,29 @@ }, "Long":{"type":"long"}, "LongOptional":{"type":"long"}, + "ModifyClusterDbRevisionMessage":{ + "type":"structure", + "required":[ + "ClusterIdentifier", + "RevisionTarget" + ], + "members":{ + "ClusterIdentifier":{ + "shape":"String", + "documentation":"

The unique identifier of a cluster whose database revision you want to modify.

Example: examplecluster

" + }, + "RevisionTarget":{ + "shape":"String", + "documentation":"

The identifier of the database revision. You can retrieve this value from the response to the DescribeClusterDbRevisions request.

" + } + } + }, + "ModifyClusterDbRevisionResult":{ + "type":"structure", + "members":{ + "Cluster":{"shape":"Cluster"} + } + }, "ModifyClusterIamRolesMessage":{ "type":"structure", "required":["ClusterIdentifier"], @@ -4134,7 +4377,7 @@ }, "VpcSecurityGroupIds":{ "shape":"VpcSecurityGroupIdList", - "documentation":"

A list of virtual private cloud (VPC) security groups to be associated with the cluster.

" + "documentation":"

A list of virtual private cloud (VPC) security groups to be associated with the cluster. This change is asynchronously applied as soon as possible.

" }, "MasterUserPassword":{ "shape":"String", @@ -4435,6 +4678,10 @@ "locationName":"Parameter" } }, + "PendingActionsList":{ + "type":"list", + "member":{"shape":"String"} + }, "PendingModifiedValues":{ "type":"structure", "members":{ @@ -4578,7 +4825,7 @@ }, "State":{ "shape":"String", - "documentation":"

The state of the reserved compute node.

Possible Values:

  • pending-payment-This reserved node has recently been purchased, and the sale has been approved, but payment has not yet been confirmed.

  • active-This reserved node is owned by the caller and is available for use.

  • payment-failed-Payment failed for the purchase attempt.

" + "documentation":"

The state of the reserved compute node.

Possible Values:

  • pending-payment-This reserved node has recently been purchased, and the sale has been approved, but payment has not yet been confirmed.

  • active-This reserved node is owned by the caller and is available for use.

  • payment-failed-Payment failed for the purchase attempt.

  • retired-The reserved node is no longer available.

  • exchanging-The owner is exchanging the reserved node for another reserved node.

" }, "OfferingType":{ "shape":"String", @@ -4605,6 +4852,18 @@ }, "exception":true }, + "ReservedNodeAlreadyMigratedFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

Indicates that the reserved node has already been exchanged.

", + "error":{ + "code":"ReservedNodeAlreadyMigrated", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "ReservedNodeList":{ "type":"list", "member":{ @@ -5019,6 +5278,31 @@ "TableRestoreStatus":{"shape":"TableRestoreStatus"} } }, + "RevisionTarget":{ + "type":"structure", + "members":{ + "DatabaseRevision":{ + "shape":"String", + "documentation":"

A unique string that identifies the version to update the cluster to. You can use this value in ModifyClusterDbRevision.

" + }, + "Description":{ + "shape":"String", + "documentation":"

A string that describes the changes and features that will be applied to the cluster when it is updated to the corresponding ClusterDbRevision.

" + }, + "DatabaseRevisionReleaseDate":{ + "shape":"TStamp", + "documentation":"

The date on which the database revision was released.

" + } + }, + "documentation":"

Describes a RevisionTarget.

" + }, + "RevisionTargetsList":{ + "type":"list", + "member":{ + "shape":"RevisionTarget", + "locationName":"RevisionTarget" + } + }, "RevokeClusterSecurityGroupIngressMessage":{ "type":"structure", "required":["ClusterSecurityGroupName"], @@ -5535,6 +5819,18 @@ } }, "TStamp":{"type":"timestamp"}, + "TableLimitExceededFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The number of tables in the cluster exceeds the limit for the requested new cluster node type.

", + "error":{ + "code":"TableLimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "TableRestoreNotFoundFault":{ "type":"structure", "members":{ @@ -5666,7 +5962,7 @@ "type":"structure", "members":{ }, - "documentation":"

The request exceeds the limit of 10 tags for the resource.

", + "documentation":"

You have exceeded the number of tags allowed.

", "error":{ "code":"TagLimitExceededFault", "httpStatusCode":400, diff --git a/botocore/data/rekognition/2016-06-27/service-2.json b/botocore/data/rekognition/2016-06-27/service-2.json index 2d7f2e47..b8cf7c67 100644 --- a/botocore/data/rekognition/2016-06-27/service-2.json +++ b/botocore/data/rekognition/2016-06-27/service-2.json @@ -30,7 +30,7 @@ {"shape":"ProvisionedThroughputExceededException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

Compares a face in the source input image with each of the 100 largest faces detected in the target input image.

If the source image contains multiple faces, the service detects the largest face and compares it with each face detected in the target image.

You pass the input and target images either as base64-encoded image bytes or as a references to images in an Amazon S3 bucket. If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

In response, the operation returns an array of face matches ordered by similarity score in descending order. For each face match, the response provides a bounding box of the face, facial landmarks, pose details (pitch, role, and yaw), quality (brightness and sharpness), and confidence value (indicating the level of confidence that the bounding box contains a face). The response also provides a similarity score, which indicates how closely the faces match.

By default, only faces with a similarity score of greater than or equal to 80% are returned in the response. You can change this value by specifying the SimilarityThreshold parameter.

CompareFaces also returns an array of faces that don't match the source image. For each face, it returns a bounding box, confidence value, landmarks, pose details, and quality. The response also returns information about the face in the source image, including the bounding box of the face and confidence value.

If the image doesn't contain Exif metadata, CompareFaces returns orientation information for the source and target images. Use these values to display the images with the correct image orientation.

If no faces are detected in the source or target images, CompareFaces returns an InvalidParameterException error.

This is a stateless API operation. That is, data returned by this operation doesn't persist.

For an example, see faces-compare-images.

This operation requires permissions to perform the rekognition:CompareFaces action.

" + "documentation":"

Compares a face in the source input image with each of the 100 largest faces detected in the target input image.

If the source image contains multiple faces, the service detects the largest face and compares it with each face detected in the target image.

You pass the input and target images either as base64-encoded image bytes or as references to images in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

In response, the operation returns an array of face matches ordered by similarity score in descending order. For each face match, the response provides a bounding box of the face, facial landmarks, pose details (pitch, roll, and yaw), quality (brightness and sharpness), and confidence value (indicating the level of confidence that the bounding box contains a face). The response also provides a similarity score, which indicates how closely the faces match.

By default, only faces with a similarity score of greater than or equal to 80% are returned in the response. You can change this value by specifying the SimilarityThreshold parameter.

CompareFaces also returns an array of faces that don't match the source image. For each face, it returns a bounding box, confidence value, landmarks, pose details, and quality. The response also returns information about the face in the source image, including the bounding box of the face and confidence value.

If the image doesn't contain Exif metadata, CompareFaces returns orientation information for the source and target images. Use these values to display the images with the correct image orientation.

If no faces are detected in the source or target images, CompareFaces returns an InvalidParameterException error.

This is a stateless API operation. That is, data returned by this operation doesn't persist.

For an example, see Comparing Faces in Images in the Amazon Rekognition Developer Guide.

This operation requires permissions to perform the rekognition:CompareFaces action.

" }, "CreateCollection":{ "name":"CreateCollection", @@ -67,7 +67,7 @@ {"shape":"ResourceInUseException"}, {"shape":"ProvisionedThroughputExceededException"} ], - "documentation":"

Creates an Amazon Rekognition stream processor that you can use to detect and recognize faces in a streaming video.

Rekognition Video is a consumer of live video from Amazon Kinesis Video Streams. Rekognition Video sends analysis results to Amazon Kinesis Data Streams.

You provide as input a Kinesis video stream (Input) and a Kinesis data stream (Output) stream. You also specify the face recognition criteria in Settings. For example, the collection containing faces that you want to recognize. Use Name to assign an identifier for the stream processor. You use Name to manage the stream processor. For example, you can start processing the source video by calling with the Name field.

After you have finished analyzing a streaming video, use to stop processing. You can delete the stream processor by calling .

" + "documentation":"

Creates an Amazon Rekognition stream processor that you can use to detect and recognize faces in a streaming video.

Amazon Rekognition Video is a consumer of live video from Amazon Kinesis Video Streams. Amazon Rekognition Video sends analysis results to Amazon Kinesis Data Streams.

You provide as input a Kinesis video stream (Input) and a Kinesis data stream (Output) stream. You also specify the face recognition criteria in Settings. For example, the collection containing faces that you want to recognize. Use Name to assign an identifier for the stream processor. You use Name to manage the stream processor. For example, you can start processing the source video by calling with the Name field.

After you have finished analyzing a streaming video, use to stop processing. You can delete the stream processor by calling .

" }, "DeleteCollection":{ "name":"DeleteCollection", @@ -160,7 +160,7 @@ {"shape":"ProvisionedThroughputExceededException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

Detects faces within an image that is provided as input.

DetectFaces detects the 100 largest faces in the image. For each face detected, the operation returns face details including a bounding box of the face, a confidence value (that the bounding box contains a face), and a fixed set of attributes such as facial landmarks (for example, coordinates of eye and mouth), gender, presence of beard, sunglasses, etc.

The face-detection algorithm is most effective on frontal faces. For non-frontal or obscured faces, the algorithm may not detect the faces or might detect faces with lower confidence.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

This is a stateless API operation. That is, the operation does not persist any data.

For an example, see procedure-detecting-faces-in-images.

This operation requires permissions to perform the rekognition:DetectFaces action.

" + "documentation":"

Detects faces within an image that is provided as input.

DetectFaces detects the 100 largest faces in the image. For each face detected, the operation returns face details including a bounding box of the face, a confidence value (that the bounding box contains a face), and a fixed set of attributes such as facial landmarks (for example, coordinates of eye and mouth), gender, presence of beard, sunglasses, etc.

The face-detection algorithm is most effective on frontal faces. For non-frontal or obscured faces, the algorithm may not detect the faces or might detect faces with lower confidence.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

This is a stateless API operation. That is, the operation does not persist any data.

This operation requires permissions to perform the rekognition:DetectFaces action.

" }, "DetectLabels":{ "name":"DetectLabels", @@ -180,7 +180,7 @@ {"shape":"ProvisionedThroughputExceededException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature. For an example, see images-s3.

DetectLabels does not support the detection of activities. However, activity detection is supported for label detection in videos. For more information, see .

You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

For each object, scene, and concept the API returns one or more labels. Each label provides the object name, and the level of confidence that the image contains the object. For example, suppose the input image has a lighthouse, the sea, and a rock. The response will include all three labels, one for each object.

{Name: lighthouse, Confidence: 98.4629}

{Name: rock,Confidence: 79.2097}

{Name: sea,Confidence: 75.061}

In the preceding example, the operation returns one label for each of the three objects. The operation can also return multiple labels for the same object in the image. For example, if the input image shows a flower (for example, a tulip), the operation might return the following three labels.

{Name: flower,Confidence: 99.0562}

{Name: plant,Confidence: 99.0562}

{Name: tulip,Confidence: 99.0562}

In this example, the detection algorithm more precisely identifies the flower as a tulip.

In response, the API returns an array of labels. In addition, the response also includes the orientation correction. Optionally, you can specify MinConfidence to control the confidence threshold for the labels returned. The default is 50%. You can also add the MaxLabels parameter to limit the number of labels returned.

If the object detected is a person, the operation doesn't provide the same facial details that the DetectFaces operation provides.

This is a stateless API operation. That is, the operation does not persist any data.

This operation requires permissions to perform the rekognition:DetectLabels action.

" + "documentation":"

Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature.

For an example, see Analyzing Images Stored in an Amazon S3 Bucket in the Amazon Rekognition Developer Guide.

DetectLabels does not support the detection of activities. However, activity detection is supported for label detection in videos. For more information, see StartLabelDetection in the Amazon Rekognition Developer Guide.

You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

For each object, scene, and concept the API returns one or more labels. Each label provides the object name, and the level of confidence that the image contains the object. For example, suppose the input image has a lighthouse, the sea, and a rock. The response will include all three labels, one for each object.

{Name: lighthouse, Confidence: 98.4629}

{Name: rock,Confidence: 79.2097}

{Name: sea,Confidence: 75.061}

In the preceding example, the operation returns one label for each of the three objects. The operation can also return multiple labels for the same object in the image. For example, if the input image shows a flower (for example, a tulip), the operation might return the following three labels.

{Name: flower,Confidence: 99.0562}

{Name: plant,Confidence: 99.0562}

{Name: tulip,Confidence: 99.0562}

In this example, the detection algorithm more precisely identifies the flower as a tulip.

In response, the API returns an array of labels. In addition, the response also includes the orientation correction. Optionally, you can specify MinConfidence to control the confidence threshold for the labels returned. The default is 50%. You can also add the MaxLabels parameter to limit the number of labels returned.

If the object detected is a person, the operation doesn't provide the same facial details that the DetectFaces operation provides.

This is a stateless API operation. That is, the operation does not persist any data.

This operation requires permissions to perform the rekognition:DetectLabels action.

" }, "DetectModerationLabels":{ "name":"DetectModerationLabels", @@ -200,7 +200,7 @@ {"shape":"ProvisionedThroughputExceededException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

Detects explicit or suggestive adult content in a specified JPEG or PNG format image. Use DetectModerationLabels to moderate images depending on your requirements. For example, you might want to filter images that contain nudity, but not images containing suggestive content.

To filter images, use the labels returned by DetectModerationLabels to determine which types of content are appropriate. For information about moderation labels, see moderation.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

" + "documentation":"

Detects explicit or suggestive adult content in a specified JPEG or PNG format image. Use DetectModerationLabels to moderate images depending on your requirements. For example, you might want to filter images that contain nudity, but not images containing suggestive content.

To filter images, use the labels returned by DetectModerationLabels to determine which types of content are appropriate.

For information about moderation labels, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

" }, "DetectText":{ "name":"DetectText", @@ -220,7 +220,7 @@ {"shape":"ProvisionedThroughputExceededException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

Detects text in the input image and converts it into machine-readable text.

Pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, you must pass it as a reference to an image in an Amazon S3 bucket. For the AWS CLI, passing image bytes is not supported. The image must be either a .png or .jpeg formatted file.

The DetectText operation returns text in an array of elements, TextDetections. Each TextDetection element provides information about a single word or line of text that was detected in the image.

A word is one or more ISO basic latin script characters that are not separated by spaces. DetectText can detect up to 50 words in an image.

A line is a string of equally spaced words. A line isn't necessarily a complete sentence. For example, a driver's license number is detected as a line. A line ends when there is no aligned text after it. Also, a line ends when there is a large gap between words, relative to the length of the words. This means, depending on the gap between words, Amazon Rekognition may detect multiple lines in text aligned in the same direction. Periods don't represent the end of a line. If a sentence spans multiple lines, the DetectText operation returns multiple lines.

To determine whether a TextDetection element is a line of text or a word, use the TextDetection object Type field.

To be detected, text must be within +/- 30 degrees orientation of the horizontal axis.

For more information, see text-detection.

" + "documentation":"

Detects text in the input image and converts it into machine-readable text.

Pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, you must pass it as a reference to an image in an Amazon S3 bucket. For the AWS CLI, passing image bytes is not supported. The image must be either a .png or .jpeg formatted file.

The DetectText operation returns text in an array of elements, TextDetections. Each TextDetection element provides information about a single word or line of text that was detected in the image.

A word is one or more ISO basic latin script characters that are not separated by spaces. DetectText can detect up to 50 words in an image.

A line is a string of equally spaced words. A line isn't necessarily a complete sentence. For example, a driver's license number is detected as a line. A line ends when there is no aligned text after it. Also, a line ends when there is a large gap between words, relative to the length of the words. This means, depending on the gap between words, Amazon Rekognition may detect multiple lines in text aligned in the same direction. Periods don't represent the end of a line. If a sentence spans multiple lines, the DetectText operation returns multiple lines.

To determine whether a TextDetection element is a line of text or a word, use the TextDetection object Type field.

To be detected, text must be within +/- 30 degrees orientation of the horizontal axis.

For more information, see DetectText in the Amazon Rekognition Developer Guide.

" }, "GetCelebrityInfo":{ "name":"GetCelebrityInfo", @@ -238,7 +238,7 @@ {"shape":"ProvisionedThroughputExceededException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Gets the name and additional information about a celebrity based on his or her Rekognition ID. The additional information is returned as an array of URLs. If there is no additional information about the celebrity, this list is empty. For more information, see get-celebrity-info-procedure.

This operation requires permissions to perform the rekognition:GetCelebrityInfo action.

" + "documentation":"

Gets the name and additional information about a celebrity based on his or her Rekognition ID. The additional information is returned as an array of URLs. If there is no additional information about the celebrity, this list is empty.

For more information, see Recognizing Celebrities in an Image in the Amazon Rekognition Developer Guide.

This operation requires permissions to perform the rekognition:GetCelebrityInfo action.

" }, "GetCelebrityRecognition":{ "name":"GetCelebrityRecognition", @@ -257,7 +257,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Gets the celebrity recognition results for a Rekognition Video analysis started by .

Celebrity recognition in a video is an asynchronous operation. Analysis is started by a call to which returns a job identifier (JobId). When the celebrity recognition operation finishes, Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartCelebrityRecognition. To get the results of the celebrity recognition analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetCelebrityDetection and pass the job identifier (JobId) from the initial call to StartCelebrityDetection. For more information, see video.

GetCelebrityRecognition returns detected celebrities and the time(s) they are detected in an array (Celebrities) of objects. Each CelebrityRecognition contains information about the celebrity in a object and the time, Timestamp, the celebrity was detected.

GetCelebrityRecognition only returns the default facial attributes (BoundingBox, Confidence, Landmarks, Pose, and Quality). The other facial attributes listed in the Face object of the following response syntax are not returned. For more information, see .

By default, the Celebrities array is sorted by time (milliseconds from the start of the video). You can also sort the array by celebrity by specifying the value ID in the SortBy input parameter.

The CelebrityDetail object includes the celebrity identifer and additional information urls. If you don't store the additional information urls, you can get them later by calling with the celebrity identifer.

No information is returned for faces not recognized as celebrities.

Use MaxResults parameter to limit the number of labels returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetCelebrityDetection and populate the NextToken request parameter with the token value returned from the previous call to GetCelebrityRecognition.

" + "documentation":"

Gets the celebrity recognition results for an Amazon Rekognition Video analysis started by .

Celebrity recognition in a video is an asynchronous operation. Analysis is started by a call to which returns a job identifier (JobId). When the celebrity recognition operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartCelebrityRecognition. To get the results of the celebrity recognition analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetCelebrityDetection and pass the job identifier (JobId) from the initial call to StartCelebrityDetection.

For more information, see Working With Stored Videos in the Amazon Rekognition Developer Guide.

GetCelebrityRecognition returns detected celebrities and the time(s) they are detected in an array (Celebrities) of objects. Each CelebrityRecognition contains information about the celebrity in a object and the time, Timestamp, the celebrity was detected.

GetCelebrityRecognition only returns the default facial attributes (BoundingBox, Confidence, Landmarks, Pose, and Quality). The other facial attributes listed in the Face object of the following response syntax are not returned. For more information, see FaceDetail in the Amazon Rekognition Developer Guide.

By default, the Celebrities array is sorted by time (milliseconds from the start of the video). You can also sort the array by celebrity by specifying the value ID in the SortBy input parameter.

The CelebrityDetail object includes the celebrity identifier and additional information urls. If you don't store the additional information urls, you can get them later by calling with the celebrity identifier.

No information is returned for faces not recognized as celebrities.

Use MaxResults parameter to limit the number of labels returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetCelebrityDetection and populate the NextToken request parameter with the token value returned from the previous call to GetCelebrityRecognition.

" }, "GetContentModeration":{ "name":"GetContentModeration", @@ -276,7 +276,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Gets the content moderation analysis results for a Rekognition Video analysis started by .

Content moderation analysis of a video is an asynchronous operation. You start analysis by calling . which returns a job identifier (JobId). When analysis finishes, Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartContentModeration. To get the results of the content moderation analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetCelebrityDetection and pass the job identifier (JobId) from the initial call to StartCelebrityDetection. For more information, see video.

GetContentModeration returns detected content moderation labels, and the time they are detected, in an array, ModerationLabels, of objects.

By default, the moderated labels are returned sorted by time, in milliseconds from the start of the video. You can also sort them by moderated label by specifying NAME for the SortBy input parameter.

Since video analysis can return a large number of results, use the MaxResults parameter to limit the number of labels returned in a single call to GetContentModeration. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetContentModeration and populate the NextToken request parameter with the value of NextToken returned from the previous call to GetContentModeration.

For more information, see moderation.

" + "documentation":"

Gets the content moderation analysis results for an Amazon Rekognition Video analysis started by .

Content moderation analysis of a video is an asynchronous operation. You start analysis by calling . which returns a job identifier (JobId). When analysis finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartContentModeration. To get the results of the content moderation analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetCelebrityDetection and pass the job identifier (JobId) from the initial call to StartCelebrityDetection.

For more information, see Working with Stored Videos in the Amazon Rekognition Developer Guide.

GetContentModeration returns detected content moderation labels, and the time they are detected, in an array, ModerationLabels, of objects.

By default, the moderated labels are returned sorted by time, in milliseconds from the start of the video. You can also sort them by moderated label by specifying NAME for the SortBy input parameter.

Since video analysis can return a large number of results, use the MaxResults parameter to limit the number of labels returned in a single call to GetContentModeration. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetContentModeration and populate the NextToken request parameter with the value of NextToken returned from the previous call to GetContentModeration.

For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

" }, "GetFaceDetection":{ "name":"GetFaceDetection", @@ -295,7 +295,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Gets face detection results for a Rekognition Video analysis started by .

Face detection with Rekognition Video is an asynchronous operation. You start face detection by calling which returns a job identifier (JobId). When the face detection operation finishes, Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartFaceDetection. To get the results of the face detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartFaceDetection.

GetFaceDetection returns an array of detected faces (Faces) sorted by the time the faces were detected.

Use MaxResults parameter to limit the number of labels returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetFaceDetection and populate the NextToken request parameter with the token value returned from the previous call to GetFaceDetection.

" + "documentation":"

Gets face detection results for an Amazon Rekognition Video analysis started by .

Face detection with Amazon Rekognition Video is an asynchronous operation. You start face detection by calling which returns a job identifier (JobId). When the face detection operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartFaceDetection. To get the results of the face detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartFaceDetection.

GetFaceDetection returns an array of detected faces (Faces) sorted by the time the faces were detected.

Use MaxResults parameter to limit the number of labels returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetFaceDetection and populate the NextToken request parameter with the token value returned from the previous call to GetFaceDetection.

" }, "GetFaceSearch":{ "name":"GetFaceSearch", @@ -314,7 +314,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Gets the face search results for Rekognition Video face search started by . The search returns faces in a collection that match the faces of persons detected in a video. It also includes the time(s) that faces are matched in the video.

Face search in a video is an asynchronous operation. You start face search by calling to which returns a job identifier (JobId). When the search operation finishes, Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartFaceSearch. To get the search results, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceSearch and pass the job identifier (JobId) from the initial call to StartFaceSearch. For more information, see collections.

The search results are retured in an array, Persons, of objects. EachPersonMatch element contains details about the matching faces in the input collection, person information (facial attributes, bounding boxes, and person identifer) for the matched person, and the time the person was matched in the video.

GetFaceSearch only returns the default facial attributes (BoundingBox, Confidence, Landmarks, Pose, and Quality). The other facial attributes listed in the Face object of the following response syntax are not returned. For more information, see .

By default, the Persons array is sorted by the time, in milliseconds from the start of the video, persons are matched. You can also sort by persons by specifying INDEX for the SORTBY input parameter.

" + "documentation":"

Gets the face search results for Amazon Rekognition Video face search started by . The search returns faces in a collection that match the faces of persons detected in a video. It also includes the time(s) that faces are matched in the video.

Face search in a video is an asynchronous operation. You start face search by calling to which returns a job identifier (JobId). When the search operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartFaceSearch. To get the search results, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceSearch and pass the job identifier (JobId) from the initial call to StartFaceSearch.

For more information, see Searching Faces in a Collection in the Amazon Rekognition Developer Guide.

The search results are returned in an array, Persons, of objects. Each PersonMatch element contains details about the matching faces in the input collection, person information (facial attributes, bounding boxes, and person identifier) for the matched person, and the time the person was matched in the video.

GetFaceSearch only returns the default facial attributes (BoundingBox, Confidence, Landmarks, Pose, and Quality). The other facial attributes listed in the Face object of the following response syntax are not returned. For more information, see FaceDetail in the Amazon Rekognition Developer Guide.

By default, the Persons array is sorted by the time, in milliseconds from the start of the video, persons are matched. You can also sort by persons by specifying INDEX for the SORTBY input parameter.

" }, "GetLabelDetection":{ "name":"GetLabelDetection", @@ -333,7 +333,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Gets the label detection results of a Rekognition Video analysis started by .

The label detection operation is started by a call to which returns a job identifier (JobId). When the label detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartlabelDetection. To get the results of the label detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartLabelDetection.

GetLabelDetection returns an array of detected labels (Labels) sorted by the time the labels were detected. You can also sort by the label name by specifying NAME for the SortBy input parameter.

The labels returned include the label name, the percentage confidence in the accuracy of the detected label, and the time the label was detected in the video.

Use MaxResults parameter to limit the number of labels returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetlabelDetection and populate the NextToken request parameter with the token value returned from the previous call to GetLabelDetection.

" + "documentation":"

Gets the label detection results of an Amazon Rekognition Video analysis started by .

The label detection operation is started by a call to which returns a job identifier (JobId). When the label detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartLabelDetection. To get the results of the label detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartLabelDetection.

GetLabelDetection returns an array of detected labels (Labels) sorted by the time the labels were detected. You can also sort by the label name by specifying NAME for the SortBy input parameter.

The labels returned include the label name, the percentage confidence in the accuracy of the detected label, and the time the label was detected in the video.

Use MaxResults parameter to limit the number of labels returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetLabelDetection and populate the NextToken request parameter with the token value returned from the previous call to GetLabelDetection.

" }, "GetPersonTracking":{ "name":"GetPersonTracking", @@ -352,7 +352,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Gets the person tracking results of a Rekognition Video analysis started by .

The person detection operation is started by a call to StartPersonTracking which returns a job identifier (JobId). When the person detection operation finishes, Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartPersonTracking.

To get the results of the person tracking operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartPersonTracking.

GetPersonTracking returns an array, Persons, of tracked persons and the time(s) they were tracked in the video.

GetPersonTracking only returns the default facial attributes (BoundingBox, Confidence, Landmarks, Pose, and Quality). The other facial attributes listed in the Face object of the following response syntax are not returned. For more information, see .

By default, the array is sorted by the time(s) a person is tracked in the video. You can sort by tracked persons by specifying INDEX for the SortBy input parameter.

Use the MaxResults parameter to limit the number of items returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetPersonTracking and populate the NextToken request parameter with the token value returned from the previous call to GetPersonTracking.

" + "documentation":"

Gets the person tracking results of an Amazon Rekognition Video analysis started by .

The person detection operation is started by a call to StartPersonTracking which returns a job identifier (JobId). When the person detection operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartPersonTracking.

To get the results of the person tracking operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartPersonTracking.

GetPersonTracking returns an array, Persons, of tracked persons and the time(s) they were tracked in the video.

GetPersonTracking only returns the default facial attributes (BoundingBox, Confidence, Landmarks, Pose, and Quality). The other facial attributes listed in the Face object of the following response syntax are not returned.

For more information, see FaceDetail in the Amazon Rekognition Developer Guide.

By default, the array is sorted by the time(s) a person is tracked in the video. You can sort by tracked persons by specifying INDEX for the SortBy input parameter.

Use the MaxResults parameter to limit the number of items returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetPersonTracking and populate the NextToken request parameter with the token value returned from the previous call to GetPersonTracking.

" }, "IndexFaces":{ "name":"IndexFaces", @@ -373,7 +373,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

Detects faces in the input image and adds them to the specified collection.

Amazon Rekognition does not save the actual faces detected. Instead, the underlying detection algorithm first detects the faces in the input image, and for each face extracts facial features into a feature vector, and stores it in the back-end database. Amazon Rekognition uses feature vectors when performing face match and search operations using the and operations.

If you are using version 1.0 of the face detection model, IndexFaces indexes the 15 largest faces in the input image. Later versions of the face detection model index the 100 largest faces in the input image. To determine which version of the model you are using, check the the value of FaceModelVersion in the response from IndexFaces. For more information, see face-detection-model.

If you provide the optional ExternalImageID for the input image you provided, Amazon Rekognition associates this ID with all faces that it detects. When you call the operation, the response returns the external ID. You can use this external image ID to create a client-side index to associate the faces with each image. You can then use the index to find all faces in an image.

In response, the operation returns an array of metadata for all detected faces. This includes, the bounding box of the detected face, confidence value (indicating the bounding box contains a face), a face ID assigned by the service for each face that is detected and stored, and an image ID assigned by the service for the input image. If you request all facial attributes (using the detectionAttributes parameter, Amazon Rekognition returns detailed facial attributes such as facial landmarks (for example, location of eye and mount) and other facial attributes such gender. If you provide the same image, specify the same collection, and use the same external ID in the IndexFaces operation, Amazon Rekognition doesn't save duplicate face metadata.

The input image is passed either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

This operation requires permissions to perform the rekognition:IndexFaces action.

" + "documentation":"

Detects faces in the input image and adds them to the specified collection.

Amazon Rekognition does not save the actual faces detected. Instead, the underlying detection algorithm first detects the faces in the input image, and for each face extracts facial features into a feature vector, and stores it in the back-end database. Amazon Rekognition uses feature vectors when performing face match and search operations using the and operations.

If you are using version 1.0 of the face detection model, IndexFaces indexes the 15 largest faces in the input image. Later versions of the face detection model index the 100 largest faces in the input image. To determine which version of the model you are using, check the value of FaceModelVersion in the response from IndexFaces.

For more information, see Model Versioning in the Amazon Rekognition Developer Guide.

If you provide the optional ExternalImageID for the input image you provided, Amazon Rekognition associates this ID with all faces that it detects. When you call the operation, the response returns the external ID. You can use this external image ID to create a client-side index to associate the faces with each image. You can then use the index to find all faces in an image.

In response, the operation returns an array of metadata for all detected faces. This includes, the bounding box of the detected face, confidence value (indicating the bounding box contains a face), a face ID assigned by the service for each face that is detected and stored, and an image ID assigned by the service for the input image. If you request all facial attributes (using the detectionAttributes parameter), Amazon Rekognition returns detailed facial attributes such as facial landmarks (for example, location of eye and mouth) and other facial attributes such as gender. If you provide the same image, specify the same collection, and use the same external ID in the IndexFaces operation, Amazon Rekognition doesn't save duplicate face metadata.

For more information, see Adding Faces to a Collection in the Amazon Rekognition Developer Guide.

The input image is passed either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

This operation requires permissions to perform the rekognition:IndexFaces action.

" }, "ListCollections":{ "name":"ListCollections", @@ -392,7 +392,7 @@ {"shape":"InvalidPaginationTokenException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Returns list of collection IDs in your account. If the result is truncated, the response also provides a NextToken that you can use in the subsequent request to fetch the next set of collection IDs.

For an example, see list-collection-procedure.

This operation requires permissions to perform the rekognition:ListCollections action.

" + "documentation":"

Returns list of collection IDs in your account. If the result is truncated, the response also provides a NextToken that you can use in the subsequent request to fetch the next set of collection IDs.

For an example, see Listing Collections in the Amazon Rekognition Developer Guide.

This operation requires permissions to perform the rekognition:ListCollections action.

" }, "ListFaces":{ "name":"ListFaces", @@ -411,7 +411,7 @@ {"shape":"InvalidPaginationTokenException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Returns metadata for faces in the specified collection. This metadata includes information such as the bounding box coordinates, the confidence (that the bounding box contains a face), and face ID. For an example, see list-faces-in-collection-procedure.

This operation requires permissions to perform the rekognition:ListFaces action.

" + "documentation":"

Returns metadata for faces in the specified collection. This metadata includes information such as the bounding box coordinates, the confidence (that the bounding box contains a face), and face ID. For an example, see Listing Faces in a Collection in the Amazon Rekognition Developer Guide.

This operation requires permissions to perform the rekognition:ListFaces action.

" }, "ListStreamProcessors":{ "name":"ListStreamProcessors", @@ -450,7 +450,7 @@ {"shape":"ProvisionedThroughputExceededException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

Returns an array of celebrities recognized in the input image. For more information, see celebrities.

RecognizeCelebrities returns the 100 largest faces in the image. It lists recognized celebrities in the CelebrityFaces array and unrecognized faces in the UnrecognizedFaces array. RecognizeCelebrities doesn't return celebrities whose faces are not amongst the largest 100 faces in the image.

For each celebrity recognized, the RecognizeCelebrities returns a Celebrity object. The Celebrity object contains the celebrity name, ID, URL links to additional information, match confidence, and a ComparedFace object that you can use to locate the celebrity's face on the image.

Rekognition does not retain information about which images a celebrity has been recognized in. Your application must store this information and use the Celebrity ID property as a unique identifier for the celebrity. If you don't store the celebrity name or additional information URLs returned by RecognizeCelebrities, you will need the ID to identify the celebrity in a call to the operation.

You pass the imput image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

For an example, see celebrities-procedure-image.

This operation requires permissions to perform the rekognition:RecognizeCelebrities operation.

" + "documentation":"

Returns an array of celebrities recognized in the input image. For more information, see Recognizing Celebrities in the Amazon Rekognition Developer Guide.

RecognizeCelebrities returns the 100 largest faces in the image. It lists recognized celebrities in the CelebrityFaces array and unrecognized faces in the UnrecognizedFaces array. RecognizeCelebrities doesn't return celebrities whose faces are not amongst the largest 100 faces in the image.

For each celebrity recognized, the RecognizeCelebrities returns a Celebrity object. The Celebrity object contains the celebrity name, ID, URL links to additional information, match confidence, and a ComparedFace object that you can use to locate the celebrity's face on the image.

Amazon Rekognition does not retain information about which images a celebrity has been recognized in. Your application must store this information and use the Celebrity ID property as a unique identifier for the celebrity. If you don't store the celebrity name or additional information URLs returned by RecognizeCelebrities, you will need the ID to identify the celebrity in a call to the operation.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

For an example, see Recognizing Celebrities in an Image in the Amazon Rekognition Developer Guide.

This operation requires permissions to perform the rekognition:RecognizeCelebrities operation.

" }, "SearchFaces":{ "name":"SearchFaces", @@ -468,7 +468,7 @@ {"shape":"ProvisionedThroughputExceededException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

For a given input face ID, searches for matching faces in the collection the face belongs to. You get a face ID when you add a face to the collection using the IndexFaces operation. The operation compares the features of the input face with faces in the specified collection.

You can also search faces without indexing faces by using the SearchFacesByImage operation.

The operation response returns an array of faces that match, ordered by similarity score with the highest similarity first. More specifically, it is an array of metadata for each face match that is found. Along with the metadata, the response also includes a confidence value for each face match, indicating the confidence that the specific face matches the input face.

For an example, see search-face-with-id-procedure.

This operation requires permissions to perform the rekognition:SearchFaces action.

" + "documentation":"

For a given input face ID, searches for matching faces in the collection the face belongs to. You get a face ID when you add a face to the collection using the IndexFaces operation. The operation compares the features of the input face with faces in the specified collection.

You can also search faces without indexing faces by using the SearchFacesByImage operation.

The operation response returns an array of faces that match, ordered by similarity score with the highest similarity first. More specifically, it is an array of metadata for each face match that is found. Along with the metadata, the response also includes a confidence value for each face match, indicating the confidence that the specific face matches the input face.

For an example, see Searching for a Face Using Its Face ID in the Amazon Rekognition Developer Guide.

This operation requires permissions to perform the rekognition:SearchFaces action.

" }, "SearchFacesByImage":{ "name":"SearchFacesByImage", @@ -489,7 +489,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

For a given input image, first detects the largest face in the image, and then searches the specified collection for matching faces. The operation compares the features of the input face with faces in the specified collection.

To search for all faces in an input image, you might first call the operation, and then use the face IDs returned in subsequent calls to the operation.

You can also call the DetectFaces operation and use the bounding boxes in the response to make face crops, which then you can pass in to the SearchFacesByImage operation.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

The response returns an array of faces that match, ordered by similarity score with the highest similarity first. More specifically, it is an array of metadata for each face match found. Along with the metadata, the response also includes a similarity indicating how similar the face is to the input face. In the response, the operation also returns the bounding box (and a confidence level that the bounding box contains a face) of the face that Amazon Rekognition used for the input image.

For an example, see search-face-with-image-procedure.

This operation requires permissions to perform the rekognition:SearchFacesByImage action.

" + "documentation":"

For a given input image, first detects the largest face in the image, and then searches the specified collection for matching faces. The operation compares the features of the input face with faces in the specified collection.

To search for all faces in an input image, you might first call the operation, and then use the face IDs returned in subsequent calls to the operation.

You can also call the DetectFaces operation and use the bounding boxes in the response to make face crops, which then you can pass in to the SearchFacesByImage operation.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

The response returns an array of faces that match, ordered by similarity score with the highest similarity first. More specifically, it is an array of metadata for each face match found. Along with the metadata, the response also includes a similarity indicating how similar the face is to the input face. In the response, the operation also returns the bounding box (and a confidence level that the bounding box contains a face) of the face that Amazon Rekognition used for the input image.

For an example, see Searching for a Face Using an Image in the Amazon Rekognition Developer Guide.

This operation requires permissions to perform the rekognition:SearchFacesByImage action.

" }, "StartCelebrityRecognition":{ "name":"StartCelebrityRecognition", @@ -510,7 +510,7 @@ {"shape":"LimitExceededException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Starts asynchronous recognition of celebrities in a stored video.

Rekognition Video can detect celebrities in a video must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartCelebrityRecognition returns a job identifier (JobId) which you use to get the results of the analysis. When celebrity recognition analysis is finished, Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the results of the celebrity recognition analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartCelebrityRecognition. For more information, see celebrities.

", + "documentation":"

Starts asynchronous recognition of celebrities in a stored video.

Amazon Rekognition Video can detect celebrities in a video. The video must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartCelebrityRecognition returns a job identifier (JobId) which you use to get the results of the analysis. When celebrity recognition analysis is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the results of the celebrity recognition analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartCelebrityRecognition.

For more information, see Recognizing Celebrities in the Amazon Rekognition Developer Guide.

", "idempotent":true }, "StartContentModeration":{ @@ -532,7 +532,7 @@ {"shape":"LimitExceededException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Starts asynchronous detection of explicit or suggestive adult content in a stored video.

Rekognition Video can moderate content in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartContentModeration returns a job identifier (JobId) which you use to get the results of the analysis. When content moderation analysis is finished, Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel.

To get the results of the content moderation analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartContentModeration. For more information, see moderation.

", + "documentation":"

Starts asynchronous detection of explicit or suggestive adult content in a stored video.

Amazon Rekognition Video can moderate content in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartContentModeration returns a job identifier (JobId) which you use to get the results of the analysis. When content moderation analysis is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel.

To get the results of the content moderation analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartContentModeration.

For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

", "idempotent":true }, "StartFaceDetection":{ @@ -554,7 +554,7 @@ {"shape":"LimitExceededException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Starts asynchronous detection of faces in a stored video.

Rekognition Video can detect faces in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartFaceDetection returns a job identifier (JobId) that you use to get the results of the operation. When face detection is finished, Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the results of the label detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartFaceDetection. For more information, see faces-video.

", + "documentation":"

Starts asynchronous detection of faces in a stored video.

Amazon Rekognition Video can detect faces in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartFaceDetection returns a job identifier (JobId) that you use to get the results of the operation. When face detection is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the results of the label detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartFaceDetection.

For more information, see Detecting Faces in a Stored Video in the Amazon Rekognition Developer Guide.

", "idempotent":true }, "StartFaceSearch":{ @@ -577,7 +577,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Starts the asynchronous search for faces in a collection that match the faces of persons detected in a stored video.

The video must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartFaceSearch returns a job identifier (JobId) which you use to get the search results once the search has completed. When searching is finished, Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the search results, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartFaceSearch. For more information, see collections-search-person.

", + "documentation":"

Starts the asynchronous search for faces in a collection that match the faces of persons detected in a stored video.

The video must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartFaceSearch returns a job identifier (JobId) which you use to get the search results once the search has completed. When searching is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the search results, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartFaceSearch. For more information, see collections-search-person.

", "idempotent":true }, "StartLabelDetection":{ @@ -599,7 +599,7 @@ {"shape":"LimitExceededException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Starts asynchronous detection of labels in a stored video.

Rekognition Video can detect labels in a video. Labels are instances of real-world entities. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; concepts like landscape, evening, and nature; and activities like a person getting out of a car or a person skiing.

The video must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartLabelDetection returns a job identifier (JobId) which you use to get the results of the operation. When label detection is finished, Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel.

To get the results of the label detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartLabelDetection.

", + "documentation":"

Starts asynchronous detection of labels in a stored video.

Amazon Rekognition Video can detect labels in a video. Labels are instances of real-world entities. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; concepts like landscape, evening, and nature; and activities like a person getting out of a car or a person skiing.

The video must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartLabelDetection returns a job identifier (JobId) which you use to get the results of the operation. When label detection is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel.

To get the results of the label detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartLabelDetection.

", "idempotent":true }, "StartPersonTracking":{ @@ -621,7 +621,7 @@ {"shape":"LimitExceededException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Starts the asynchronous tracking of persons in a stored video.

Rekognition Video can track persons in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartPersonTracking returns a job identifier (JobId) which you use to get the results of the operation. When label detection is finished, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel.

To get the results of the person detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartPersonTracking.

", + "documentation":"

Starts the asynchronous tracking of persons in a stored video.

Amazon Rekognition Video can track persons in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartPersonTracking returns a job identifier (JobId) which you use to get the results of the operation. When label detection is finished, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel.

To get the results of the person detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call and pass the job identifier (JobId) from the initial call to StartPersonTracking.

", "idempotent":true }, "StartStreamProcessor":{ @@ -805,7 +805,7 @@ "documentation":"

Information about a recognized celebrity.

" } }, - "documentation":"

Information about a detected celebrity and the time the celebrity was detected in a stored video. For more information, see .

" + "documentation":"

Information about a detected celebrity and the time the celebrity was detected in a stored video. For more information, see GetCelebrityRecognition in the Amazon Rekognition Developer Guide.

" }, "CelebrityRecognitionSortBy":{ "type":"string", @@ -1014,7 +1014,7 @@ }, "Output":{ "shape":"StreamProcessorOutput", - "documentation":"

Kinesis data stream stream to which Rekognition Video puts the analysis results. If you are using the AWS CLI, the parameter name is StreamProcessorOutput.

" + "documentation":"

Kinesis data stream to which Amazon Rekognition Video puts the analysis results. If you are using the AWS CLI, the parameter name is StreamProcessorOutput.

" }, "Name":{ "shape":"StreamProcessorName", @@ -1148,7 +1148,7 @@ }, "Output":{ "shape":"StreamProcessorOutput", - "documentation":"

Kinesis data stream to which Rekognition Video puts the analysis results.

" + "documentation":"

Kinesis data stream to which Amazon Rekognition Video puts the analysis results.

" }, "RoleArn":{ "shape":"RoleArn", @@ -1422,7 +1422,7 @@ "documentation":"

Confidence level that the bounding box contains a face (and not a different object such as a tree). Default attribute.

" } }, - "documentation":"

Structure containing attributes of the face that the algorithm detected.

A FaceDetail object contains either the default facial attributes or all facial attributes. The default attributes are BoundingBox, Confidence, Landmarks, Pose, and Quality.

is the only Rekognition Video stored video operation that can return a FaceDetail object with all attributes. To specify which attributes to return, use the FaceAttributes input parameter for . The following Rekognition Video operations return only the default attributes. The corresponding Start operations don't have a FaceAttributes input parameter.

  • GetCelebrityRecognition

  • GetPersonTracking

  • GetFaceSearch

The Rekognition Image and operations can return all facial attributes. To specify which attributes to return, use the Attributes input parameter for DetectFaces. For IndexFaces, use the DetectAttributes input parameter.

" + "documentation":"

Structure containing attributes of the face that the algorithm detected.

A FaceDetail object contains either the default facial attributes or all facial attributes. The default attributes are BoundingBox, Confidence, Landmarks, Pose, and Quality.

is the only Amazon Rekognition Video stored video operation that can return a FaceDetail object with all attributes. To specify which attributes to return, use the FaceAttributes input parameter for . The following Amazon Rekognition Video operations return only the default attributes. The corresponding Start operations don't have a FaceAttributes input parameter.

  • GetCelebrityRecognition

  • GetPersonTracking

  • GetFaceSearch

The Amazon Rekognition Image and operations can return all facial attributes. To specify which attributes to return, use the Attributes input parameter for DetectFaces. For IndexFaces, use the DetectAttributes input parameter.

" }, "FaceDetailList":{ "type":"list", @@ -1594,7 +1594,7 @@ }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

If the previous response was incomplete (because there is more recognized celebrities to retrieve), Rekognition Video returns a pagination token in the response. You can use this pagination token to retrieve the next set of celebrities.

" + "documentation":"

If the previous response was incomplete (because there are more recognized celebrities to retrieve), Amazon Rekognition Video returns a pagination token in the response. You can use this pagination token to retrieve the next set of celebrities.

" }, "SortBy":{ "shape":"CelebrityRecognitionSortBy", @@ -1615,11 +1615,11 @@ }, "VideoMetadata":{ "shape":"VideoMetadata", - "documentation":"

Information about a video that Rekognition Video analyzed. Videometadata is returned in every page of paginated responses from a Rekognition Video operation.

" + "documentation":"

Information about a video that Amazon Rekognition Video analyzed. Videometadata is returned in every page of paginated responses from an Amazon Rekognition Video operation.

" }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

If the response is truncated, Rekognition Video returns this token that you can use in the subsequent request to retrieve the next set of celebrities.

" + "documentation":"

If the response is truncated, Amazon Rekognition Video returns this token that you can use in the subsequent request to retrieve the next set of celebrities.

" }, "Celebrities":{ "shape":"CelebrityRecognitions", @@ -1670,7 +1670,7 @@ }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

If the response is truncated, Rekognition Video returns this token that you can use in the subsequent request to retrieve the next set of moderation labels.

" + "documentation":"

If the response is truncated, Amazon Rekognition Video returns this token that you can use in the subsequent request to retrieve the next set of moderation labels.

" } } }, @@ -1688,7 +1688,7 @@ }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

If the previous response was incomplete (because there are more faces to retrieve), Rekognition Video returns a pagination token in the response. You can use this pagination token to retrieve the next set of faces.

" + "documentation":"

If the previous response was incomplete (because there are more faces to retrieve), Amazon Rekognition Video returns a pagination token in the response. You can use this pagination token to retrieve the next set of faces.

" } } }, @@ -1705,7 +1705,7 @@ }, "VideoMetadata":{ "shape":"VideoMetadata", - "documentation":"

Information about a video that Rekognition Video analyzed. Videometadata is returned in every page of paginated responses from a Amazon Rekognition video operation.

" + "documentation":"

Information about a video that Amazon Rekognition Video analyzed. Videometadata is returned in every page of paginated responses from an Amazon Rekognition Video operation.

" }, "NextToken":{ "shape":"PaginationToken", @@ -1731,7 +1731,7 @@ }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

If the previous response was incomplete (because there is more search results to retrieve), Rekognition Video returns a pagination token in the response. You can use this pagination token to retrieve the next set of search results.

" + "documentation":"

If the previous response was incomplete (because there are more search results to retrieve), Amazon Rekognition Video returns a pagination token in the response. You can use this pagination token to retrieve the next set of search results.

" }, "SortBy":{ "shape":"FaceSearchSortBy", @@ -1752,11 +1752,11 @@ }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

If the response is truncated, Rekognition Video returns this token that you can use in the subsequent request to retrieve the next set of search results.

" + "documentation":"

If the response is truncated, Amazon Rekognition Video returns this token that you can use in the subsequent request to retrieve the next set of search results.

" }, "VideoMetadata":{ "shape":"VideoMetadata", - "documentation":"

Information about a video that Amazon Rekognition analyzed. Videometadata is returned in every page of paginated responses from a Rekognition Video operation.

" + "documentation":"

Information about a video that Amazon Rekognition analyzed. Videometadata is returned in every page of paginated responses from an Amazon Rekognition Video operation.

" }, "Persons":{ "shape":"PersonMatches", @@ -1778,7 +1778,7 @@ }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

If the previous response was incomplete (because there are more labels to retrieve), Rekognition Video returns a pagination token in the response. You can use this pagination token to retrieve the next set of labels.

" + "documentation":"

If the previous response was incomplete (because there are more labels to retrieve), Amazon Rekognition Video returns a pagination token in the response. You can use this pagination token to retrieve the next set of labels.

" }, "SortBy":{ "shape":"LabelDetectionSortBy", @@ -1799,11 +1799,11 @@ }, "VideoMetadata":{ "shape":"VideoMetadata", - "documentation":"

Information about a video that Rekognition Video analyzed. Videometadata is returned in every page of paginated responses from a Amazon Rekognition video operation.

" + "documentation":"

Information about a video that Amazon Rekognition Video analyzed. Videometadata is returned in every page of paginated responses from an Amazon Rekognition Video operation.

" }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

If the response is truncated, Rekognition Video returns this token that you can use in the subsequent request to retrieve the next set of labels.

" + "documentation":"

If the response is truncated, Amazon Rekognition Video returns this token that you can use in the subsequent request to retrieve the next set of labels.

" }, "Labels":{ "shape":"LabelDetections", @@ -1825,7 +1825,7 @@ }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

If the previous response was incomplete (because there are more persons to retrieve), Rekognition Video returns a pagination token in the response. You can use this pagination token to retrieve the next set of persons.

" + "documentation":"

If the previous response was incomplete (because there are more persons to retrieve), Amazon Rekognition Video returns a pagination token in the response. You can use this pagination token to retrieve the next set of persons.

" }, "SortBy":{ "shape":"PersonTrackingSortBy", @@ -1846,11 +1846,11 @@ }, "VideoMetadata":{ "shape":"VideoMetadata", - "documentation":"

Information about a video that Rekognition Video analyzed. Videometadata is returned in every page of paginated responses from a Rekognition Video operation.

" + "documentation":"

Information about a video that Amazon Rekognition Video analyzed. Videometadata is returned in every page of paginated responses from an Amazon Rekognition Video operation.

" }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

If the response is truncated, Rekognition Video returns this token that you can use in the subsequent request to retrieve the next set of persons.

" + "documentation":"

If the response is truncated, Amazon Rekognition Video returns this token that you can use in the subsequent request to retrieve the next set of persons.

" }, "Persons":{ "shape":"PersonDetections", @@ -1877,7 +1877,7 @@ "documentation":"

Identifies an S3 object as the image source.

" } }, - "documentation":"

Provides the input image either as bytes or an S3 object.

You pass image bytes to a Rekognition API operation by using the Bytes property. For example, you would use the Bytes property to pass an image loaded from a local file system. Image bytes passed by using the Bytes property must be base64-encoded. Your code may not need to encode image bytes if you are using an AWS SDK to call Rekognition API operations. For more information, see images-bytes.

You pass images stored in an S3 bucket to a Rekognition API operation by using the S3Object property. Images stored in an S3 bucket do not need to be base64-encoded.

The region for the S3 bucket containing the S3 object must match the region you use for Amazon Rekognition operations.

If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes using the Bytes property is not supported. You must first upload the image to an Amazon S3 bucket and then call the operation using the S3Object property.

For Amazon Rekognition to process an S3 object, the user must have permission to access the S3 object. For more information, see manage-access-resource-policies.

" + "documentation":"

Provides the input image either as bytes or an S3 object.

You pass image bytes to a Rekognition API operation by using the Bytes property. For example, you would use the Bytes property to pass an image loaded from a local file system. Image bytes passed by using the Bytes property must be base64-encoded. Your code may not need to encode image bytes if you are using an AWS SDK to call Rekognition API operations.

For more information, see Analyzing an Image Loaded from a Local File System in the Amazon Rekognition Developer Guide.

You pass images stored in an S3 bucket to a Rekognition API operation by using the S3Object property. Images stored in an S3 bucket do not need to be base64-encoded.

The region for the S3 bucket containing the S3 object must match the region you use for Amazon Rekognition operations.

If you use the Amazon CLI to call Amazon Rekognition operations, passing image bytes using the Bytes property is not supported. You must first upload the image to an Amazon S3 bucket and then call the operation using the S3Object property.

For Amazon Rekognition to process an S3 object, the user must have permission to access the S3 object. For more information, see Resource Based Policies in the Amazon Rekognition Developer Guide.

" }, "ImageBlob":{ "type":"blob", @@ -1906,7 +1906,7 @@ "type":"structure", "members":{ }, - "documentation":"

The input image size exceeds the allowed limit. For more information, see limits.

", + "documentation":"

The input image size exceeds the allowed limit. For more information, see Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.

", "exception":true }, "IndexFacesRequest":{ @@ -1939,7 +1939,7 @@ "members":{ "FaceRecords":{ "shape":"FaceRecordList", - "documentation":"

An array of faces detected and added to the collection. For more information, see collections-index-faces.

" + "documentation":"

An array of faces detected and added to the collection. For more information, see Searching Faces in a Collection in the Amazon Rekognition Developer Guide.

" }, "OrientationCorrection":{ "shape":"OrientationCorrection", @@ -2011,7 +2011,7 @@ "documentation":"

ARN of the output Amazon Kinesis Data Streams stream.

" } }, - "documentation":"

The Kinesis data stream Amazon Rekognition to which the analysis results of a Amazon Rekognition stream processor are streamed. For more information, see .

" + "documentation":"

The Amazon Kinesis data stream to which the analysis results of an Amazon Rekognition stream processor are streamed. For more information, see CreateStreamProcessor in the Amazon Rekognition Developer Guide.

" }, "KinesisVideoArn":{ "type":"string", @@ -2025,7 +2025,7 @@ "documentation":"

ARN of the Kinesis video stream stream that streams the source video.

" } }, - "documentation":"

Kinesis video stream stream that provides the source streaming video for a Rekognition Video stream processor. For more information, see .

" + "documentation":"

Kinesis video stream that provides the source streaming video for an Amazon Rekognition Video stream processor. For more information, see CreateStreamProcessor in the Amazon Rekognition Developer Guide.

" }, "Label":{ "type":"structure", @@ -2126,7 +2126,7 @@ "type":"structure", "members":{ }, - "documentation":"

An Amazon Rekognition service limit was exceeded. For example, if you start too many Rekognition Video jobs concurrently, calls to start operations (StartLabelDetection, for example) will raise a LimitExceededException exception (HTTP status code: 400) until the number of concurrently running jobs is below the Amazon Rekognition service limit.

", + "documentation":"

An Amazon Rekognition service limit was exceeded. For example, if you start too many Amazon Rekognition Video jobs concurrently, calls to start operations (StartLabelDetection, for example) will raise a LimitExceededException exception (HTTP status code: 400) until the number of concurrently running jobs is below the Amazon Rekognition service limit.

", "exception":true }, "ListCollectionsRequest":{ @@ -2199,11 +2199,11 @@ "members":{ "NextToken":{ "shape":"PaginationToken", - "documentation":"

If the previous response was incomplete (because there are more stream processors to retrieve), Rekognition Video returns a pagination token in the response. You can use this pagination token to retrieve the next set of stream processors.

" + "documentation":"

If the previous response was incomplete (because there are more stream processors to retrieve), Amazon Rekognition Video returns a pagination token in the response. You can use this pagination token to retrieve the next set of stream processors.

" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

Maximum number of stream processors you want Rekognition Video to return in the response. The default is 1000.

" + "documentation":"

Maximum number of stream processors you want Amazon Rekognition Video to return in the response. The default is 1000.

" } } }, @@ -2212,7 +2212,7 @@ "members":{ "NextToken":{ "shape":"PaginationToken", - "documentation":"

If the response is truncated, Rekognition Video returns this token that you can use in the subsequent request to retrieve the next set of stream processors.

" + "documentation":"

If the response is truncated, Amazon Rekognition Video returns this token that you can use in the subsequent request to retrieve the next set of stream processors.

" }, "StreamProcessors":{ "shape":"StreamProcessorList", @@ -2245,7 +2245,7 @@ "documentation":"

The name for the parent label. Labels at the top-level of the hierarchy have the parent label \"\".

" } }, - "documentation":"

Provides information about a single type of moderated content found in an image or video. Each type of moderated content has a label within a hierarchical taxonomy. For more information, see moderation.

" + "documentation":"

Provides information about a single type of moderated content found in an image or video. Each type of moderated content has a label within a hierarchical taxonomy. For more information, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

" }, "ModerationLabels":{ "type":"list", @@ -2350,7 +2350,7 @@ "documentation":"

Details about a person tracked in a video.

" } }, - "documentation":"

Details and tracking information for a single time a person is tracked in a video. Amazon Rekognition operations that track persons return an array of PersonDetection objects with elements for each time a person is tracked in a video. For more information, see .

" + "documentation":"

Details and tracking information for a single time a person is tracked in a video. Amazon Rekognition operations that track persons return an array of PersonDetection objects with elements for each time a person is tracked in a video.

For more information, see API_GetPersonTracking in the Amazon Rekognition Developer Guide.

" }, "PersonDetections":{ "type":"list", @@ -2398,7 +2398,7 @@ "documentation":"

The value of the Y coordinate for a point on a Polygon.

" } }, - "documentation":"

The X and Y coordinates of a point on an image. The X and Y values returned are ratios of the overall image size. For example, if the input image is 700x200 and the operation returns X=0.5 and Y=0.25, then the point is at the (350,50) pixel coordinate on the image.

An array of Point objects, Polygon, is returned by . Polygon represents a fine-grained polygon around detected text. For more information, see .

" + "documentation":"

The X and Y coordinates of a point on an image. The X and Y values returned are ratios of the overall image size. For example, if the input image is 700x200 and the operation returns X=0.5 and Y=0.25, then the point is at the (350,50) pixel coordinate on the image.

An array of Point objects, Polygon, is returned by . Polygon represents a fine-grained polygon around detected text. For more information, see Geometry in the Amazon Rekognition Developer Guide.

" }, "Polygon":{ "type":"list", @@ -2507,7 +2507,7 @@ "documentation":"

If the bucket is versioning enabled, you can specify the object version.

" } }, - "documentation":"

Provides the S3 bucket name and object name.

The region for the S3 bucket containing the S3 object must match the region you use for Amazon Rekognition operations.

For Amazon Rekognition to process an S3 object, the user must have permission to access the S3 object. For more information, see manage-access-resource-policies.

" + "documentation":"

Provides the S3 bucket name and object name.

The region for the S3 bucket containing the S3 object must match the region you use for Amazon Rekognition operations.

For Amazon Rekognition to process an S3 object, the user must have permission to access the S3 object. For more information, see Resource Based Policies in the Amazon Rekognition Developer Guide.

" }, "S3ObjectName":{ "type":"string", @@ -2639,7 +2639,7 @@ }, "NotificationChannel":{ "shape":"NotificationChannel", - "documentation":"

The Amazon SNS topic ARN that you want Rekognition Video to publish the completion status of the celebrity recognition analysis to.

" + "documentation":"

The Amazon SNS topic ARN that you want Amazon Rekognition Video to publish the completion status of the celebrity recognition analysis to.

" }, "JobTag":{ "shape":"JobTag", @@ -2674,7 +2674,7 @@ }, "NotificationChannel":{ "shape":"NotificationChannel", - "documentation":"

The Amazon SNS topic ARN that you want Rekognition Video to publish the completion status of the content moderation analysis to.

" + "documentation":"

The Amazon SNS topic ARN that you want Amazon Rekognition Video to publish the completion status of the content moderation analysis to.

" }, "JobTag":{ "shape":"JobTag", @@ -2705,7 +2705,7 @@ }, "NotificationChannel":{ "shape":"NotificationChannel", - "documentation":"

The ARN of the Amazon SNS topic to which you want Rekognition Video to publish the completion status of the face detection operation.

" + "documentation":"

The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video to publish the completion status of the face detection operation.

" }, "FaceAttributes":{ "shape":"FaceAttributes", @@ -2751,7 +2751,7 @@ }, "NotificationChannel":{ "shape":"NotificationChannel", - "documentation":"

The ARN of the Amazon SNS topic to which you want Rekognition Video to publish the completion status of the search.

" + "documentation":"

The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video to publish the completion status of the search.

" }, "JobTag":{ "shape":"JobTag", @@ -2782,11 +2782,11 @@ }, "MinConfidence":{ "shape":"Percent", - "documentation":"

Specifies the minimum confidence that Rekognition Video must have in order to return a detected label. Confidence represents how certain Amazon Rekognition is that a label is correctly identified.0 is the lowest confidence. 100 is the highest confidence. Rekognition Video doesn't return any labels with a confidence level lower than this specified value.

If you don't specify MinConfidence, the operation returns labels with confidence values greater than or equal to 50 percent.

" + "documentation":"

Specifies the minimum confidence that Amazon Rekognition Video must have in order to return a detected label. Confidence represents how certain Amazon Rekognition is that a label is correctly identified. 0 is the lowest confidence. 100 is the highest confidence. Amazon Rekognition Video doesn't return any labels with a confidence level lower than this specified value.

If you don't specify MinConfidence, the operation returns labels with confidence values greater than or equal to 50 percent.

" }, "NotificationChannel":{ "shape":"NotificationChannel", - "documentation":"

The Amazon SNS topic ARN you want Rekognition Video to publish the completion status of the label detection operation to.

" + "documentation":"

The Amazon SNS topic ARN you want Amazon Rekognition Video to publish the completion status of the label detection operation to.

" }, "JobTag":{ "shape":"JobTag", @@ -2817,7 +2817,7 @@ }, "NotificationChannel":{ "shape":"NotificationChannel", - "documentation":"

The Amazon SNS topic ARN you want Rekognition Video to publish the completion status of the people detection operation to.

" + "documentation":"

The Amazon SNS topic ARN you want Amazon Rekognition Video to publish the completion status of the people detection operation to.

" }, "JobTag":{ "shape":"JobTag", @@ -2911,7 +2911,7 @@ "documentation":"

The Amazon Kinesis Data Streams stream to which the Amazon Rekognition stream processor streams the analysis results.

" } }, - "documentation":"

Information about the Amazon Kinesis Data Streams stream to which a Rekognition Video stream processor streams the results of a video analysis. For more information, see .

" + "documentation":"

Information about the Amazon Kinesis Data Streams stream to which an Amazon Rekognition Video stream processor streams the results of a video analysis. For more information, see CreateStreamProcessor in the Amazon Rekognition Developer Guide.

" }, "StreamProcessorSettings":{ "type":"structure", @@ -2976,7 +2976,7 @@ "documentation":"

The location of the detected text on the image. Includes an axis aligned coarse bounding box surrounding the text and a finer grain polygon for more accurate spatial information.

" } }, - "documentation":"

Information about a word or line of text detected by .

The DetectedText field contains the text that Amazon Rekognition detected in the image.

Every word and line has an identifier (Id). Each word belongs to a line and has a parent identifier (ParentId) that identifies the line of text in which the word appears. The word Id is also an index for the word within a line of words.

For more information, see text-detection.

" + "documentation":"

Information about a word or line of text detected by .

The DetectedText field contains the text that Amazon Rekognition detected in the image.

Every word and line has an identifier (Id). Each word belongs to a line and has a parent identifier (ParentId) that identifies the line of text in which the word appears. The word Id is also an index for the word within a line of words.

For more information, see Detecting Text in the Amazon Rekognition Developer Guide.

" }, "TextDetectionList":{ "type":"list", diff --git a/botocore/data/resourcegroupstaggingapi/2017-01-26/service-2.json b/botocore/data/resourcegroupstaggingapi/2017-01-26/service-2.json index e4275899..7c0b518a 100644 --- a/botocore/data/resourcegroupstaggingapi/2017-01-26/service-2.json +++ b/botocore/data/resourcegroupstaggingapi/2017-01-26/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceFullName":"AWS Resource Groups Tagging API", + "serviceId":"Resource Groups Tagging API", "signatureVersion":"v4", "targetPrefix":"ResourceGroupsTaggingAPI_20170126", "uid":"resourcegroupstaggingapi-2017-01-26" diff --git a/botocore/data/route53domains/2014-05-15/service-2.json b/botocore/data/route53domains/2014-05-15/service-2.json index a4bb71d7..4a7dfc94 100644 --- a/botocore/data/route53domains/2014-05-15/service-2.json +++ b/botocore/data/route53domains/2014-05-15/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceFullName":"Amazon Route 53 Domains", + "serviceId":"Route 53 Domains", "signatureVersion":"v4", "targetPrefix":"Route53Domains_v20140515", "uid":"route53domains-2014-05-15" diff --git a/botocore/data/s3/2006-03-01/service-2.json b/botocore/data/s3/2006-03-01/service-2.json index 0a49ed97..9674a4f6 100644 --- a/botocore/data/s3/2006-03-01/service-2.json +++ b/botocore/data/s3/2006-03-01/service-2.json @@ -949,6 +949,7 @@ "documentation":"Container for information regarding the access control for replicas." }, "AccountId":{"type":"string"}, + "AllowQuotedRecordDelimiter":{"type":"boolean"}, "AllowedHeader":{"type":"string"}, "AllowedHeaders":{ "type":"list", @@ -1240,6 +1241,10 @@ "QuoteCharacter":{ "shape":"QuoteCharacter", "documentation":"Value used for escaping where the field delimiter is part of the value." 
+ }, + "AllowQuotedRecordDelimiter":{ + "shape":"AllowQuotedRecordDelimiter", + "documentation":"Specifies that CSV field values may contain quoted record delimiters and such records should be allowed. Default value is FALSE. Setting this value to TRUE may lower performance." } }, "documentation":"Describes how a CSV-formatted input object is formatted." diff --git a/botocore/data/sagemaker/2017-07-24/service-2.json b/botocore/data/sagemaker/2017-07-24/service-2.json index 0a695a64..43fdf06f 100644 --- a/botocore/data/sagemaker/2017-07-24/service-2.json +++ b/botocore/data/sagemaker/2017-07-24/service-2.json @@ -7,6 +7,7 @@ "protocol":"json", "serviceAbbreviation":"SageMaker", "serviceFullName":"Amazon SageMaker Service", + "serviceId":"SageMaker", "signatureVersion":"v4", "signingName":"sagemaker", "targetPrefix":"SageMaker", @@ -34,7 +35,7 @@ "errors":[ {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API only for hosting models using Amazon SageMaker hosting services.

The endpoint name must be unique within an AWS Region in your AWS account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

For an example, see Exercise 1: Using the K-Means Algorithm Provided by Amazon SageMaker.

" + "documentation":"

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API only for hosting models using Amazon SageMaker hosting services.

The endpoint name must be unique within an AWS Region in your AWS account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

For an example, see Exercise 1: Using the K-Means Algorithm Provided by Amazon SageMaker.

If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provided. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

" }, "CreateEndpointConfig":{ "name":"CreateEndpointConfig", @@ -49,6 +50,20 @@ ], "documentation":"

Creates an endpoint configuration that Amazon SageMaker hosting services uses to deploy models. In the configuration, you identify one or more models, created using the CreateModel API, to deploy and the resources that you want Amazon SageMaker to provision. Then you call the CreateEndpoint API.

Use this API only if you want to use Amazon SageMaker hosting services to deploy models into production.

In the request, you define one or more ProductionVariants, each of which identifies a model. Each ProductionVariant parameter also describes the resources that you want Amazon SageMaker to provision. This includes the number and type of ML compute instances to deploy.

If you are hosting multiple models, you also assign a VariantWeight to specify how much traffic you want to allocate to each model. For example, suppose that you want to host two models, A and B, and you assign traffic weight 2 for model A and 1 for model B. Amazon SageMaker distributes two-thirds of the traffic to Model A, and one-third to model B.

" }, + "CreateHyperParameterTuningJob":{ + "name":"CreateHyperParameterTuningJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateHyperParameterTuningJobRequest"}, + "output":{"shape":"CreateHyperParameterTuningJobResponse"}, + "errors":[ + {"shape":"ResourceInUse"}, + {"shape":"ResourceLimitExceeded"} + ], + "documentation":"

Starts a hyperparameter tuning job.

" + }, "CreateModel":{ "name":"CreateModel", "http":{ @@ -96,7 +111,7 @@ }, "input":{"shape":"CreatePresignedNotebookInstanceUrlInput"}, "output":{"shape":"CreatePresignedNotebookInstanceUrlOutput"}, - "documentation":"

Returns a URL that you can use to connect to the Juypter server from a notebook instance. In the Amazon SageMaker console, when you choose Open next to a notebook instance, Amazon SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page.

" + "documentation":"

Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the Amazon SageMaker console, when you choose Open next to a notebook instance, Amazon SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page.

" }, "CreateTrainingJob":{ "name":"CreateTrainingJob", @@ -110,7 +125,7 @@ {"shape":"ResourceInUse"}, {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Starts a model training job. After training completes, Amazon SageMaker saves the resulting model artifacts to an Amazon S3 location that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts in a deep learning service other than Amazon SageMaker, provided that you know how to use them for inferences.

In the request body, you provide the following:

  • AlgorithmSpecification - Identifies the training algorithm to use.

  • HyperParameters - Specify these algorithm-specific parameters to influence the quality of the final model. For a list of hyperparameters for each training algorithm provided by Amazon SageMaker, see Algorithms.

  • InputDataConfig - Describes the training dataset and the Amazon S3 location where it is stored.

  • OutputDataConfig - Identifies the Amazon S3 location where you want Amazon SageMaker to save the results of model training.

  • ResourceConfig - Identifies the resources, ML compute instances, and ML storage volumes to deploy for model training. In distributed training, you specify more than one instance.

  • RoleARN - The Amazon Resource Number (ARN) that Amazon SageMaker assumes to perform tasks on your behalf during model training. You must grant this role the necessary permissions so that Amazon SageMaker can successfully complete model training.

  • StoppingCondition - Sets a duration for training. Use this parameter to cap model training costs.

For more information about Amazon SageMaker, see How It Works.

" + "documentation":"

Starts a model training job. After training completes, Amazon SageMaker saves the resulting model artifacts to an Amazon S3 location that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts in a deep learning service other than Amazon SageMaker, provided that you know how to use them for inferences.

In the request body, you provide the following:

  • AlgorithmSpecification - Identifies the training algorithm to use.

  • HyperParameters - Specify these algorithm-specific parameters to influence the quality of the final model. For a list of hyperparameters for each training algorithm provided by Amazon SageMaker, see Algorithms.

  • InputDataConfig - Describes the training dataset and the Amazon S3 location where it is stored.

  • OutputDataConfig - Identifies the Amazon S3 location where you want Amazon SageMaker to save the results of model training.

  • ResourceConfig - Identifies the resources, ML compute instances, and ML storage volumes to deploy for model training. In distributed training, you specify more than one instance.

  • RoleARN - The Amazon Resource Number (ARN) that Amazon SageMaker assumes to perform tasks on your behalf during model training. You must grant this role the necessary permissions so that Amazon SageMaker can successfully complete model training.

  • StoppingCondition - Sets a duration for training. Use this parameter to cap model training costs.

For more information about Amazon SageMaker, see How It Works.

" }, "DeleteEndpoint":{ "name":"DeleteEndpoint", @@ -128,7 +143,7 @@ "requestUri":"/" }, "input":{"shape":"DeleteEndpointConfigInput"}, - "documentation":"

Deletes an endpoint configuration. The DeleteEndpoingConfig API deletes only the specified configuration. It does not delete endpoints created using the configuration.

" + "documentation":"

Deletes an endpoint configuration. The DeleteEndpointConfig API deletes only the specified configuration. It does not delete endpoints created using the configuration.

" }, "DeleteModel":{ "name":"DeleteModel", @@ -187,6 +202,19 @@ "output":{"shape":"DescribeEndpointConfigOutput"}, "documentation":"

Returns the description of an endpoint configuration created using the CreateEndpointConfig API.

" }, + "DescribeHyperParameterTuningJob":{ + "name":"DescribeHyperParameterTuningJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeHyperParameterTuningJobRequest"}, + "output":{"shape":"DescribeHyperParameterTuningJobResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Gets a description of a hyperparameter tuning job.

" + }, "DescribeModel":{ "name":"DescribeModel", "http":{ @@ -250,6 +278,16 @@ "output":{"shape":"ListEndpointsOutput"}, "documentation":"

Lists endpoints.

" }, + "ListHyperParameterTuningJobs":{ + "name":"ListHyperParameterTuningJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListHyperParameterTuningJobsRequest"}, + "output":{"shape":"ListHyperParameterTuningJobsResponse"}, + "documentation":"

Gets a list of HyperParameterTuningJobSummary objects that describe the hyperparameter tuning jobs launched in your account.

" + }, "ListModels":{ "name":"ListModels", "http":{ @@ -268,7 +306,7 @@ }, "input":{"shape":"ListNotebookInstanceLifecycleConfigsInput"}, "output":{"shape":"ListNotebookInstanceLifecycleConfigsOutput"}, - "documentation":"

Lists notebook instance lifestyle configurations created with the API.

" + "documentation":"

Lists notebook instance lifecycle configurations created with the CreateNotebookInstanceLifecycleConfig API.

" }, "ListNotebookInstances":{ "name":"ListNotebookInstances", @@ -300,6 +338,19 @@ "output":{"shape":"ListTrainingJobsResponse"}, "documentation":"

Lists training jobs.

" }, + "ListTrainingJobsForHyperParameterTuningJob":{ + "name":"ListTrainingJobsForHyperParameterTuningJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTrainingJobsForHyperParameterTuningJobRequest"}, + "output":{"shape":"ListTrainingJobsForHyperParameterTuningJobResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Gets a list of TrainingJobSummary objects that describe the training jobs that a hyperparameter tuning job launched.

" + }, "StartNotebookInstance":{ "name":"StartNotebookInstance", "http":{ @@ -312,6 +363,18 @@ ], "documentation":"

Launches an ML compute instance with the latest version of the libraries and attaches your ML storage volume. After configuring the notebook instance, Amazon SageMaker sets the notebook instance status to InService. A notebook instance's status must be InService before you can connect to your Jupyter notebook.

" }, + "StopHyperParameterTuningJob":{ + "name":"StopHyperParameterTuningJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopHyperParameterTuningJobRequest"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Stops a running hyperparameter tuning job and all running training jobs that the tuning job launched.

All model artifacts output from the training jobs are stored in Amazon Simple Storage Service (Amazon S3). All data that the training jobs write to Amazon CloudWatch Logs are still available in CloudWatch. After the tuning job moves to the Stopped state, it releases all reserved resources for the tuning job.

" + }, "StopNotebookInstance":{ "name":"StopNotebookInstance", "http":{ @@ -344,7 +407,7 @@ "errors":[ {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Deploys the new EndpointConfig specified in the request, switches to using newly created endpoint, and then deletes resources provisioned for the endpoint using the previous EndpointConfig (there is no availability loss).

When Amazon SageMaker receives the request, it sets the endpoint status to Updating. After updating the endpoint, it sets the status to InService. To check the status of an endpoint, use the DescribeEndpoint API.

" + "documentation":"

Deploys the new EndpointConfig specified in the request, switches to using newly created endpoint, and then deletes resources provisioned for the endpoint using the previous EndpointConfig (there is no availability loss).

When Amazon SageMaker receives the request, it sets the endpoint status to Updating. After updating the endpoint, it sets the status to InService. To check the status of an endpoint, use the DescribeEndpoint API.

You cannot update an endpoint with the current EndpointConfig. To update an endpoint, you must create a new EndpointConfig.

" }, "UpdateEndpointWeightsAndCapacities":{ "name":"UpdateEndpointWeightsAndCapacities", @@ -383,7 +446,7 @@ "errors":[ {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Updates a notebook instance lifecycle configuration created with the API.

" + "documentation":"

Updates a notebook instance lifecycle configuration created with the CreateNotebookInstanceLifecycleConfig API.

" } }, "shapes":{ @@ -433,7 +496,31 @@ "documentation":"

The input mode that the algorithm supports. For the input modes that Amazon SageMaker algorithms support, see Algorithms. If an algorithm supports the File input mode, Amazon SageMaker downloads the training data from S3 to the provisioned ML storage Volume, and mounts the directory to docker volume for training container. If an algorithm supports the Pipe input mode, Amazon SageMaker streams data directly from S3 to the container.

In File mode, make sure you provision ML storage volume with sufficient capacity to accommodate the data download from S3. In addition to the training data, the ML storage volume also stores the output model. The algorithm container uses the ML storage volume to also store intermediate information, if any.

For distributed algorithms using File mode, training data is distributed uniformly, and your training duration is predictable if the input data object sizes are approximately the same. Amazon SageMaker does not split the files any further for model training. If the object sizes are skewed, training won't be optimal as the data distribution is also skewed, where one host in a training cluster is overloaded, thus becoming a bottleneck in training.

" } }, - "documentation":"

Specifies the training algorithm to use in a CreateTrainingJob request.

For more information about algorithms provided by Amazon SageMaker, see Algorithms. For information about using your own algorithms, see your-algorithms.

" + "documentation":"

Specifies the training algorithm to use in a CreateTrainingJob request.

For more information about algorithms provided by Amazon SageMaker, see Algorithms. For information about using your own algorithms, see your-algorithms.

" + }, + "CategoricalParameterRange":{ + "type":"structure", + "required":[ + "Name", + "Values" + ], + "members":{ + "Name":{ + "shape":"ParameterKey", + "documentation":"

The name of the categorical hyperparameter to tune.

" + }, + "Values":{ + "shape":"ParameterValues", + "documentation":"

A list of the categories for the hyperparameter.

" + } + }, + "documentation":"

A list of categorical hyperparameters to tune.

" + }, + "CategoricalParameterRanges":{ + "type":"list", + "member":{"shape":"CategoricalParameterRange"}, + "max":20, + "min":0 }, "Channel":{ "type":"structure", @@ -456,11 +543,11 @@ }, "CompressionType":{ "shape":"CompressionType", - "documentation":"

If training data is compressed, the compression type. The default value is None. CompressionType is used only in PIPE input mode. In FILE mode, leave this field unset or set it to None.

" + "documentation":"

If training data is compressed, the compression type. The default value is None. CompressionType is used only in Pipe input mode. In File mode, leave this field unset or set it to None.

" }, "RecordWrapperType":{ "shape":"RecordWrapper", - "documentation":"

Specify RecordIO as the value when input data is in raw format but the training algorithm requires the RecordIO format, in which caseAmazon SageMaker wraps each individual S3 object in a RecordIO record. If the input data is already in RecordIO format, you don't need to set this attribute. For more information, see Create a Dataset Using RecordIO.

In FILE mode, leave this field unset or set it to None.

" + "documentation":"

Specify RecordIO as the value when input data is in raw format but the training algorithm requires the RecordIO format, in which case, Amazon SageMaker wraps each individual S3 object in a RecordIO record. If the input data is already in RecordIO format, you don't need to set this attribute. For more information, see Create a Dataset Using RecordIO.

In FILE mode, leave this field unset or set it to None.

" } }, "documentation":"

A channel is a named input source that training algorithms can consume.

" @@ -488,11 +575,11 @@ }, "Image":{ "shape":"Image", - "documentation":"

The Amazon EC2 Container Registry (Amazon ECR) path where inference code is stored. If you are using your own custom algorithm instead of an algorithm provided by Amazon SageMaker, the inference code must meet Amazon SageMaker requirements. For more information, see Using Your Own Algorithms with Amazon SageMaker

" + "documentation":"

The Amazon EC2 Container Registry (Amazon ECR) path where inference code is stored. If you are using your own custom algorithm instead of an algorithm provided by Amazon SageMaker, the inference code must meet Amazon SageMaker requirements. Amazon SageMaker supports both registry/repository[:tag] and registry/repository[@digest] image path formats. For more information, see Using Your Own Algorithms with Amazon SageMaker

" }, "ModelDataUrl":{ "shape":"Url", - "documentation":"

The S3 path where the model artifacts, which result from model training, are stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix).

" + "documentation":"

The S3 path where the model artifacts, which result from model training, are stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix).

If you provide a value for this parameter, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provide. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

" }, "Environment":{ "shape":"EnvironmentMap", @@ -510,6 +597,35 @@ "type":"string", "max":256 }, + "ContinuousParameterRange":{ + "type":"structure", + "required":[ + "Name", + "MinValue", + "MaxValue" + ], + "members":{ + "Name":{ + "shape":"ParameterKey", + "documentation":"

The name of the continuous hyperparameter to tune.

" + }, + "MinValue":{ + "shape":"ParameterValue", + "documentation":"

The minimum value for the hyperparameter. The tuning job uses floating-point values between this value and MaxValue for tuning.

" + }, + "MaxValue":{ + "shape":"ParameterValue", + "documentation":"

The maximum value for the hyperparameter. The tuning job uses floating-point values between MinValue value and this value for tuning.

" + } + }, + "documentation":"

A list of continuous hyperparameters to tune.

" + }, + "ContinuousParameterRanges":{ + "type":"list", + "member":{"shape":"ContinuousParameterRange"}, + "max":20, + "min":0 + }, "CreateEndpointConfigInput":{ "type":"structure", "required":[ @@ -576,6 +692,42 @@ } } }, + "CreateHyperParameterTuningJobRequest":{ + "type":"structure", + "required":[ + "HyperParameterTuningJobName", + "HyperParameterTuningJobConfig", + "TrainingJobDefinition" + ], + "members":{ + "HyperParameterTuningJobName":{ + "shape":"HyperParameterTuningJobName", + "documentation":"

The name of the tuning job. This name is the prefix for the names of all training jobs that this tuning job launches. The name must be unique within the same AWS account and AWS Region. Names are not case sensitive, and must be between 1-32 characters.

" + }, + "HyperParameterTuningJobConfig":{ + "shape":"HyperParameterTuningJobConfig", + "documentation":"

The HyperParameterTuningJobConfig object that describes the tuning job, including the search strategy, metric used to evaluate training jobs, ranges of parameters to search, and resource limits for the tuning job.

" + }, + "TrainingJobDefinition":{ + "shape":"HyperParameterTrainingJobDefinition", + "documentation":"

The HyperParameterTrainingJobDefinition object that describes the training jobs that this tuning job launches, including static hyperparameters, input data configuration, output data configuration, resource configuration, and stopping condition.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

" + } + } + }, + "CreateHyperParameterTuningJobResponse":{ + "type":"structure", + "required":["HyperParameterTuningJobArn"], + "members":{ + "HyperParameterTuningJobArn":{ + "shape":"HyperParameterTuningJobArn", + "documentation":"

The Amazon Resource Name (ARN) of the tuning job.

" + } + } + }, "CreateModelInput":{ "type":"structure", "required":[ @@ -594,7 +746,7 @@ }, "ExecutionRoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker can assume to access model artifacts and docker image for deployment on ML compute instances. Deploying on ML compute instances is part of model hosting. For more information, see Amazon SageMaker Roles.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker can assume to access model artifacts and docker image for deployment on ML compute instances. Deploying on ML compute instances is part of model hosting. For more information, see Amazon SageMaker Roles.

To be able to pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission.

" }, "Tags":{ "shape":"TagList", @@ -602,7 +754,7 @@ }, "VpcConfig":{ "shape":"VpcConfig", - "documentation":"

A object that specifies the VPC that you want your model to connect to. Control access to and from your training container by configuring the VPC. For more information, see host-vpc.

" + "documentation":"

A VpcConfig object that specifies the VPC that you want your model to connect to. Control access to and from your model container by configuring the VPC. For more information, see host-vpc.

" } } }, @@ -642,7 +794,7 @@ }, "RoleArn":{ "shape":"RoleArn", - "documentation":"

When you send any requests to AWS resources from the notebook instance, Amazon SageMaker assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so Amazon SageMaker can perform these tasks. The policy must allow the Amazon SageMaker service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see Amazon SageMaker Roles.

" + "documentation":"

When you send any requests to AWS resources from the notebook instance, Amazon SageMaker assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so Amazon SageMaker can perform these tasks. The policy must allow the Amazon SageMaker service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see Amazon SageMaker Roles.

To be able to pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission.

" }, "KmsKeyId":{ "shape":"KmsKeyId", @@ -739,7 +891,7 @@ }, "HyperParameters":{ "shape":"HyperParameters", - "documentation":"

Algorithm-specific parameters. You set hyperparameters before you start the learning process. Hyperparameters influence the quality of the model. For a list of hyperparameters for each training algorithm provided by Amazon SageMaker, see Algorithms.

You can specify a maximum of 100 hyperparameters. Each hyperparameter is a key-value pair. Each key and value is limited to 256 characters, as specified by the Length Constraint.

" + "documentation":"

Algorithm-specific parameters that influence the quality of the model. You set hyperparameters before you start the learning process. For a list of hyperparameters for each training algorithm provided by Amazon SageMaker, see Algorithms.

You can specify a maximum of 100 hyperparameters. Each hyperparameter is a key-value pair. Each key and value is limited to 256 characters, as specified by the Length Constraint.

" }, "AlgorithmSpecification":{ "shape":"AlgorithmSpecification", @@ -747,7 +899,7 @@ }, "RoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.

During model training, Amazon SageMaker needs your permission to read input data from an S3 bucket, download a Docker image that contains training code, write model artifacts to an S3 bucket, write logs to Amazon CloudWatch Logs, and publish metrics to Amazon CloudWatch. You grant permissions for all of these tasks to an IAM role. For more information, see Amazon SageMaker Roles.

" + "documentation":"

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.

During model training, Amazon SageMaker needs your permission to read input data from an S3 bucket, download a Docker image that contains training code, write model artifacts to an S3 bucket, write logs to Amazon CloudWatch Logs, and publish metrics to Amazon CloudWatch. You grant permissions for all of these tasks to an IAM role. For more information, see Amazon SageMaker Roles.

To be able to pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission.

" }, "InputDataConfig":{ "shape":"InputDataConfig", @@ -763,7 +915,7 @@ }, "VpcConfig":{ "shape":"VpcConfig", - "documentation":"

A object that specifies the VPC that you want your training job to connect to. Control access to and from your training container by configuring the VPC. For more information, see train-vpc

" + "documentation":"

A VpcConfig object that specifies the VPC that you want your training job to connect to. Control access to and from your training container by configuring the VPC. For more information, see train-vpc

" }, "StoppingCondition":{ "shape":"StoppingCondition", @@ -869,6 +1021,28 @@ "members":{ } }, + "DeployedImage":{ + "type":"structure", + "members":{ + "SpecifiedImage":{ + "shape":"Image", + "documentation":"

The image path you specified when you created the model.

" + }, + "ResolvedImage":{ + "shape":"Image", + "documentation":"

The specific digest path of the image hosted in this ProductionVariant.

" + }, + "ResolutionTime":{ + "shape":"Timestamp", + "documentation":"

The date and time when the image path for the model resolved to the ResolvedImage

" + } + }, + "documentation":"

Gets the Amazon EC2 Container Registry path of the docker image of the model that is hosted in this ProductionVariant.

If you used the registry/repository[:tag] form to specify the image path of the primary container when you created the model hosted in this ProductionVariant, the path resolves to a path of the form registry/repository[@digest]. A digest is a hash value that identifies a specific version of an image. For information about Amazon ECR paths, see Pulling an Image in the Amazon ECR User Guide.

" + }, + "DeployedImages":{ + "type":"list", + "member":{"shape":"DeployedImage"} + }, "DescribeEndpointConfigInput":{ "type":"structure", "required":["EndpointConfigName"], @@ -945,7 +1119,7 @@ }, "ProductionVariants":{ "shape":"ProductionVariantSummaryList", - "documentation":"

An array of ProductionVariant objects, one for each model hosted behind this endpoint.

" + "documentation":"

An array of ProductionVariantSummary objects, one for each model hosted behind this endpoint.

" }, "EndpointStatus":{ "shape":"EndpointStatus", @@ -965,6 +1139,79 @@ } } }, + "DescribeHyperParameterTuningJobRequest":{ + "type":"structure", + "required":["HyperParameterTuningJobName"], + "members":{ + "HyperParameterTuningJobName":{ + "shape":"HyperParameterTuningJobName", + "documentation":"

The name of the tuning job to describe.

" + } + } + }, + "DescribeHyperParameterTuningJobResponse":{ + "type":"structure", + "required":[ + "HyperParameterTuningJobName", + "HyperParameterTuningJobArn", + "HyperParameterTuningJobConfig", + "TrainingJobDefinition", + "HyperParameterTuningJobStatus", + "CreationTime", + "TrainingJobStatusCounters", + "ObjectiveStatusCounters" + ], + "members":{ + "HyperParameterTuningJobName":{ + "shape":"HyperParameterTuningJobName", + "documentation":"

The name of the tuning job.

" + }, + "HyperParameterTuningJobArn":{ + "shape":"HyperParameterTuningJobArn", + "documentation":"

The Amazon Resource Name (ARN) of the tuning job.

" + }, + "HyperParameterTuningJobConfig":{ + "shape":"HyperParameterTuningJobConfig", + "documentation":"

The HyperParameterTuningJobConfig object that specifies the configuration of the tuning job.

" + }, + "TrainingJobDefinition":{ + "shape":"HyperParameterTrainingJobDefinition", + "documentation":"

The HyperParameterTrainingJobDefinition object that specifies the definition of the training jobs that this tuning job launches.

" + }, + "HyperParameterTuningJobStatus":{ + "shape":"HyperParameterTuningJobStatus", + "documentation":"

The status of the tuning job: InProgress, Completed, Failed, Stopping, or Stopped.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The date and time that the tuning job started.

" + }, + "HyperParameterTuningEndTime":{ + "shape":"Timestamp", + "documentation":"

The date and time that the tuning job ended.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The date and time that the status of the tuning job was modified.

" + }, + "TrainingJobStatusCounters":{ + "shape":"TrainingJobStatusCounters", + "documentation":"

The TrainingJobStatusCounters object that specifies the number of training jobs, categorized by status, that this tuning job launched.

" + }, + "ObjectiveStatusCounters":{ + "shape":"ObjectiveStatusCounters", + "documentation":"

The ObjectiveStatusCounters object that specifies the number of training jobs, categorized by the status of their final objective metric, that this tuning job launched.

" + }, + "BestTrainingJob":{ + "shape":"HyperParameterTrainingJobSummary", + "documentation":"

A TrainingJobSummary object that describes the training job that completed with the best current HyperParameterTuningJobObjective.

" + }, + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

If the tuning job failed, the reason it failed.

" + } + } + }, "DescribeModelInput":{ "type":"structure", "required":["ModelName"], @@ -999,7 +1246,7 @@ }, "VpcConfig":{ "shape":"VpcConfig", - "documentation":"

A object that specifies the VPC that this model has access to. For more information, see host-vpc

" + "documentation":"

A VpcConfig object that specifies the VPC that this model has access to. For more information, see host-vpc

" }, "CreationTime":{ "shape":"Timestamp", @@ -1158,6 +1405,10 @@ "shape":"TrainingJobArn", "documentation":"

The Amazon Resource Name (ARN) of the training job.

" }, + "TuningJobArn":{ + "shape":"HyperParameterTuningJobArn", + "documentation":"

The Amazon Resource Name (ARN) of the associated hyperparameter tuning job if the training job was launched by a hyperparameter tuning job.

" + }, "ModelArtifacts":{ "shape":"ModelArtifacts", "documentation":"

Information about the Amazon S3 location that is configured for storing model artifacts.

" @@ -1200,7 +1451,7 @@ }, "VpcConfig":{ "shape":"VpcConfig", - "documentation":"

A object that specifies the VPC that this training job has access to. For more information, see train-vpc.

" + "documentation":"

A VpcConfig object that specifies the VPC that this training job has access to. For more information, see train-vpc.

" }, "StoppingCondition":{ "shape":"StoppingCondition", @@ -1212,11 +1463,11 @@ }, "TrainingStartTime":{ "shape":"Timestamp", - "documentation":"

A timestamp that indicates when training started.

" + "documentation":"

Indicates the time when the training job starts on training instances. You are billed for the time interval between this time and the value of TrainingEndTime. The start time in CloudWatch Logs might be later than this time. The difference is due to the time it takes to download the training data and to the size of the training container.

" }, "TrainingEndTime":{ "shape":"Timestamp", - "documentation":"

A timestamp that indicates when model training ended.

" + "documentation":"

Indicates the time when the training job ends on training instances. You are billed for the time interval between the value of TrainingStartTime and this time. For successful jobs and stopped jobs, this is the time after model artifacts are uploaded. For failed jobs, this is the time when Amazon SageMaker detects a job failure.

" }, "LastModifiedTime":{ "shape":"Timestamp", @@ -1393,6 +1644,299 @@ "type":"string", "max":1024 }, + "FinalHyperParameterTuningJobObjectiveMetric":{ + "type":"structure", + "required":[ + "MetricName", + "Value" + ], + "members":{ + "Type":{ + "shape":"HyperParameterTuningJobObjectiveType", + "documentation":"

Whether to minimize or maximize the objective metric. Valid values are Minimize and Maximize.

" + }, + "MetricName":{ + "shape":"MetricName", + "documentation":"

The name of the objective metric.

" + }, + "Value":{ + "shape":"MetricValue", + "documentation":"

The value of the objective metric.

" + } + }, + "documentation":"

Shows the final value for the objective metric for a training job that was launched by a hyperparameter tuning job. You define the objective metric in the HyperParameterTuningJobObjective parameter of HyperParameterTuningJobConfig.

" + }, + "HyperParameterAlgorithmSpecification":{ + "type":"structure", + "required":[ + "TrainingImage", + "TrainingInputMode" + ], + "members":{ + "TrainingImage":{ + "shape":"AlgorithmImage", + "documentation":"

The registry path of the Docker image that contains the training algorithm. For information about Docker registry paths for built-in algorithms, see sagemaker-algo-docker-registry-paths.

" + }, + "TrainingInputMode":{ + "shape":"TrainingInputMode", + "documentation":"

The input mode that the algorithm supports: File or Pipe. In File input mode, Amazon SageMaker downloads the training data from Amazon S3 to the storage volume that is attached to the training instance and mounts the directory to the Docker volume for the training container. In Pipe input mode, Amazon SageMaker streams data directly from Amazon S3 to the container.

If you specify File mode, make sure that you provision the storage volume that is attached to the training instance with enough capacity to accommodate the training data downloaded from Amazon S3, the model artifacts, and intermediate information.

For more information about input modes, see Algorithms.

" + }, + "MetricDefinitions":{ + "shape":"MetricDefinitionList", + "documentation":"

An array of MetricDefinition objects that specify the metrics that the algorithm emits.

" + } + }, + "documentation":"

Specifies which training algorithm to use for training jobs that a hyperparameter tuning job launches and the metrics to monitor.

" + }, + "HyperParameterTrainingJobDefinition":{ + "type":"structure", + "required":[ + "AlgorithmSpecification", + "RoleArn", + "InputDataConfig", + "OutputDataConfig", + "ResourceConfig", + "StoppingCondition" + ], + "members":{ + "StaticHyperParameters":{ + "shape":"HyperParameters", + "documentation":"

Specifies the values of hyperparameters that do not change for the tuning job.

" + }, + "AlgorithmSpecification":{ + "shape":"HyperParameterAlgorithmSpecification", + "documentation":"

The HyperParameterAlgorithmSpecification object that specifies the algorithm to use for the training jobs that the tuning job launches.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role associated with the training jobs that the tuning job launches.

" + }, + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

An array of Channel objects that specify the input for the training jobs that the tuning job launches.

" + }, + "VpcConfig":{ + "shape":"VpcConfig", + "documentation":"

The VpcConfig object that specifies the VPC that you want the training jobs that this hyperparameter tuning job launches to connect to. Control access to and from your training container by configuring the VPC. For more information, see train-vpc.

" + }, + "OutputDataConfig":{ + "shape":"OutputDataConfig", + "documentation":"

Specifies the path to the Amazon S3 bucket where you store model artifacts from the training jobs that the tuning job launches.

" + }, + "ResourceConfig":{ + "shape":"ResourceConfig", + "documentation":"

The resources, including the compute instances and storage volumes, to use for the training jobs that the tuning job launches.

Storage volumes store model artifacts and incremental states. Training algorithms might also use storage volumes for scratch space. If you want Amazon SageMaker to use the storage volume to store the training data, choose File as the TrainingInputMode in the algorithm specification. For distributed training algorithms, specify an instance count greater than 1.

" + }, + "StoppingCondition":{ + "shape":"StoppingCondition", + "documentation":"

Sets a maximum duration for the training jobs that the tuning job launches. Use this parameter to limit model training costs.

To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal. This delays job termination for 120 seconds. Algorithms might use this 120-second window to save the model artifacts.

When Amazon SageMaker terminates a job because the stopping condition has been met, training algorithms provided by Amazon SageMaker save the intermediate results of the job.

" + } + }, + "documentation":"

Defines the training jobs launched by a hyperparameter tuning job.

" + }, + "HyperParameterTrainingJobSummaries":{ + "type":"list", + "member":{"shape":"HyperParameterTrainingJobSummary"} + }, + "HyperParameterTrainingJobSummary":{ + "type":"structure", + "required":[ + "TrainingJobName", + "TrainingJobArn", + "CreationTime", + "TrainingJobStatus", + "TunedHyperParameters" + ], + "members":{ + "TrainingJobName":{ + "shape":"TrainingJobName", + "documentation":"

The name of the training job.

" + }, + "TrainingJobArn":{ + "shape":"TrainingJobArn", + "documentation":"

The Amazon Resource Name (ARN) of the training job.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The date and time that the training job was created.

" + }, + "TrainingStartTime":{ + "shape":"Timestamp", + "documentation":"

The date and time that the training job started.

" + }, + "TrainingEndTime":{ + "shape":"Timestamp", + "documentation":"

The date and time that the training job ended.

" + }, + "TrainingJobStatus":{ + "shape":"TrainingJobStatus", + "documentation":"

The status of the training job.

" + }, + "TunedHyperParameters":{ + "shape":"HyperParameters", + "documentation":"

A list of the hyperparameters for which you specified ranges to search.

" + }, + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

The reason that the training job failed.

" + }, + "FinalHyperParameterTuningJobObjectiveMetric":{ + "shape":"FinalHyperParameterTuningJobObjectiveMetric", + "documentation":"

The FinalHyperParameterTuningJobObjectiveMetric object that specifies the value of the objective metric of the tuning job that launched this training job.

" + }, + "ObjectiveStatus":{ + "shape":"ObjectiveStatus", + "documentation":"

The status of the objective metric for the training job:

  • Succeeded: The final objective metric for the training job was evaluated by the hyperparameter tuning job and used in the hyperparameter tuning process.

  • Pending: The training job is in progress and evaluation of its final objective metric is pending.

  • Failed: The final objective metric for the training job was not evaluated, and was not used in the hyperparameter tuning process. This typically occurs when the training job failed or did not emit an objective metric.

" + } + }, + "documentation":"

Specifies summary information about a training job.

" + }, + "HyperParameterTuningJobArn":{ + "type":"string", + "max":256, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:hyper-parameter-tuning-job/.*" + }, + "HyperParameterTuningJobConfig":{ + "type":"structure", + "required":[ + "Strategy", + "HyperParameterTuningJobObjective", + "ResourceLimits", + "ParameterRanges" + ], + "members":{ + "Strategy":{ + "shape":"HyperParameterTuningJobStrategyType", + "documentation":"

Specifies the search strategy for hyperparameters. Currently, the only valid value is Bayesian.

" + }, + "HyperParameterTuningJobObjective":{ + "shape":"HyperParameterTuningJobObjective", + "documentation":"

The HyperParameterTuningJobObjective object that specifies the objective metric for this tuning job.

" + }, + "ResourceLimits":{ + "shape":"ResourceLimits", + "documentation":"

The ResourceLimits object that specifies the maximum number of training jobs and parallel training jobs for this tuning job.

" + }, + "ParameterRanges":{ + "shape":"ParameterRanges", + "documentation":"

The ParameterRanges object that specifies the ranges of hyperparameters that this tuning job searches.

" + } + }, + "documentation":"

Configures a hyperparameter tuning job.

" + }, + "HyperParameterTuningJobName":{ + "type":"string", + "max":32, + "min":1, + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + }, + "HyperParameterTuningJobObjective":{ + "type":"structure", + "required":[ + "Type", + "MetricName" + ], + "members":{ + "Type":{ + "shape":"HyperParameterTuningJobObjectiveType", + "documentation":"

Whether to minimize or maximize the objective metric.

" + }, + "MetricName":{ + "shape":"MetricName", + "documentation":"

The name of the metric to use for the objective metric.

" + } + }, + "documentation":"

Defines the objective metric for a hyperparameter tuning job. Hyperparameter tuning uses the value of this metric to evaluate the training jobs it launches, and returns the training job that results in either the highest or lowest value for this metric, depending on the value you specify for the Type parameter.

" + }, + "HyperParameterTuningJobObjectiveType":{ + "type":"string", + "enum":[ + "Maximize", + "Minimize" + ] + }, + "HyperParameterTuningJobSortByOptions":{ + "type":"string", + "enum":[ + "Name", + "Status", + "CreationTime" + ] + }, + "HyperParameterTuningJobStatus":{ + "type":"string", + "enum":[ + "Completed", + "InProgress", + "Failed", + "Stopped", + "Stopping" + ] + }, + "HyperParameterTuningJobStrategyType":{ + "type":"string", + "documentation":"

The strategy hyperparameter tuning uses to find the best combination of hyperparameters for your model. Currently, the only supported value is Bayesian.

", + "enum":["Bayesian"] + }, + "HyperParameterTuningJobSummaries":{ + "type":"list", + "member":{"shape":"HyperParameterTuningJobSummary"} + }, + "HyperParameterTuningJobSummary":{ + "type":"structure", + "required":[ + "HyperParameterTuningJobName", + "HyperParameterTuningJobArn", + "HyperParameterTuningJobStatus", + "Strategy", + "CreationTime", + "TrainingJobStatusCounters", + "ObjectiveStatusCounters" + ], + "members":{ + "HyperParameterTuningJobName":{ + "shape":"HyperParameterTuningJobName", + "documentation":"

The name of the tuning job.

" + }, + "HyperParameterTuningJobArn":{ + "shape":"HyperParameterTuningJobArn", + "documentation":"

The Amazon Resource Name (ARN) of the tuning job.

" + }, + "HyperParameterTuningJobStatus":{ + "shape":"HyperParameterTuningJobStatus", + "documentation":"

The status of the tuning job.

" + }, + "Strategy":{ + "shape":"HyperParameterTuningJobStrategyType", + "documentation":"

Specifies the search strategy hyperparameter tuning uses to choose which hyperparameters to use for each iteration. Currently, the only valid value is Bayesian.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The date and time that the tuning job was created.

" + }, + "HyperParameterTuningEndTime":{ + "shape":"Timestamp", + "documentation":"

The date and time that the tuning job ended.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The date and time that the tuning job was modified.

" + }, + "TrainingJobStatusCounters":{ + "shape":"TrainingJobStatusCounters", + "documentation":"

The TrainingJobStatusCounters object that specifies the numbers of training jobs, categorized by status, that this tuning job launched.

" + }, + "ObjectiveStatusCounters":{ + "shape":"ObjectiveStatusCounters", + "documentation":"

The ObjectiveStatusCounters object that specifies the numbers of training jobs, categorized by objective metric status, that this tuning job launched.

" + }, + "ResourceLimits":{ + "shape":"ResourceLimits", + "documentation":"

The ResourceLimits object that specifies the maximum number of training jobs and parallel training jobs allowed for this tuning job.

" + } + }, + "documentation":"

Provides summary information about a hyperparameter tuning job.

" + }, "HyperParameters":{ "type":"map", "key":{"shape":"ParameterKey"}, @@ -1431,6 +1975,35 @@ "ml.p3.16xlarge" ] }, + "IntegerParameterRange":{ + "type":"structure", + "required":[ + "Name", + "MinValue", + "MaxValue" + ], + "members":{ + "Name":{ + "shape":"ParameterKey", + "documentation":"

The name of the hyperparameter to search.

" + }, + "MinValue":{ + "shape":"ParameterValue", + "documentation":"

The minimum value of the hyperparameter to search.

" + }, + "MaxValue":{ + "shape":"ParameterValue", + "documentation":"

The maximum value of the hyperparameter to search.

" + } + }, + "documentation":"

For a hyperparameter of the integer type, specifies the range that a hyperparameter tuning job searches.

" + }, + "IntegerParameterRanges":{ + "type":"list", + "member":{"shape":"IntegerParameterRange"}, + "max":20, + "min":0 + }, "KmsKeyId":{ "type":"string", "max":2048 @@ -1542,6 +2115,66 @@ } } }, + "ListHyperParameterTuningJobsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the result of the previous ListHyperParameterTuningJobs request was truncated, the response includes a NextToken. To retrieve the next set of tuning jobs, use the token in the next request.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of tuning jobs to return. The default value is 10.

", + "box":true + }, + "SortBy":{ + "shape":"HyperParameterTuningJobSortByOptions", + "documentation":"

The field to sort results by. The default is Name.

" + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

The sort order for results. The default is Ascending.

" + }, + "NameContains":{ + "shape":"NameContains", + "documentation":"

A string in the tuning job name. This filter returns only tuning jobs whose name contains the specified string.

" + }, + "CreationTimeAfter":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only tuning jobs that were created after the specified time.

" + }, + "CreationTimeBefore":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only tuning jobs that were created before the specified time.

" + }, + "LastModifiedTimeAfter":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only tuning jobs that were modified after the specified time.

" + }, + "LastModifiedTimeBefore":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only tuning jobs that were modified before the specified time.

" + }, + "StatusEquals":{ + "shape":"HyperParameterTuningJobStatus", + "documentation":"

A filter that returns only tuning jobs with the specified status.

" + } + } + }, + "ListHyperParameterTuningJobsResponse":{ + "type":"structure", + "required":["HyperParameterTuningJobSummaries"], + "members":{ + "HyperParameterTuningJobSummaries":{ + "shape":"HyperParameterTuningJobSummaries", + "documentation":"

A list of HyperParameterTuningJobSummary objects that describe the tuning jobs that the ListHyperParameterTuningJobs request returned.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the result of this ListHyperParameterTuningJobs request was truncated, the response includes a NextToken. To retrieve the next set of tuning jobs, use the token in the next request.

" + } + } + }, "ListModelsInput":{ "type":"structure", "members":{ @@ -1740,6 +2373,50 @@ } } }, + "ListTrainingJobsForHyperParameterTuningJobRequest":{ + "type":"structure", + "required":["HyperParameterTuningJobName"], + "members":{ + "HyperParameterTuningJobName":{ + "shape":"HyperParameterTuningJobName", + "documentation":"

The name of the tuning job whose training jobs you want to list.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the result of the previous ListTrainingJobsForHyperParameterTuningJob request was truncated, the response includes a NextToken. To retrieve the next set of training jobs, use the token in the next request.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of training jobs to return. The default value is 10.

" + }, + "StatusEquals":{ + "shape":"TrainingJobStatus", + "documentation":"

A filter that returns only training jobs with the specified status.

" + }, + "SortBy":{ + "shape":"TrainingJobSortByOptions", + "documentation":"

The field to sort results by. The default is Name.

If the value of this field is FinalObjectiveMetricValue, any training jobs that did not return an objective metric are not listed.

" + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

The sort order for results. The default is Ascending.

" + } + } + }, + "ListTrainingJobsForHyperParameterTuningJobResponse":{ + "type":"structure", + "required":["TrainingJobSummaries"], + "members":{ + "TrainingJobSummaries":{ + "shape":"HyperParameterTrainingJobSummaries", + "documentation":"

A list of TrainingJobSummary objects that describe the training jobs that the ListTrainingJobsForHyperParameterTuningJob request returned.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the result of this ListTrainingJobsForHyperParameterTuningJob request was truncated, the response includes a NextToken. To retrieve the next set of training jobs, use the token in the next request.

" + } + } + }, "ListTrainingJobsRequest":{ "type":"structure", "members":{ @@ -1770,7 +2447,7 @@ }, "NameContains":{ "shape":"NameContains", - "documentation":"

A string in the training job name. This filter returns only models whose name contains the specified string.

" + "documentation":"

A string in the training job name. This filter returns only training jobs whose name contains the specified string.

" }, "StatusEquals":{ "shape":"TrainingJobStatus", @@ -1800,6 +2477,14 @@ } } }, + "MaxNumberOfTrainingJobs":{ + "type":"integer", + "min":1 + }, + "MaxParallelTrainingJobs":{ + "type":"integer", + "min":1 + }, "MaxResults":{ "type":"integer", "max":100, @@ -1809,6 +2494,41 @@ "type":"integer", "min":1 }, + "MetricDefinition":{ + "type":"structure", + "required":[ + "Name", + "Regex" + ], + "members":{ + "Name":{ + "shape":"MetricName", + "documentation":"

The name of the metric.

" + }, + "Regex":{ + "shape":"MetricRegex", + "documentation":"

A regular expression that searches the output of a training job and gets the value of the metric. For more information about using regular expressions to define metrics, see automatic-model-tuning-define-metrics.

" + } + }, + "documentation":"

Specifies a metric that the training algorithm writes to stderr or stdout. Amazon SageMaker hyperparameter tuning captures all defined metrics. You specify one metric that a hyperparameter tuning job uses as its objective metric to choose the best training job.

" + }, + "MetricDefinitionList":{ + "type":"list", + "member":{"shape":"MetricDefinition"}, + "max":20, + "min":0 + }, + "MetricName":{ + "type":"string", + "max":255, + "min":1 + }, + "MetricRegex":{ + "type":"string", + "max":500, + "min":1 + }, + "MetricValue":{"type":"float"}, "ModelArn":{ "type":"string", "max":2048, @@ -1992,7 +2712,8 @@ "Stopping", "Stopped", "Failed", - "Deleting" + "Deleting", + "Updating" ] }, "NotebookInstanceSummary":{ @@ -2042,6 +2763,36 @@ "member":{"shape":"NotebookInstanceSummary"} }, "NotebookInstanceUrl":{"type":"string"}, + "ObjectiveStatus":{ + "type":"string", + "enum":[ + "Succeeded", + "Pending", + "Failed" + ] + }, + "ObjectiveStatusCounter":{ + "type":"integer", + "min":0 + }, + "ObjectiveStatusCounters":{ + "type":"structure", + "members":{ + "Succeeded":{ + "shape":"ObjectiveStatusCounter", + "documentation":"

The number of training jobs whose final objective metric was evaluated by the hyperparameter tuning job and used in the hyperparameter tuning process.

" + }, + "Pending":{ + "shape":"ObjectiveStatusCounter", + "documentation":"

The number of training jobs that are in progress and pending evaluation of their final objective metric.

" + }, + "Failed":{ + "shape":"ObjectiveStatusCounter", + "documentation":"

The number of training jobs whose final objective metric was not evaluated and used in the hyperparameter tuning process. This typically occurs when the training job failed or did not emit an objective metric.

" + } + }, + "documentation":"

Specifies the number of training jobs that this hyperparameter tuning job launched, categorized by the status of their objective metric. The objective metric status shows whether the final objective metric for the training job has been evaluated by the tuning job and used in the hyperparameter tuning process.

" + }, "OrderKey":{ "type":"string", "enum":[ @@ -2055,7 +2806,7 @@ "members":{ "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.

If the configuration of the output S3 bucket requires server-side encryption for objects, and you don't provide the KMS key ID, Amazon SageMaker uses the default service key. For more information, see KMS-Managed Encryption Keys in Amazon Simple Storage Service developer guide.

The KMS key policy must grant permission to the IAM role you specify in your CreateTrainingJob request. Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.

" + "documentation":"

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.

If you don't provide the KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in Amazon Simple Storage Service developer guide.

The KMS key policy must grant permission to the IAM role you specify in your CreateTrainingJob request. Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.

" }, "S3OutputPath":{ "shape":"S3Uri", @@ -2072,10 +2823,34 @@ "type":"string", "max":256 }, + "ParameterRanges":{ + "type":"structure", + "members":{ + "IntegerParameterRanges":{ + "shape":"IntegerParameterRanges", + "documentation":"

The array of IntegerParameterRange objects that specify ranges of integer hyperparameters that a hyperparameter tuning job searches.

" + }, + "ContinuousParameterRanges":{ + "shape":"ContinuousParameterRanges", + "documentation":"

The array of ContinuousParameterRange objects that specify ranges of continuous hyperparameters that a hyperparameter tuning job searches.

" + }, + "CategoricalParameterRanges":{ + "shape":"CategoricalParameterRanges", + "documentation":"

The array of CategoricalParameterRange objects that specify ranges of categorical hyperparameters that a hyperparameter tuning job searches.

" + } + }, + "documentation":"

Specifies ranges of integer, continuous, and categorical hyperparameters that a hyperparameter tuning job searches.

" + }, "ParameterValue":{ "type":"string", "max":256 }, + "ParameterValues":{ + "type":"list", + "member":{"shape":"ParameterValue"}, + "max":20, + "min":1 + }, "ProductionVariant":{ "type":"structure", "required":[ @@ -2158,6 +2933,10 @@ "shape":"VariantName", "documentation":"

The name of the variant.

" }, + "DeployedImages":{ + "shape":"DeployedImages", + "documentation":"

An array of DeployedImage objects that specify the Amazon EC2 Container Registry paths of the inference images deployed on instances of this ProductionVariant.

" + }, "CurrentWeight":{ "shape":"VariantWeight", "documentation":"

The weight associated with the variant.

" @@ -2236,6 +3015,24 @@ "documentation":"

You have exceeded an Amazon SageMaker resource limit. For example, you might have too many training jobs created.

", "exception":true }, + "ResourceLimits":{ + "type":"structure", + "required":[ + "MaxNumberOfTrainingJobs", + "MaxParallelTrainingJobs" + ], + "members":{ + "MaxNumberOfTrainingJobs":{ + "shape":"MaxNumberOfTrainingJobs", + "documentation":"

The maximum number of training jobs that a hyperparameter tuning job can launch.

" + }, + "MaxParallelTrainingJobs":{ + "shape":"MaxParallelTrainingJobs", + "documentation":"

The maximum number of concurrent training jobs that a hyperparameter tuning job can launch.

" + } + }, + "documentation":"

Specifies the maximum number of training jobs and parallel training jobs that a hyperparameter tuning job can launch.

" + }, "ResourceNotFound":{ "type":"structure", "members":{ @@ -2344,6 +3141,16 @@ } } }, + "StopHyperParameterTuningJobRequest":{ + "type":"structure", + "required":["HyperParameterTuningJobName"], + "members":{ + "HyperParameterTuningJobName":{ + "shape":"HyperParameterTuningJobName", + "documentation":"

The name of the tuning job to stop.

" + } + } + }, "StopNotebookInstanceInput":{ "type":"structure", "required":["NotebookInstanceName"], @@ -2476,7 +3283,7 @@ "TrainingJobArn":{ "type":"string", "max":256, - "pattern":"arn:aws[a-z\\-]*:sagemaker:[\\p{Alnum}\\-]*:[0-9]{12}:training-job/.*" + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:training-job/.*" }, "TrainingJobName":{ "type":"string", @@ -2484,6 +3291,15 @@ "min":1, "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" }, + "TrainingJobSortByOptions":{ + "type":"string", + "enum":[ + "Name", + "CreationTime", + "Status", + "FinalObjectiveMetricValue" + ] + }, "TrainingJobStatus":{ "type":"string", "enum":[ @@ -2494,6 +3310,36 @@ "Stopped" ] }, + "TrainingJobStatusCounter":{ + "type":"integer", + "min":0 + }, + "TrainingJobStatusCounters":{ + "type":"structure", + "members":{ + "Completed":{ + "shape":"TrainingJobStatusCounter", + "documentation":"

The number of completed training jobs launched by a hyperparameter tuning job.

" + }, + "InProgress":{ + "shape":"TrainingJobStatusCounter", + "documentation":"

The number of in-progress training jobs launched by a hyperparameter tuning job.

" + }, + "RetryableError":{ + "shape":"TrainingJobStatusCounter", + "documentation":"

The number of training jobs that failed, but can be retried. A failed training job can be retried only if it failed because an internal service error occurred.

" + }, + "NonRetryableError":{ + "shape":"TrainingJobStatusCounter", + "documentation":"

The number of training jobs that failed and can't be retried. A failed training job can't be retried if it failed because a client error occurred.

" + }, + "Stopped":{ + "shape":"TrainingJobStatusCounter", + "documentation":"

The number of training jobs launched by a hyperparameter tuning job that were manually stopped.

" + } + }, + "documentation":"

The numbers of training jobs launched by a hyperparameter tuning job, categorized by status.

" + }, "TrainingJobSummaries":{ "type":"list", "member":{"shape":"TrainingJobSummary"} @@ -2602,7 +3448,7 @@ }, "RoleArn":{ "shape":"RoleArn", - "documentation":"

Amazon Resource Name (ARN) of the IAM role to associate with the instance.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker can assume to access the notebook instance. For more information, see Amazon SageMaker Roles.

To be able to pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission.

" } } }, diff --git a/botocore/data/sagemaker/2017-07-24/waiters-2.json b/botocore/data/sagemaker/2017-07-24/waiters-2.json index 4dded387..0fbb61eb 100644 --- a/botocore/data/sagemaker/2017-07-24/waiters-2.json +++ b/botocore/data/sagemaker/2017-07-24/waiters-2.json @@ -21,7 +21,7 @@ ] }, "NotebookInstanceStopped": { - "delay": 30, + "delay": 30, "operation": "DescribeNotebookInstance", "maxAttempts": 60, "acceptors": [ @@ -38,7 +38,7 @@ "argument": "NotebookInstanceStatus" } ] - }, + }, "NotebookInstanceDeleted": { "delay": 30, "maxAttempts": 60, @@ -48,7 +48,7 @@ "expected": "ValidationException", "matcher": "error", "state": "success" - }, + }, { "expected": "Failed", "matcher": "path", diff --git a/botocore/data/sdb/2009-04-15/service-2.json b/botocore/data/sdb/2009-04-15/service-2.json index 697c0dde..1f243edc 100644 --- a/botocore/data/sdb/2009-04-15/service-2.json +++ b/botocore/data/sdb/2009-04-15/service-2.json @@ -4,6 +4,7 @@ "apiVersion":"2009-04-15", "endpointPrefix":"sdb", "serviceFullName":"Amazon SimpleDB", + "serviceId":"SimpleDB", "signatureVersion":"v2", "xmlNamespace":"http://sdb.amazonaws.com/doc/2009-04-15/", "protocol":"query" diff --git a/botocore/data/secretsmanager/2017-10-17/service-2.json b/botocore/data/secretsmanager/2017-10-17/service-2.json index 8db0ca6d..a093e72f 100644 --- a/botocore/data/secretsmanager/2017-10-17/service-2.json +++ b/botocore/data/secretsmanager/2017-10-17/service-2.json @@ -45,9 +45,25 @@ {"shape":"ResourceExistsException"}, {"shape":"ResourceNotFoundException"}, {"shape":"MalformedPolicyDocumentException"}, - {"shape":"InternalServiceError"} + {"shape":"InternalServiceError"}, + {"shape":"PreconditionNotMetException"} ], - "documentation":"

Creates a new secret. A secret in Secrets Manager consists of both the protected secret data and the important information needed to manage the secret.

Secrets Manager stores the encrypted secret data in one of a collection of \"versions\" associated with the secret. Each version contains a copy of the encrypted secret data. Each version is associated with one or more \"staging labels\" that identify where the version is in the rotation cycle. The SecretVersionsToStages field of the secret contains the mapping of staging labels to the active versions of the secret. Versions without a staging label are considered deprecated and are not included in the list.

You provide the secret data to be encrypted by putting text in either the SecretString parameter or binary data in the SecretBinary parameter, but not both. If you include SecretString or SecretBinary then Secrets Manager also creates an initial secret version and automatically attaches the staging label AWSCURRENT to the new version.

  • If you call an operation that needs to encrypt or decrypt the SecretString or SecretBinary for a secret in the same account as the calling user and that secret doesn't specify a KMS encryption key, Secrets Manager uses the account's default AWS managed customer master key (CMK) with the alias aws/secretsmanager. If this key doesn't already exist in your account then Secrets Manager creates it for you automatically. All users in the same AWS account automatically have access to use the default CMK. Note that if an Secrets Manager API call results in AWS having to create the account's AWS-managed CMK, it can result in a one-time significant delay in returning the result.

  • If the secret is in a different AWS account from the credentials calling an API that requires encryption or decryption of the secret value then you must create and use a custom KMS CMK because you can't access the default CMK for the account using credentials from a different AWS account. Store the ARN of the CMK in the secret when you create the secret or when you update it by including it in the KMSKeyId. If you call an API that must encrypt or decrypt SecretString or SecretBinary using credentials from a different account then the KMS key policy must grant cross-account access to that other account's user or role for both the kms:GenerateDataKey and kms:Decrypt operations.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:CreateSecret

  • kms:GenerateDataKey - needed only if you use a customer-created KMS key to encrypt the secret. You do not need this permission to use the account's default AWS managed CMK for Secrets Manager.

  • kms:Decrypt - needed only if you use a customer-created KMS key to encrypt the secret. You do not need this permission to use the account's default AWS managed CMK for Secrets Manager.

Related operations

  • To delete a secret, use DeleteSecret.

  • To modify an existing secret, use UpdateSecret.

  • To create a new version of a secret, use PutSecretValue.

  • To retrieve the encrypted secure string and secure binary values, use GetSecretValue.

  • To retrieve all other details for a secret, use DescribeSecret. This does not include the encrypted secure string and secure binary values.

  • To retrieve the list of secret versions associated with the current secret, use DescribeSecret and examine the SecretVersionsToStages response value.

" + "documentation":"

Creates a new secret. A secret in Secrets Manager consists of both the protected secret data and the important information needed to manage the secret.

Secrets Manager stores the encrypted secret data in one of a collection of \"versions\" associated with the secret. Each version contains a copy of the encrypted secret data. Each version is associated with one or more \"staging labels\" that identify where the version is in the rotation cycle. The SecretVersionsToStages field of the secret contains the mapping of staging labels to the active versions of the secret. Versions without a staging label are considered deprecated and are not included in the list.

You provide the secret data to be encrypted by putting text in either the SecretString parameter or binary data in the SecretBinary parameter, but not both. If you include SecretString or SecretBinary then Secrets Manager also creates an initial secret version and automatically attaches the staging label AWSCURRENT to the new version.

  • If you call an operation that needs to encrypt or decrypt the SecretString or SecretBinary for a secret in the same account as the calling user and that secret doesn't specify an AWS KMS encryption key, Secrets Manager uses the account's default AWS managed customer master key (CMK) with the alias aws/secretsmanager. If this key doesn't already exist in your account then Secrets Manager creates it for you automatically. All users in the same AWS account automatically have access to use the default CMK. Note that if a Secrets Manager API call results in AWS having to create the account's AWS-managed CMK, it can result in a one-time significant delay in returning the result.

  • If the secret is in a different AWS account from the credentials calling an API that requires encryption or decryption of the secret value then you must create and use a custom AWS KMS CMK because you can't access the default CMK for the account using credentials from a different AWS account. Store the ARN of the CMK in the secret when you create the secret or when you update it by including it in the KMSKeyId. If you call an API that must encrypt or decrypt SecretString or SecretBinary using credentials from a different account then the AWS KMS key policy must grant cross-account access to that other account's user or role for both the kms:GenerateDataKey and kms:Decrypt operations.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:CreateSecret

  • kms:GenerateDataKey - needed only if you use a customer-managed AWS KMS key to encrypt the secret. You do not need this permission to use the account's default AWS managed CMK for Secrets Manager.

  • kms:Decrypt - needed only if you use a customer-managed AWS KMS key to encrypt the secret. You do not need this permission to use the account's default AWS managed CMK for Secrets Manager.

Related operations

  • To delete a secret, use DeleteSecret.

  • To modify an existing secret, use UpdateSecret.

  • To create a new version of a secret, use PutSecretValue.

  • To retrieve the encrypted secure string and secure binary values, use GetSecretValue.

  • To retrieve all other details for a secret, use DescribeSecret. This does not include the encrypted secure string and secure binary values.

  • To retrieve the list of secret versions associated with the current secret, use DescribeSecret and examine the SecretVersionsToStages response value.

" + }, + "DeleteResourcePolicy":{ + "name":"DeleteResourcePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteResourcePolicyRequest"}, + "output":{"shape":"DeleteResourcePolicyResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceError"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Deletes the resource-based permission policy that's attached to the secret.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:DeleteResourcePolicy

Related operations

  • To attach a resource policy to a secret, use PutResourcePolicy.

  • To retrieve the current resource-based policy that's attached to a secret, use GetResourcePolicy.

  • To list all of the currently available secrets, use ListSecrets.

" }, "DeleteSecret":{ "name":"DeleteSecret", @@ -94,6 +110,21 @@ ], "documentation":"

Generates a random password of the specified complexity. This operation is intended for use in the Lambda rotation function. Per best practice, we recommend that you specify the maximum length and include every character type that the system you are generating a password for can support.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:GetRandomPassword

" }, + "GetResourcePolicy":{ + "name":"GetResourcePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetResourcePolicyRequest"}, + "output":{"shape":"GetResourcePolicyResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceError"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Retrieves the JSON text of the resource-based policy document that's attached to the specified secret. The JSON request string input and response output are shown formatted with white space and line breaks for better readability. Submit your input as a single line JSON string.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:GetResourcePolicy

Related operations

" + }, "GetSecretValue":{ "name":"GetSecretValue", "http":{ @@ -109,7 +140,7 @@ {"shape":"DecryptionFailure"}, {"shape":"InternalServiceError"} ], - "documentation":"

Retrieves the contents of the encrypted fields SecretString or SecretBinary from the specified version of a secret, whichever contains content.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:GetSecretValue

  • kms:Decrypt - required only if you use a customer-created KMS key to encrypt the secret. You do not need this permission to use the account's default AWS managed CMK for Secrets Manager.

Related operations

  • To create a new version of the secret with different encrypted information, use PutSecretValue.

  • To retrieve the non-encrypted details for the secret, use DescribeSecret.

" + "documentation":"

Retrieves the contents of the encrypted fields SecretString or SecretBinary from the specified version of a secret, whichever contains content.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:GetSecretValue

  • kms:Decrypt - required only if you use a customer-managed AWS KMS key to encrypt the secret. You do not need this permission to use the account's default AWS managed CMK for Secrets Manager.

Related operations

  • To create a new version of the secret with different encrypted information, use PutSecretValue.

  • To retrieve the non-encrypted details for the secret, use DescribeSecret.

" }, "ListSecretVersionIds":{ "name":"ListSecretVersionIds", @@ -141,6 +172,23 @@ ], "documentation":"

Lists all of the secrets that are stored by Secrets Manager in the AWS account. To list the versions currently stored for a specific secret, use ListSecretVersionIds. The encrypted fields SecretString and SecretBinary are not included in the output. To get that information, call the GetSecretValue operation.

Always check the NextToken response parameter when calling any of the List* operations. These operations can occasionally return an empty or shorter than expected list of results even when there are more results available. When this happens, the NextToken response parameter contains a value to pass to the next call to the same API to request the next part of the list.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:ListSecrets

Related operations

" }, + "PutResourcePolicy":{ + "name":"PutResourcePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutResourcePolicyRequest"}, + "output":{"shape":"PutResourcePolicyResponse"}, + "errors":[ + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InternalServiceError"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Attaches the contents of the specified resource-based permission policy to a secret. A resource-based policy is optional. Alternatively, you can use IAM identity-based policies that specify the secret's Amazon Resource Name (ARN) in the policy statement's Resources element. You can also use a combination of both identity-based and resource-based policies. The affected users and roles receive the permissions that are permitted by all of the relevant policies. For more information, see Using Resource-Based Policies for AWS Secrets Manager. For the complete description of the AWS policy syntax and grammar, see IAM JSON Policy Reference in the IAM User Guide.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:PutResourcePolicy

Related operations

  • To retrieve the resource policy that's attached to a secret, use GetResourcePolicy.

  • To delete the resource-based policy that's attached to a secret, use DeleteResourcePolicy.

  • To list all of the currently available secrets, use ListSecrets.

" + }, "PutSecretValue":{ "name":"PutSecretValue", "http":{ @@ -158,7 +206,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Stores a new encrypted secret value in the specified secret. To do this, the operation creates a new version and attaches it to the secret. The version can contain a new SecretString value or a new SecretBinary value. You can also specify the staging labels that are initially attached to the new version.

The Secrets Manager console uses only the SecretString field. To add binary data to a secret with the SecretBinary field you must use the AWS CLI or one of the AWS SDKs.

  • If this operation creates the first version for the secret then Secrets Manager automatically attaches the staging label AWSCURRENT to the new version.

  • If another version of this secret already exists, then this operation does not automatically move any staging labels other than those that you explicitly specify in the VersionStages parameter.

  • If this operation moves the staging label AWSCURRENT from another version to this version (because you included it in the StagingLabels parameter) then Secrets Manager also automatically moves the staging label AWSPREVIOUS to the version that AWSCURRENT was removed from.

  • This operation is idempotent. If a version with a SecretVersionId with the same value as the ClientRequestToken parameter already exists and you specify the same secret data, the operation succeeds but does nothing. However, if the secret data is different, then the operation fails because you cannot modify an existing version; you can only create new ones.

  • If you call an operation that needs to encrypt or decrypt the SecretString or SecretBinary for a secret in the same account as the calling user and that secret doesn't specify a KMS encryption key, Secrets Manager uses the account's default AWS managed customer master key (CMK) with the alias aws/secretsmanager. If this key doesn't already exist in your account then Secrets Manager creates it for you automatically. All users in the same AWS account automatically have access to use the default CMK. Note that if an Secrets Manager API call results in AWS having to create the account's AWS-managed CMK, it can result in a one-time significant delay in returning the result.

  • If the secret is in a different AWS account from the credentials calling an API that requires encryption or decryption of the secret value then you must create and use a custom KMS CMK because you can't access the default CMK for the account using credentials from a different AWS account. Store the ARN of the CMK in the secret when you create the secret or when you update it by including it in the KMSKeyId. If you call an API that must encrypt or decrypt SecretString or SecretBinary using credentials from a different account then the KMS key policy must grant cross-account access to that other account's user or role for both the kms:GenerateDataKey and kms:Decrypt operations.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:PutSecretValue

  • kms:GenerateDataKey - needed only if you use a customer-created KMS key to encrypt the secret. You do not need this permission to use the account's AWS managed CMK for Secrets Manager.

  • kms:Encrypt - needed only if you use a customer-created KMS key to encrypt the secret. You do not need this permission to use the account's AWS managed CMK for Secrets Manager.

Related operations

" + "documentation":"

Stores a new encrypted secret value in the specified secret. To do this, the operation creates a new version and attaches it to the secret. The version can contain a new SecretString value or a new SecretBinary value. You can also specify the staging labels that are initially attached to the new version.

The Secrets Manager console uses only the SecretString field. To add binary data to a secret with the SecretBinary field you must use the AWS CLI or one of the AWS SDKs.

  • If this operation creates the first version for the secret then Secrets Manager automatically attaches the staging label AWSCURRENT to the new version.

  • If another version of this secret already exists, then this operation does not automatically move any staging labels other than those that you explicitly specify in the VersionStages parameter.

  • If this operation moves the staging label AWSCURRENT from another version to this version (because you included it in the StagingLabels parameter) then Secrets Manager also automatically moves the staging label AWSPREVIOUS to the version that AWSCURRENT was removed from.

  • This operation is idempotent. If a version with a SecretVersionId with the same value as the ClientRequestToken parameter already exists and you specify the same secret data, the operation succeeds but does nothing. However, if the secret data is different, then the operation fails because you cannot modify an existing version; you can only create new ones.

  • If you call an operation that needs to encrypt or decrypt the SecretString or SecretBinary for a secret in the same account as the calling user and that secret doesn't specify an AWS KMS encryption key, Secrets Manager uses the account's default AWS managed customer master key (CMK) with the alias aws/secretsmanager. If this key doesn't already exist in your account then Secrets Manager creates it for you automatically. All users in the same AWS account automatically have access to use the default CMK. Note that if a Secrets Manager API call results in AWS having to create the account's AWS-managed CMK, it can result in a one-time significant delay in returning the result.

  • If the secret is in a different AWS account from the credentials calling an API that requires encryption or decryption of the secret value then you must create and use a custom AWS KMS CMK because you can't access the default CMK for the account using credentials from a different AWS account. Store the ARN of the CMK in the secret when you create the secret or when you update it by including it in the KMSKeyId. If you call an API that must encrypt or decrypt SecretString or SecretBinary using credentials from a different account then the AWS KMS key policy must grant cross-account access to that other account's user or role for both the kms:GenerateDataKey and kms:Decrypt operations.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:PutSecretValue

  • kms:GenerateDataKey - needed only if you use a customer-managed AWS KMS key to encrypt the secret. You do not need this permission to use the account's default AWS managed CMK for Secrets Manager.

Related operations

" }, "RestoreSecret":{ "name":"RestoreSecret", @@ -201,6 +249,7 @@ "input":{"shape":"TagResourceRequest"}, "errors":[ {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, {"shape":"InvalidParameterException"}, {"shape":"InternalServiceError"} ], @@ -215,6 +264,7 @@ "input":{"shape":"UntagResourceRequest"}, "errors":[ {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, {"shape":"InvalidParameterException"}, {"shape":"InternalServiceError"} ], @@ -236,9 +286,10 @@ {"shape":"ResourceExistsException"}, {"shape":"ResourceNotFoundException"}, {"shape":"MalformedPolicyDocumentException"}, - {"shape":"InternalServiceError"} + {"shape":"InternalServiceError"}, + {"shape":"PreconditionNotMetException"} ], - "documentation":"

Modifies many of the details of a secret. If you include a ClientRequestToken and either SecretString or SecretBinary then it also creates a new version attached to the secret.

To modify the rotation configuration of a secret, use RotateSecret instead.

The Secrets Manager console uses only the SecretString parameter and therefore limits you to encrypting and storing only a text string. To encrypt and store binary data as part of the version of a secret, you must use either the AWS CLI or one of the AWS SDKs.

  • If a version with a SecretVersionId with the same value as the ClientRequestToken parameter already exists, the operation generates an error. You cannot modify an existing version, you can only create new ones.

  • If you include SecretString or SecretBinary to create a new secret version, Secrets Manager automatically attaches the staging label AWSCURRENT to the new version.

  • If you call an operation that needs to encrypt or decrypt the SecretString or SecretBinary for a secret in the same account as the calling user and that secret doesn't specify a KMS encryption key, Secrets Manager uses the account's default AWS managed customer master key (CMK) with the alias aws/secretsmanager. If this key doesn't already exist in your account then Secrets Manager creates it for you automatically. All users in the same AWS account automatically have access to use the default CMK. Note that if an Secrets Manager API call results in AWS having to create the account's AWS-managed CMK, it can result in a one-time significant delay in returning the result.

  • If the secret is in a different AWS account from the credentials calling an API that requires encryption or decryption of the secret value then you must create and use a custom KMS CMK because you can't access the default CMK for the account using credentials from a different AWS account. Store the ARN of the CMK in the secret when you create the secret or when you update it by including it in the KMSKeyId. If you call an API that must encrypt or decrypt SecretString or SecretBinary using credentials from a different account then the KMS key policy must grant cross-account access to that other account's user or role for both the kms:GenerateDataKey and kms:Decrypt operations.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:UpdateSecret

  • kms:GenerateDataKey - needed only if you use a custom KMS key to encrypt the secret. You do not need this permission to use the account's AWS managed CMK for Secrets Manager.

  • kms:Decrypt - needed only if you use a custom KMS key to encrypt the secret. You do not need this permission to use the account's AWS managed CMK for Secrets Manager.

Related operations

" + "documentation":"

Modifies many of the details of a secret. If you include a ClientRequestToken and either SecretString or SecretBinary then it also creates a new version attached to the secret.

To modify the rotation configuration of a secret, use RotateSecret instead.

The Secrets Manager console uses only the SecretString parameter and therefore limits you to encrypting and storing only a text string. To encrypt and store binary data as part of the version of a secret, you must use either the AWS CLI or one of the AWS SDKs.

  • If a version with a SecretVersionId with the same value as the ClientRequestToken parameter already exists, the operation generates an error. You cannot modify an existing version, you can only create new ones.

  • If you include SecretString or SecretBinary to create a new secret version, Secrets Manager automatically attaches the staging label AWSCURRENT to the new version.

  • If you call an operation that needs to encrypt or decrypt the SecretString or SecretBinary for a secret in the same account as the calling user and that secret doesn't specify an AWS KMS encryption key, Secrets Manager uses the account's default AWS managed customer master key (CMK) with the alias aws/secretsmanager. If this key doesn't already exist in your account then Secrets Manager creates it for you automatically. All users in the same AWS account automatically have access to use the default CMK. Note that if a Secrets Manager API call results in AWS having to create the account's AWS-managed CMK, it can result in a one-time significant delay in returning the result.

  • If the secret is in a different AWS account from the credentials calling an API that requires encryption or decryption of the secret value then you must create and use a custom AWS KMS CMK because you can't access the default CMK for the account using credentials from a different AWS account. Store the ARN of the CMK in the secret when you create the secret or when you update it by including it in the KMSKeyId. If you call an API that must encrypt or decrypt SecretString or SecretBinary using credentials from a different account then the AWS KMS key policy must grant cross-account access to that other account's user or role for both the kms:GenerateDataKey and kms:Decrypt operations.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:UpdateSecret

  • kms:GenerateDataKey - needed only if you use a custom AWS KMS key to encrypt the secret. You do not need this permission to use the account's AWS managed CMK for Secrets Manager.

  • kms:Decrypt - needed only if you use a custom AWS KMS key to encrypt the secret. You do not need this permission to use the account's AWS managed CMK for Secrets Manager.

Related operations

" }, "UpdateSecretVersionStage":{ "name":"UpdateSecretVersionStage", @@ -255,7 +306,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Modifies the staging labels attached to a version of a secret. Staging labels are used to track a version as it progresses through the secret rotation process. You can attach a staging label to only one version of a secret at a time. If a staging label to be added is already attached to another version, then it is moved--removed from the other version first and then attached to this one. For more information about staging labels, see Staging Labels in the AWS Secrets Manager User Guide.

The staging labels that you specify in the VersionStage parameter are added to the existing list of staging labels--they don't replace it.

You can move the AWSCURRENT staging label to this version by including it in this call.

Whenever you move AWSCURRENT, Secrets Manager automatically moves the label AWSPREVIOUS to the version that AWSCURRENT was removed from.

If this action results in the last label being removed from a version, then the version is considered to be 'deprecated' and can be deleted by Secrets Manager.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:UpdateSecretVersionStage

Related operations

  • To get the list of staging labels that are currently associated with a version of a secret, use DescribeSecret and examine the SecretVersionsToStages response value.

" + "documentation":"

Modifies the staging labels attached to a version of a secret. Staging labels are used to track a version as it progresses through the secret rotation process. You can attach a staging label to only one version of a secret at a time. If a staging label to be added is already attached to another version, then it is moved--removed from the other version first and then attached to this one. For more information about staging labels, see Staging Labels in the AWS Secrets Manager User Guide.

The staging labels that you specify in the VersionStage parameter are added to the existing list of staging labels--they don't replace it.

You can move the AWSCURRENT staging label to this version by including it in this call.

Whenever you move AWSCURRENT, Secrets Manager automatically moves the label AWSPREVIOUS to the version that AWSCURRENT was removed from.

If this action results in the last label being removed from a version, then the version is considered to be 'deprecated' and can be deleted by Secrets Manager.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:UpdateSecretVersionStage

Related operations

  • To get the list of staging labels that are currently associated with a version of a secret, use DescribeSecret and examine the SecretVersionsToStages response value.

" } }, "shapes":{ @@ -303,11 +354,11 @@ "members":{ "Name":{ "shape":"NameType", - "documentation":"

Specifies the friendly name of the new secret.

" + "documentation":"

Specifies the friendly name of the new secret.

The secret name must be ASCII letters, digits, or the following characters : /_+=.@-

" }, "ClientRequestToken":{ "shape":"ClientRequestTokenType", - "documentation":"

(Optional) If you include SecretString or SecretBinary, then an initial version is created as part of the secret, and this parameter specifies a unique identifier for the new version.

If you use the AWS CLI or one of the AWS SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes as the value for this parameter in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for the new version and include that value in the request.

This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during a rotation. We recommend that you generate a UUID-type value to ensure uniqueness of your versions within the specified secret.

  • If the ClientRequestToken value isn't already associated with a version of the secret then a new version of the secret is created.

  • If a version with this value already exists and that version's SecretString and SecretBinary values are the same as those in the request, then the request is ignored (the operation is idempotent).

  • If a version with this value already exists and that version's SecretString and SecretBinary values are different from those in the request then the request fails because you cannot modify an existing version. Instead, use PutSecretValue to create a new version.

This value becomes the SecretVersionId of the new version.

", + "documentation":"

(Optional) If you include SecretString or SecretBinary, then an initial version is created as part of the secret, and this parameter specifies a unique identifier for the new version.

If you use the AWS CLI or one of the AWS SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes it as the value for this parameter in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for the new version and include that value in the request.

This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during a rotation. We recommend that you generate a UUID-type value to ensure uniqueness of your versions within the specified secret.

  • If the ClientRequestToken value isn't already associated with a version of the secret then a new version of the secret is created.

  • If a version with this value already exists and that version's SecretString and SecretBinary values are the same as those in the request, then the request is ignored (the operation is idempotent).

  • If a version with this value already exists and that version's SecretString and SecretBinary values are different from those in the request then the request fails because you cannot modify an existing version. Instead, use PutSecretValue to create a new version.

This value becomes the SecretVersionId of the new version.

", "idempotencyToken":true }, "Description":{ @@ -316,7 +367,7 @@ }, "KmsKeyId":{ "shape":"KmsKeyIdType", - "documentation":"

(Optional) Specifies the ARN or alias of the AWS KMS customer master key (CMK) to be used to encrypt the SecretString or SecretBinary values in the versions stored in this secret.

If you don't specify this value, then Secrets Manager defaults to using the AWS account's default CMK (the one named aws/secretsmanager). If a KMS CMK with that name doesn't yet exist, then Secrets Manager creates it for you automatically the first time it needs to encrypt a version's SecretString or SecretBinary fields.

You can use the account's default CMK to encrypt and decrypt only if you call this operation using credentials from the same account that owns the secret. If the secret is in a different account, then you must create a custom CMK and specify the ARN in this field.

" + "documentation":"

(Optional) Specifies the ARN, Key ID, or alias of the AWS KMS customer master key (CMK) to be used to encrypt the SecretString or SecretBinary values in the versions stored in this secret.

You can specify any of the supported ways to identify an AWS KMS key ID. If you need to reference a CMK in a different account, you can use only the key ARN or the alias ARN.

If you don't specify this value, then Secrets Manager defaults to using the AWS account's default CMK (the one named aws/secretsmanager). If an AWS KMS CMK with that name doesn't yet exist, then Secrets Manager creates it for you automatically the first time it needs to encrypt a version's SecretString or SecretBinary fields.

You can use the account's default CMK to encrypt and decrypt only if you call this operation using credentials from the same account that owns the secret. If the secret is in a different account, then you must create a custom CMK and specify the ARN in this field.

" }, "SecretBinary":{ "shape":"SecretBinaryType", @@ -324,7 +375,7 @@ }, "SecretString":{ "shape":"SecretStringType", - "documentation":"

(Optional) Specifies text data that you want to encrypt and store in this new version of the secret.

Either SecretString or SecretBinary must have a value, but not both. They cannot both be empty.

If you create a secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that the Lambda rotation function knows how to parse.

For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide. For example:

[{\"Key\":\"username\",\"Value\":\"bob\"},{\"Key\":\"password\",\"Value\":\"abc123xyz456\"}]

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.

" + "documentation":"

(Optional) Specifies text data that you want to encrypt and store in this new version of the secret.

Either SecretString or SecretBinary must have a value, but not both. They cannot both be empty.

If you create a secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that the Lambda rotation function knows how to parse.

For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide. For example:

[{\"username\":\"bob\"},{\"password\":\"abc123xyz456\"}]

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.

" }, "Tags":{ "shape":"TagListType", @@ -358,6 +409,29 @@ "documentation":"

Secrets Manager can't decrypt the protected secret text using the provided KMS key.

", "exception":true }, + "DeleteResourcePolicyRequest":{ + "type":"structure", + "required":["SecretId"], + "members":{ + "SecretId":{ + "shape":"SecretIdType", + "documentation":"

Specifies the secret that you want to delete the attached resource-based policy for. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

" + } + } + }, + "DeleteResourcePolicyResponse":{ + "type":"structure", + "members":{ + "ARN":{ + "shape":"SecretARNType", + "documentation":"

The ARN of the secret that the resource-based policy was deleted for.

" + }, + "Name":{ + "shape":"NameType", + "documentation":"

The friendly name of the secret that the resource-based policy was deleted for.

" + } + } + }, "DeleteSecretRequest":{ "type":"structure", "required":["SecretId"], @@ -420,7 +494,7 @@ }, "KmsKeyId":{ "shape":"KmsKeyIdType", - "documentation":"

The ARN or alias of the AWS KMS customer master key (CMK) that's used to encrypt the SecretString or SecretBinary fields in each version of the secret. If you don't provide a key, then Secrets Manager defaults to encrypting the secret fields with the default KMS CMK (the one named awssecretsmanager) for this account.

" + "documentation":"

The ARN or alias of the AWS KMS customer master key (CMK) that's used to encrypt the SecretString or SecretBinary fields in each version of the secret. If you don't provide a key, then Secrets Manager defaults to encrypting the secret fields with the default AWS KMS CMK (the one named aws/secretsmanager) for this account.

" }, "RotationEnabled":{ "shape":"RotationEnabledType", @@ -540,6 +614,33 @@ } } }, + "GetResourcePolicyRequest":{ + "type":"structure", + "required":["SecretId"], + "members":{ + "SecretId":{ + "shape":"SecretIdType", + "documentation":"

Specifies the secret that you want to retrieve the attached resource-based policy for. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

" + } + } + }, + "GetResourcePolicyResponse":{ + "type":"structure", + "members":{ + "ARN":{ + "shape":"SecretARNType", + "documentation":"

The ARN of the secret that the resource-based policy was retrieved for.

" + }, + "Name":{ + "shape":"NameType", + "documentation":"

The friendly name of the secret that the resource-based policy was retrieved for.

" + }, + "ResourcePolicy":{ + "shape":"NonEmptyResourcePolicyType", + "documentation":"

A JSON-formatted string that describes the permissions that are associated with the attached secret. These permissions are combined with any permissions that are associated with the user or role that attempts to access this secret. The combined permissions specify who can access the secret and what actions they can perform. For more information, see Authentication and Access Control for AWS Secrets Manager in the AWS Secrets Manager User Guide.

" + } + } + }, "GetSecretValueRequest":{ "type":"structure", "required":["SecretId"], @@ -623,7 +724,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

You provided a parameter value that is not valid for the current state of the resource. For example, if you try to enable rotation on a secret, you must already have a Lambda function ARN configured or included as a parameter in this call.

", + "documentation":"

You provided a parameter value that is not valid for the current state of the resource.

Possible causes:

  • You tried to perform the operation on a secret that's currently marked deleted.

  • You tried to enable rotation on a secret that doesn't already have a Lambda function ARN configured and you didn't include such an ARN as a parameter in this call.

", "exception":true }, "KmsKeyIdType":{ @@ -737,11 +838,54 @@ "max":4096, "min":1 }, + "NonEmptyResourcePolicyType":{ + "type":"string", + "max":4096, + "min":1 + }, "PasswordLengthType":{ "type":"long", "max":4096, "min":1 }, + "PreconditionNotMetException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The request failed because you did not complete all the prerequisite steps.

", + "exception":true + }, + "PutResourcePolicyRequest":{ + "type":"structure", + "required":[ + "SecretId", + "ResourcePolicy" + ], + "members":{ + "SecretId":{ + "shape":"SecretIdType", + "documentation":"

Specifies the secret that you want to attach the resource-based policy to. You can specify either the ARN or the friendly name of the secret.

" + }, + "ResourcePolicy":{ + "shape":"NonEmptyResourcePolicyType", + "documentation":"

A JSON-formatted string that's constructed according to the grammar and syntax for an AWS resource-based policy. The policy in the string identifies who can access or manage this secret and its versions. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide.

" + } + } + }, + "PutResourcePolicyResponse":{ + "type":"structure", + "members":{ + "ARN":{ + "shape":"SecretARNType", + "documentation":"

The ARN of the secret that the resource-based policy was attached to.

" + }, + "Name":{ + "shape":"NameType", + "documentation":"

The friendly name of the secret that the resource-based policy was attached to.

" + } + } + }, "PutSecretValueRequest":{ "type":"structure", "required":["SecretId"], @@ -761,7 +905,7 @@ }, "SecretString":{ "shape":"SecretStringType", - "documentation":"

(Optional) Specifies text data that you want to encrypt and store in this new version of the secret. Either SecretString or SecretBinary must have a value, but not both. They cannot both be empty.

If you create this secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that the default Lambda rotation function knows how to parse.

For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide.

" + "documentation":"

(Optional) Specifies text data that you want to encrypt and store in this new version of the secret. Either SecretString or SecretBinary must have a value, but not both. They cannot both be empty.

If you create this secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that the default Lambda rotation function knows how to parse.

For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide.

For example:

[{\"username\":\"bob\"},{\"password\":\"abc123xyz456\"}]

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.

" }, "VersionStages":{ "shape":"SecretVersionStagesType", @@ -915,7 +1059,7 @@ "members":{ "ARN":{ "shape":"SecretARNType", - "documentation":"

The Amazon Resource Name (ARN) of the secret.

For more information about ARNs in Secrets Manager, see Policy Resources in the AWS Secrets Manager User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the secret.

For more information about ARNs in Secrets Manager, see Policy Resources in the AWS Secrets Manager User Guide.

" }, "Name":{ "shape":"SecretNameType", @@ -1121,7 +1265,7 @@ }, "KmsKeyId":{ "shape":"KmsKeyIdType", - "documentation":"

(Optional) Specifies the ARN or alias of the KMS customer master key (CMK) to be used to encrypt the protected text in the versions of this secret.

If you don't specify this value, then Secrets Manager defaults to using the default CMK in the account (the one named aws/secretsmanager). If a KMS CMK with that name doesn't exist, then Secrets Manager creates it for you automatically the first time it needs to encrypt a version's Plaintext or PlaintextString fields.

You can only use the account's default CMK to encrypt and decrypt if you call this operation using credentials from the same account that owns the secret. If the secret is in a different account, then you must create a custom CMK and provide the ARN in this field.

" + "documentation":"

(Optional) Specifies the ARN or alias of the AWS KMS customer master key (CMK) to be used to encrypt the protected text in the versions of this secret.

If you don't specify this value, then Secrets Manager defaults to using the default CMK in the account (the one named aws/secretsmanager). If an AWS KMS CMK with that name doesn't exist, then Secrets Manager creates it for you automatically the first time it needs to encrypt a version's Plaintext or PlaintextString fields.

You can only use the account's default CMK to encrypt and decrypt if you call this operation using credentials from the same account that owns the secret. If the secret is in a different account, then you must create a custom CMK and provide the ARN in this field.

" }, "SecretBinary":{ "shape":"SecretBinaryType", @@ -1129,7 +1273,7 @@ }, "SecretString":{ "shape":"SecretStringType", - "documentation":"

(Optional) Specifies text data that you want to encrypt and store in this new version of the secret. Either SecretBinary or SecretString must have a value, but not both. They cannot both be empty.

If you create this secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that the default Lambda rotation function knows how to parse.

For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide.

" + "documentation":"

(Optional) Specifies text data that you want to encrypt and store in this new version of the secret. Either SecretBinary or SecretString must have a value, but not both. They cannot both be empty.

If you create this secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that the default Lambda rotation function knows how to parse.

For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide. For example:

[{\"username\":\"bob\"},{\"password\":\"abc123xyz456\"}]

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.

" } } }, diff --git a/botocore/data/serverlessrepo/2017-09-08/service-2.json b/botocore/data/serverlessrepo/2017-09-08/service-2.json index 9f48f211..a53ae81e 100644 --- a/botocore/data/serverlessrepo/2017-09-08/service-2.json +++ b/botocore/data/serverlessrepo/2017-09-08/service-2.json @@ -27,7 +27,7 @@ }, "errors" : [ { "shape" : "TooManyRequestsException", - "documentation" : "

The client is sending more than the allowed number of requests per unit time.

" + "documentation" : "

The client is sending more than the allowed number of requests per unit of time.

" }, { "shape" : "BadRequestException", "documentation" : "

One of the parameters in the request is invalid.

" @@ -59,7 +59,7 @@ }, "errors" : [ { "shape" : "TooManyRequestsException", - "documentation" : "

The client is sending more than the allowed number of requests per unit time.

" + "documentation" : "

The client is sending more than the allowed number of requests per unit of time.

" }, { "shape" : "BadRequestException", "documentation" : "

One of the parameters in the request is invalid.

" @@ -91,7 +91,7 @@ }, "errors" : [ { "shape" : "TooManyRequestsException", - "documentation" : "

The client is sending more than the allowed number of requests per unit time.

" + "documentation" : "

The client is sending more than the allowed number of requests per unit of time.

" }, { "shape" : "BadRequestException", "documentation" : "

One of the parameters in the request is invalid.

" @@ -102,7 +102,7 @@ "shape" : "ForbiddenException", "documentation" : "

The client is not authenticated.

" } ], - "documentation" : "

Creates an AWS CloudFormation ChangeSet for the given application.

" + "documentation" : "

Creates an AWS CloudFormation change set for the given application.

" }, "DeleteApplication" : { "name" : "DeleteApplication", @@ -125,10 +125,10 @@ "documentation" : "

The client is not authenticated.

" }, { "shape" : "NotFoundException", - "documentation" : "

The resource (for example, an access policy statement) specified in the request does not exist.

" + "documentation" : "

The resource (for example, an access policy statement) specified in the request doesn't exist.

" }, { "shape" : "TooManyRequestsException", - "documentation" : "

The client is sending more than the allowed number of requests per unit time.

" + "documentation" : "

The client is sending more than the allowed number of requests per unit of time.

" }, { "shape" : "ConflictException", "documentation" : "

The resource already exists.

" @@ -151,10 +151,10 @@ }, "errors" : [ { "shape" : "NotFoundException", - "documentation" : "

The resource (for example, an access policy statement) specified in the request does not exist.

" + "documentation" : "

The resource (for example, an access policy statement) specified in the request doesn't exist.

" }, { "shape" : "TooManyRequestsException", - "documentation" : "

The client is sending more than the allowed number of requests per unit time.

" + "documentation" : "

The client is sending more than the allowed number of requests per unit of time.

" }, { "shape" : "BadRequestException", "documentation" : "

One of the parameters in the request is invalid.

" @@ -183,10 +183,10 @@ }, "errors" : [ { "shape" : "NotFoundException", - "documentation" : "

The resource (for example, an access policy statement) specified in the request does not exist.

" + "documentation" : "

The resource (for example, an access policy statement) specified in the request doesn't exist.

" }, { "shape" : "TooManyRequestsException", - "documentation" : "

The client is sending more than the allowed number of requests per unit time.

" + "documentation" : "

The client is sending more than the allowed number of requests per unit of time.

" }, { "shape" : "BadRequestException", "documentation" : "

One of the parameters in the request is invalid.

" @@ -197,7 +197,7 @@ "shape" : "ForbiddenException", "documentation" : "

The client is not authenticated.

" } ], - "documentation" : "

Gets the policy for the specified application.

" + "documentation" : "

Retrieves the policy for the application.

" }, "ListApplicationVersions" : { "name" : "ListApplicationVersions", @@ -215,10 +215,10 @@ }, "errors" : [ { "shape" : "NotFoundException", - "documentation" : "

The resource (for example, an access policy statement) specified in the request does not exist.

" + "documentation" : "

The resource (for example, an access policy statement) specified in the request doesn't exist.

" }, { "shape" : "TooManyRequestsException", - "documentation" : "

The client is sending more than the allowed number of requests per unit time.

" + "documentation" : "

The client is sending more than the allowed number of requests per unit of time.

" }, { "shape" : "BadRequestException", "documentation" : "

One of the parameters in the request is invalid.

" @@ -247,7 +247,7 @@ }, "errors" : [ { "shape" : "NotFoundException", - "documentation" : "

The resource (for example, an access policy statement) specified in the request does not exist.

" + "documentation" : "

The resource (for example, an access policy statement) specified in the request doesn't exist.

" }, { "shape" : "BadRequestException", "documentation" : "

One of the parameters in the request is invalid.

" @@ -276,10 +276,10 @@ }, "errors" : [ { "shape" : "NotFoundException", - "documentation" : "

The resource (for example, an access policy statement) specified in the request does not exist.

" + "documentation" : "

The resource (for example, an access policy statement) specified in the request doesn't exist.

" }, { "shape" : "TooManyRequestsException", - "documentation" : "

The client is sending more than the allowed number of requests per unit time.

" + "documentation" : "

The client is sending more than the allowed number of requests per unit of time.

" }, { "shape" : "BadRequestException", "documentation" : "

One of the parameters in the request is invalid.

" @@ -290,7 +290,7 @@ "shape" : "ForbiddenException", "documentation" : "

The client is not authenticated.

" } ], - "documentation" : "

Puts the policy for the specified application.

" + "documentation" : "

Sets the permission policy for an application. See\n Application Permissions\n for the list of supported actions that can be used with this operation.

" }, "UpdateApplication" : { "name" : "UpdateApplication", @@ -317,10 +317,10 @@ "documentation" : "

The client is not authenticated.

" }, { "shape" : "NotFoundException", - "documentation" : "

The resource (for example, an access policy statement) specified in the request does not exist.

" + "documentation" : "

The resource (for example, an access policy statement) specified in the request doesn't exist.

" }, { "shape" : "TooManyRequestsException", - "documentation" : "

The client is sending more than the allowed number of requests per unit time.

" + "documentation" : "

The client is sending more than the allowed number of requests per unit of time.

" }, { "shape" : "ConflictException", "documentation" : "

The resource already exists.

" @@ -340,17 +340,17 @@ "Author" : { "shape" : "__string", "locationName" : "author", - "documentation" : "

The name of the author publishing the app.

Min Length=1. Max Length=127.

Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";

" + "documentation" : "

The name of the author publishing the app.

Minimum length=1. Maximum length=127.

Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";

" }, "CreationTime" : { "shape" : "__string", "locationName" : "creationTime", - "documentation" : "

The date/time this resource was created.

" + "documentation" : "

The date and time this resource was created.

" }, "Description" : { "shape" : "__string", "locationName" : "description", - "documentation" : "

The description of the application.

Min Length=1. Max Length=256

" + "documentation" : "

The description of the application.

Minimum length=1. Maximum length=256

" }, "HomePageUrl" : { "shape" : "__string", @@ -360,22 +360,22 @@ "Labels" : { "shape" : "__listOf__string", "locationName" : "labels", - "documentation" : "

Labels to improve discovery of apps in search results.

Min Length=1. Max Length=127. Maximum number of labels: 10

Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";

" + "documentation" : "

Labels to improve discovery of apps in search results.

Minimum length=1. Maximum length=127. Maximum number of labels: 10

Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";

" }, "LicenseUrl" : { "shape" : "__string", "locationName" : "licenseUrl", - "documentation" : "

A link to a license file of the app that matches the spdxLicenseID of your application.

Max size 5 MB

" + "documentation" : "

A link to a license file of the app that matches the spdxLicenseID value of your application.

Maximum size 5 MB

" }, "Name" : { "shape" : "__string", "locationName" : "name", - "documentation" : "

The name of the application.

Min Length=1. Max Length=140

Pattern: \"[a-zA-Z0-9\\\\-]+\";

" + "documentation" : "

The name of the application.

Minimum length=1. Maximum length=140

Pattern: \"[a-zA-Z0-9\\\\-]+\";

" }, "ReadmeUrl" : { "shape" : "__string", "locationName" : "readmeUrl", - "documentation" : "

A link to the readme file that contains a more detailed description of the application and how it works in Markdown language.

Max size 5 MB

" + "documentation" : "

A link to the readme file in Markdown language that contains a more detailed description of the application and how it works.

Maximum size 5 MB

" }, "SpdxLicenseId" : { "shape" : "__string", @@ -397,7 +397,7 @@ "Applications" : { "shape" : "__listOfApplicationSummary", "locationName" : "applications", - "documentation" : "

Array of application summaries.

" + "documentation" : "

An array of application summaries.

" }, "NextToken" : { "shape" : "__string", @@ -405,7 +405,7 @@ "documentation" : "

The token to request the next page of results.

" } }, - "documentation" : "

List of application details.

", + "documentation" : "

A list of application details.

", "required" : [ "Applications" ] }, "ApplicationPolicy" : { @@ -414,7 +414,7 @@ "Statements" : { "shape" : "__listOfApplicationPolicyStatement", "locationName" : "statements", - "documentation" : "

Array of policy statements applied to the application.

" + "documentation" : "

An array of policy statements applied to the application.

" } }, "documentation" : "

Policy statements applied to the application.

", @@ -426,7 +426,7 @@ "Actions" : { "shape" : "__listOf__string", "locationName" : "actions", - "documentation" : "

A list of supported actions:

\n GetApplication\n

\n CreateCloudFormationChangeSet\n

\n ListApplicationVersions\n

\n SearchApplications\n

\n Deploy (Note: This action enables all other actions above.)

" + "documentation" : "

See Application Permissions for the list of supported actions.

" }, "Principals" : { "shape" : "__listOf__string", @@ -448,22 +448,22 @@ "ApplicationId" : { "shape" : "__string", "locationName" : "applicationId", - "documentation" : "

The application ARN.

" + "documentation" : "

The application Amazon Resource Name (ARN).

" }, "Author" : { "shape" : "__string", "locationName" : "author", - "documentation" : "

The name of the author publishing the app.

Min Length=1. Max Length=127.

Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";

" + "documentation" : "

The name of the author publishing the app.

Minimum length=1. Maximum length=127.

Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";

" }, "CreationTime" : { "shape" : "__string", "locationName" : "creationTime", - "documentation" : "

The date/time this resource was created.

" + "documentation" : "

The date and time this resource was created.

" }, "Description" : { "shape" : "__string", "locationName" : "description", - "documentation" : "

The description of the application.

Min Length=1. Max Length=256

" + "documentation" : "

The description of the application.

Minimum length=1. Maximum length=256

" }, "HomePageUrl" : { "shape" : "__string", @@ -473,12 +473,12 @@ "Labels" : { "shape" : "__listOf__string", "locationName" : "labels", - "documentation" : "

Labels to improve discovery of apps in search results.

Min Length=1. Max Length=127. Maximum number of labels: 10

Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";

" + "documentation" : "

Labels to improve discovery of apps in search results.

Minimum length=1. Maximum length=127. Maximum number of labels: 10

Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";

" }, "Name" : { "shape" : "__string", "locationName" : "name", - "documentation" : "

The name of the application.

Min Length=1. Max Length=140

Pattern: \"[a-zA-Z0-9\\\\-]+\";

" + "documentation" : "

The name of the application.

Minimum length=1. Maximum length=140

Pattern: \"[a-zA-Z0-9\\\\-]+\";

" }, "SpdxLicenseId" : { "shape" : "__string", @@ -500,10 +500,10 @@ "Versions" : { "shape" : "__listOfVersionSummary", "locationName" : "versions", - "documentation" : "

Array of version summaries for the application.

" + "documentation" : "

An array of version summaries for the application.

" } }, - "documentation" : "

List of version summaries for the application.

", + "documentation" : "

A list of version summaries for the application.

", "required" : [ "Versions" ] }, "BadRequestException" : { @@ -537,7 +537,7 @@ "ChangeSetId" : { "shape" : "__string", "locationName" : "changeSetId", - "documentation" : "

The ARN of the change set.

Length Constraints: Minimum length of 1.

Pattern: Amazon Resource Name (ARN):[-a-zA-Z0-9:/]*

" + "documentation" : "

The Amazon Resource Name (ARN) of the change set.

Length constraints: Minimum length of 1.

Pattern: ARN:[-a-zA-Z0-9:/]*

" }, "SemanticVersion" : { "shape" : "__string", @@ -579,12 +579,12 @@ "Author" : { "shape" : "__string", "locationName" : "author", - "documentation" : "

The name of the author publishing the app.

Min Length=1. Max Length=127.

Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";

" + "documentation" : "

The name of the author publishing the app.

Minimum length=1. Maximum length=127.

Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";

" }, "Description" : { "shape" : "__string", "locationName" : "description", - "documentation" : "

The description of the application.

Min Length=1. Max Length=256

" + "documentation" : "

The description of the application.

Minimum length=1. Maximum length=256

" }, "HomePageUrl" : { "shape" : "__string", @@ -594,32 +594,32 @@ "Labels" : { "shape" : "__listOf__string", "locationName" : "labels", - "documentation" : "

Labels to improve discovery of apps in search results.

Min Length=1. Max Length=127. Maximum number of labels: 10

Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";

" + "documentation" : "

Labels to improve discovery of apps in search results.

Minimum length=1. Maximum length=127. Maximum number of labels: 10

Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";

" }, "LicenseBody" : { "shape" : "__string", "locationName" : "licenseBody", - "documentation" : "

A raw text file that contains the license of the app that matches the spdxLicenseID of your application.

Max size 5 MB

" + "documentation" : "

A local text file that contains the license of the app that matches the spdxLicenseID value of your application.\n The file is of the format file://<path>/<filename>.

Maximum size 5 MB

Note: Only one of licenseBody and licenseUrl can be specified, otherwise an error will result.

" }, "LicenseUrl" : { "shape" : "__string", "locationName" : "licenseUrl", - "documentation" : "

A link to a license file of the app that matches the spdxLicenseID of your application.

Max size 5 MB

" + "documentation" : "

A link to the S3 object that contains the license of the app that matches the spdxLicenseID value of your application.

Maximum size 5 MB

Note: Only one of licenseBody and licenseUrl can be specified, otherwise an error will result.

" }, "Name" : { "shape" : "__string", "locationName" : "name", - "documentation" : "

The name of the application you want to publish.

Min Length=1. Max Length=140

Pattern: \"[a-zA-Z0-9\\\\-]+\";

" + "documentation" : "

The name of the application that you want to publish.

Minimum length=1. Maximum length=140

Pattern: \"[a-zA-Z0-9\\\\-]+\";

" }, "ReadmeBody" : { "shape" : "__string", "locationName" : "readmeBody", - "documentation" : "

A raw text Readme file that contains a more detailed description of the application and how it works in markdown language.

Max size 5 MB

" + "documentation" : "

A local text readme file in Markdown language that contains a more detailed description of the application and how it works.\n The file is of the format file://<path>/<filename>.

Maximum size 5 MB

Note: Only one of readmeBody and readmeUrl can be specified, otherwise an error will result.

" }, "ReadmeUrl" : { "shape" : "__string", "locationName" : "readmeUrl", - "documentation" : "

A link to the Readme file that contains a more detailed description of the application and how it works in markdown language.

Max size 5 MB

" + "documentation" : "

A link to the S3 object in Markdown language that contains a more detailed description of the application and how it works.

Maximum size 5 MB

Note: Only one of readmeBody and readmeUrl can be specified, otherwise an error will result.

" }, "SemanticVersion" : { "shape" : "__string", @@ -639,15 +639,15 @@ "TemplateBody" : { "shape" : "__string", "locationName" : "templateBody", - "documentation" : "

The raw packaged AWS SAM template of your application.

" + "documentation" : "

The local raw packaged AWS SAM template file of your application.\n The file is of the format file://<path>/<filename>.

Note: Only one of templateBody and templateUrl can be specified, otherwise an error will result.

" }, "TemplateUrl" : { "shape" : "__string", "locationName" : "templateUrl", - "documentation" : "

A link to the packaged AWS SAM template of your application.

" + "documentation" : "

A link to the S3 object containing the packaged AWS SAM template of your application.

Note: Only one of templateBody and templateUrl can be specified, otherwise an error will result.

" } }, - "documentation" : "

Create application request.

", + "documentation" : "

Create an application request.

", "required" : [ "Description", "Name", "Author" ] }, "CreateApplicationRequest" : { @@ -656,12 +656,12 @@ "Author" : { "shape" : "__string", "locationName" : "author", - "documentation" : "

The name of the author publishing the app.

Min Length=1. Max Length=127.

Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";

" + "documentation" : "

The name of the author publishing the app.

Minimum length=1. Maximum length=127.

Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";

" }, "Description" : { "shape" : "__string", "locationName" : "description", - "documentation" : "

The description of the application.

Min Length=1. Max Length=256

" + "documentation" : "

The description of the application.

Minimum length=1. Maximum length=256

" }, "HomePageUrl" : { "shape" : "__string", @@ -671,32 +671,32 @@ "Labels" : { "shape" : "__listOf__string", "locationName" : "labels", - "documentation" : "

Labels to improve discovery of apps in search results.

Min Length=1. Max Length=127. Maximum number of labels: 10

Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";

" + "documentation" : "

Labels to improve discovery of apps in search results.

Minimum length=1. Maximum length=127. Maximum number of labels: 10

Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";

" }, "LicenseBody" : { "shape" : "__string", "locationName" : "licenseBody", - "documentation" : "

A raw text file that contains the license of the app that matches the spdxLicenseID of your application.

Max size 5 MB

" + "documentation" : "

A local text file that contains the license of the app that matches the spdxLicenseID value of your application.\n The file is of the format file://<path>/<filename>.

Maximum size 5 MB

Note: Only one of licenseBody and licenseUrl can be specified, otherwise an error will result.

" }, "LicenseUrl" : { "shape" : "__string", "locationName" : "licenseUrl", - "documentation" : "

A link to a license file of the app that matches the spdxLicenseID of your application.

Max size 5 MB

" + "documentation" : "

A link to the S3 object that contains the license of the app that matches the spdxLicenseID value of your application.

Maximum size 5 MB

Note: Only one of licenseBody and licenseUrl can be specified, otherwise an error will result.

" }, "Name" : { "shape" : "__string", "locationName" : "name", - "documentation" : "

The name of the application you want to publish.

Min Length=1. Max Length=140

Pattern: \"[a-zA-Z0-9\\\\-]+\";

" + "documentation" : "

The name of the application that you want to publish.

Minimum length=1. Maximum length=140

Pattern: \"[a-zA-Z0-9\\\\-]+\";

" }, "ReadmeBody" : { "shape" : "__string", "locationName" : "readmeBody", - "documentation" : "

A raw text Readme file that contains a more detailed description of the application and how it works in markdown language.

Max size 5 MB

" + "documentation" : "

A local text readme file in Markdown language that contains a more detailed description of the application and how it works.\n The file is of the format file://<path>/<filename>.

Maximum size 5 MB

Note: Only one of readmeBody and readmeUrl can be specified, otherwise an error will result.

" }, "ReadmeUrl" : { "shape" : "__string", "locationName" : "readmeUrl", - "documentation" : "

A link to the Readme file that contains a more detailed description of the application and how it works in markdown language.

Max size 5 MB

" + "documentation" : "

A link to the S3 object in Markdown language that contains a more detailed description of the application and how it works.

Maximum size 5 MB

Note: Only one of readmeBody and readmeUrl can be specified, otherwise an error will result.

" }, "SemanticVersion" : { "shape" : "__string", @@ -716,14 +716,15 @@ "TemplateBody" : { "shape" : "__string", "locationName" : "templateBody", - "documentation" : "

The raw packaged AWS SAM template of your application.

" + "documentation" : "

The local raw packaged AWS SAM template file of your application.\n The file is of the format file://<path>/<filename>.

Note: Only one of templateBody and templateUrl can be specified, otherwise an error will result.

" }, "TemplateUrl" : { "shape" : "__string", "locationName" : "templateUrl", - "documentation" : "

A link to the packaged AWS SAM template of your application.

" + "documentation" : "

A link to the S3 object containing the packaged AWS SAM template of your application.

Note: Only one of templateBody and templateUrl can be specified, otherwise an error will result.

" } - } + }, + "required" : [ "Description", "Name", "Author" ] }, "CreateApplicationResponse" : { "type" : "structure", @@ -736,17 +737,17 @@ "Author" : { "shape" : "__string", "locationName" : "author", - "documentation" : "

The name of the author publishing the app.

Min Length=1. Max Length=127.

Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";

" + "documentation" : "

The name of the author publishing the app.

Minimum length=1. Maximum length=127.

Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";

" }, "CreationTime" : { "shape" : "__string", "locationName" : "creationTime", - "documentation" : "

The date/time this resource was created.

" + "documentation" : "

The date and time this resource was created.

" }, "Description" : { "shape" : "__string", "locationName" : "description", - "documentation" : "

The description of the application.

Min Length=1. Max Length=256

" + "documentation" : "

The description of the application.

Minimum length=1. Maximum length=256

" }, "HomePageUrl" : { "shape" : "__string", @@ -756,22 +757,22 @@ "Labels" : { "shape" : "__listOf__string", "locationName" : "labels", - "documentation" : "

Labels to improve discovery of apps in search results.

Min Length=1. Max Length=127. Maximum number of labels: 10

Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";

" + "documentation" : "

Labels to improve discovery of apps in search results.

Minimum length=1. Maximum length=127. Maximum number of labels: 10

Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";

" }, "LicenseUrl" : { "shape" : "__string", "locationName" : "licenseUrl", - "documentation" : "

A link to a license file of the app that matches the spdxLicenseID of your application.

Max size 5 MB

" + "documentation" : "

A link to a license file of the app that matches the spdxLicenseID value of your application.

Maximum size 5 MB

" }, "Name" : { "shape" : "__string", "locationName" : "name", - "documentation" : "

The name of the application.

Min Length=1. Max Length=140

Pattern: \"[a-zA-Z0-9\\\\-]+\";

" + "documentation" : "

The name of the application.

Minimum length=1. Maximum length=140

Pattern: \"[a-zA-Z0-9\\\\-]+\";

" }, "ReadmeUrl" : { "shape" : "__string", "locationName" : "readmeUrl", - "documentation" : "

A link to the readme file that contains a more detailed description of the application and how it works in Markdown language.

Max size 5 MB

" + "documentation" : "

A link to the readme file in Markdown language that contains a more detailed description of the application and how it works.

Maximum size 5 MB

" }, "SpdxLicenseId" : { "shape" : "__string", @@ -804,7 +805,7 @@ "documentation" : "

A link to the packaged AWS SAM template of your application.

" } }, - "documentation" : "

Create version request.

" + "documentation" : "

Create a version request.

" }, "CreateApplicationVersionRequest" : { "type" : "structure", @@ -813,7 +814,7 @@ "shape" : "__string", "location" : "uri", "locationName" : "applicationId", - "documentation" : "

The ID of the application to get.

" + "documentation" : "

The Amazon Resource Name (ARN) of the application.

" }, "SemanticVersion" : { "shape" : "__string", @@ -850,12 +851,12 @@ "CreationTime" : { "shape" : "__string", "locationName" : "creationTime", - "documentation" : "

The date/time this resource was created.

" + "documentation" : "

The date and time this resource was created.

" }, "ParameterDefinitions" : { "shape" : "__listOfParameterDefinition", "locationName" : "parameterDefinitions", - "documentation" : "

Array of parameter types supported by the application.

" + "documentation" : "

An array of parameter types supported by the application.

" }, "SemanticVersion" : { "shape" : "__string", @@ -893,7 +894,7 @@ "documentation" : "

The name or the unique ID of the stack for which you are creating a change set. AWS CloudFormation generates\n the change set by comparing this stack's information with the information that you submit, such as a modified\n template or different parameter input values.

Constraints: Minimum length of 1.

Pattern: ([a-zA-Z][-a-zA-Z0-9]*)|(arn:\\b(aws|aws-us-gov|aws-cn)\\b:[-a-zA-Z0-9:/._+]*)

" } }, - "documentation" : "

Create application ChangeSet request.

", + "documentation" : "

Create an application change set request.

", "required" : [ "StackName" ] }, "CreateCloudFormationChangeSetRequest" : { @@ -903,7 +904,7 @@ "shape" : "__string", "location" : "uri", "locationName" : "applicationId", - "documentation" : "

The ID of the application to get.

" + "documentation" : "

The Amazon Resource Name (ARN) of the application.

" }, "ParameterOverrides" : { "shape" : "__listOfParameterValue", @@ -921,7 +922,7 @@ "documentation" : "

The name or the unique ID of the stack for which you are creating a change set. AWS CloudFormation generates\n the change set by comparing this stack's information with the information that you submit, such as a modified\n template or different parameter input values.

Constraints: Minimum length of 1.

Pattern: ([a-zA-Z][-a-zA-Z0-9]*)|(arn:\\b(aws|aws-us-gov|aws-cn)\\b:[-a-zA-Z0-9:/._+]*)

" } }, - "required" : [ "ApplicationId" ] + "required" : [ "ApplicationId", "StackName" ] }, "CreateCloudFormationChangeSetResponse" : { "type" : "structure", @@ -934,7 +935,7 @@ "ChangeSetId" : { "shape" : "__string", "locationName" : "changeSetId", - "documentation" : "

The ARN of the change set.

Length Constraints: Minimum length of 1.

Pattern: Amazon Resource Name (ARN):[-a-zA-Z0-9:/]*

" + "documentation" : "

The Amazon Resource Name (ARN) of the change set.

Length constraints: Minimum length of 1.

Pattern: ARN:[-a-zA-Z0-9:/]*

" }, "SemanticVersion" : { "shape" : "__string", @@ -955,7 +956,7 @@ "shape" : "__string", "location" : "uri", "locationName" : "applicationId", - "documentation" : "

The ID of the application to get.

" + "documentation" : "

The Amazon Resource Name (ARN) of the application.

" } }, "required" : [ "ApplicationId" ] @@ -987,7 +988,7 @@ "shape" : "__string", "location" : "uri", "locationName" : "applicationId", - "documentation" : "

The ID of the application to get.

" + "documentation" : "

The Amazon Resource Name (ARN) of the application.

" } }, "required" : [ "ApplicationId" ] @@ -998,7 +999,7 @@ "Statements" : { "shape" : "__listOfApplicationPolicyStatement", "locationName" : "statements", - "documentation" : "

Array of policy statements applied to the application.

" + "documentation" : "

An array of policy statements applied to the application.

" } } }, @@ -1009,7 +1010,7 @@ "shape" : "__string", "location" : "uri", "locationName" : "applicationId", - "documentation" : "

The ID of the application to get.

" + "documentation" : "

The Amazon Resource Name (ARN) of the application.

" }, "SemanticVersion" : { "shape" : "__string", @@ -1031,17 +1032,17 @@ "Author" : { "shape" : "__string", "locationName" : "author", - "documentation" : "

The name of the author publishing the app.

Min Length=1. Max Length=127.

Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";

" + "documentation" : "

The name of the author publishing the app.

Minimum length=1. Maximum length=127.

Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";

" }, "CreationTime" : { "shape" : "__string", "locationName" : "creationTime", - "documentation" : "

The date/time this resource was created.

" + "documentation" : "

The date and time this resource was created.

" }, "Description" : { "shape" : "__string", "locationName" : "description", - "documentation" : "

The description of the application.

Min Length=1. Max Length=256

" + "documentation" : "

The description of the application.

Minimum length=1. Maximum length=256

" }, "HomePageUrl" : { "shape" : "__string", @@ -1051,22 +1052,22 @@ "Labels" : { "shape" : "__listOf__string", "locationName" : "labels", - "documentation" : "

Labels to improve discovery of apps in search results.

Min Length=1. Max Length=127. Maximum number of labels: 10

Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";

" + "documentation" : "

Labels to improve discovery of apps in search results.

Minimum length=1. Maximum length=127. Maximum number of labels: 10

Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";

" }, "LicenseUrl" : { "shape" : "__string", "locationName" : "licenseUrl", - "documentation" : "

A link to a license file of the app that matches the spdxLicenseID of your application.

Max size 5 MB

" + "documentation" : "

A link to a license file of the app that matches the spdxLicenseID value of your application.

Maximum size 5 MB

" }, "Name" : { "shape" : "__string", "locationName" : "name", - "documentation" : "

The name of the application.

Min Length=1. Max Length=140

Pattern: \"[a-zA-Z0-9\\\\-]+\";

" + "documentation" : "

The name of the application.

Minimum length=1. Maximum length=140

Pattern: \"[a-zA-Z0-9\\\\-]+\";

" }, "ReadmeUrl" : { "shape" : "__string", "locationName" : "readmeUrl", - "documentation" : "

A link to the readme file that contains a more detailed description of the application and how it works in Markdown language.

Max size 5 MB

" + "documentation" : "

A link to the readme file in Markdown language that contains a more detailed description of the application and how it works.

Maximum size 5 MB

" }, "SpdxLicenseId" : { "shape" : "__string", @@ -1107,7 +1108,7 @@ "shape" : "__string", "location" : "uri", "locationName" : "applicationId", - "documentation" : "

The ID of the application to get.

" + "documentation" : "

The Amazon Resource Name (ARN) of the application.

" }, "MaxItems" : { "shape" : "MaxItems", @@ -1135,7 +1136,7 @@ "Versions" : { "shape" : "__listOfVersionSummary", "locationName" : "versions", - "documentation" : "

Array of version summaries for the application.

" + "documentation" : "

An array of version summaries for the application.

" } } }, @@ -1162,7 +1163,7 @@ "Applications" : { "shape" : "__listOfApplicationSummary", "locationName" : "applications", - "documentation" : "

Array of application summaries.

" + "documentation" : "

An array of application summaries.

" }, "NextToken" : { "shape" : "__string", @@ -1187,10 +1188,10 @@ "Message" : { "shape" : "__string", "locationName" : "message", - "documentation" : "

The resource (for example, an access policy statement) specified in the request does not exist.

" + "documentation" : "

The resource (for example, an access policy statement) specified in the request doesn't exist.

" } }, - "documentation" : "

The resource (for example, an access policy statement) specified in the request does not exist.

", + "documentation" : "

The resource (for example, an access policy statement) specified in the request doesn't exist.

", "exception" : true, "error" : { "httpStatusCode" : 404 @@ -1207,12 +1208,12 @@ "AllowedValues" : { "shape" : "__listOf__string", "locationName" : "allowedValues", - "documentation" : "

Array containing the list of values allowed for the parameter.

" + "documentation" : "

An array containing the list of values allowed for the parameter.

" }, "ConstraintDescription" : { "shape" : "__string", "locationName" : "constraintDescription", - "documentation" : "

A string that explains a constraint when the constraint is violated. For example, without a constraint description,\n a parameter that has an allowed pattern of [A-Za-z0-9]+ displays the following error message when the user\n specifies an invalid value:

\n Malformed input-Parameter MyParameter must match pattern [A-Za-z0-9]+\n

By adding a constraint description, such as \"must contain only uppercase and lowercase letters, and numbers,\" you can display\n the following customized error message:

\n Malformed input-Parameter MyParameter must contain only uppercase and lowercase letters and numbers.\n

" + "documentation" : "

A string that explains a constraint when the constraint is violated. For example, without a constraint description,\n a parameter that has an allowed pattern of [A-Za-z0-9]+ displays the following error message when the user\n specifies an invalid value:

\n Malformed input-Parameter MyParameter must match pattern [A-Za-z0-9]+\n

By adding a constraint description, such as \"must contain only uppercase and lowercase letters and numbers,\" you can display\n the following customized error message:

\n Malformed input-Parameter MyParameter must contain only uppercase and lowercase letters and numbers.\n

" }, "DefaultValue" : { "shape" : "__string", @@ -1227,22 +1228,22 @@ "MaxLength" : { "shape" : "__integer", "locationName" : "maxLength", - "documentation" : "

An integer value that determines the largest number of characters you want to allow for String types.

" + "documentation" : "

An integer value that determines the largest number of characters that you want to allow for String types.

" }, "MaxValue" : { "shape" : "__integer", "locationName" : "maxValue", - "documentation" : "

A numeric value that determines the largest numeric value you want to allow for Number types.

" + "documentation" : "

A numeric value that determines the largest numeric value that you want to allow for Number types.

" }, "MinLength" : { "shape" : "__integer", "locationName" : "minLength", - "documentation" : "

An integer value that determines the smallest number of characters you want to allow for String types.

" + "documentation" : "

An integer value that determines the smallest number of characters that you want to allow for String types.

" }, "MinValue" : { "shape" : "__integer", "locationName" : "minValue", - "documentation" : "

A numeric value that determines the smallest numeric value you want to allow for Number types.

" + "documentation" : "

A numeric value that determines the smallest numeric value that you want to allow for Number types.

" }, "Name" : { "shape" : "__string", @@ -1262,7 +1263,7 @@ "Type" : { "shape" : "__string", "locationName" : "type", - "documentation" : "

The type of the parameter.

Valid values: String | Number | List<Number> | CommaDelimitedList\n

\n String: A literal string.

For example, users could specify \"MyUserName\".

\n Number: An integer or float. AWS CloudFormation validates the parameter value as a number; however, when you use the\n parameter elsewhere in your template (for example, by using the Ref intrinsic function), the parameter value becomes a string.

For example, users could specify \"8888\".

\n List<Number>: An array of integers or floats that are separated by commas. AWS CloudFormation validates the parameter value as numbers; however, when\n you use the parameter elsewhere in your template (for example, by using the Ref intrinsic function), the parameter value becomes a list of strings.

For example, users could specify \"80,20\", and a Ref results in [\"80\",\"20\"].

\n CommaDelimitedList: An array of literal strings that are separated by commas. The total number of strings should be one more than the total number of commas.\n Also, each member string is space-trimmed.

For example, users could specify \"test,dev,prod\", and a Ref results in [\"test\",\"dev\",\"prod\"].

" + "documentation" : "

The type of the parameter.

Valid values: String | Number | List<Number> | CommaDelimitedList\n

\n String: A literal string.

For example, users can specify \"MyUserName\".

\n Number: An integer or float. AWS CloudFormation validates the parameter value as a number. However, when you use the\n parameter elsewhere in your template (for example, by using the Ref intrinsic function), the parameter value becomes a string.

For example, users might specify \"8888\".

\n List<Number>: An array of integers or floats that are separated by commas. AWS CloudFormation validates the parameter value as numbers. However, when\n you use the parameter elsewhere in your template (for example, by using the Ref intrinsic function), the parameter value becomes a list of strings.

For example, users might specify \"80,20\", and then Ref results in [\"80\",\"20\"].

\n CommaDelimitedList: An array of literal strings that are separated by commas. The total number of strings should be one more than the total number of commas.\n Also, each member string is space-trimmed.

For example, users might specify \"test,dev,prod\", and then Ref results in [\"test\",\"dev\",\"prod\"].

" } }, "documentation" : "

Parameters supported by the application.

", @@ -1292,15 +1293,15 @@ "shape" : "__string", "location" : "uri", "locationName" : "applicationId", - "documentation" : "

The ID of the application to get.

" + "documentation" : "

The Amazon Resource Name (ARN) of the application.

" }, "Statements" : { "shape" : "__listOfApplicationPolicyStatement", "locationName" : "statements", - "documentation" : "

Array of policy statements applied to the application.

" + "documentation" : "

An array of policy statements applied to the application.

" } }, - "required" : [ "ApplicationId" ] + "required" : [ "ApplicationId", "Statements" ] }, "PutApplicationPolicyResponse" : { "type" : "structure", @@ -1308,7 +1309,7 @@ "Statements" : { "shape" : "__listOfApplicationPolicyStatement", "locationName" : "statements", - "documentation" : "

Array of policy statements applied to the application.

" + "documentation" : "

An array of policy statements applied to the application.

" } } }, @@ -1323,10 +1324,10 @@ "Message" : { "shape" : "__string", "locationName" : "message", - "documentation" : "

The client is sending more than the allowed number of requests per unit time.

" + "documentation" : "

The client is sending more than the allowed number of requests per unit of time.

" } }, - "documentation" : "

The client is sending more than the allowed number of requests per unit time.

", + "documentation" : "

The client is sending more than the allowed number of requests per unit of time.

", "exception" : true, "error" : { "httpStatusCode" : 429 @@ -1338,12 +1339,12 @@ "Author" : { "shape" : "__string", "locationName" : "author", - "documentation" : "

The name of the author publishing the app.

Min Length=1. Max Length=127.

Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";

" + "documentation" : "

The name of the author publishing the app.

Minimum length=1. Maximum length=127.

Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";

" }, "Description" : { "shape" : "__string", "locationName" : "description", - "documentation" : "

The description of the application.

Min Length=1. Max Length=256

" + "documentation" : "

The description of the application.

Minimum length=1. Maximum length=256

" }, "HomePageUrl" : { "shape" : "__string", @@ -1353,20 +1354,20 @@ "Labels" : { "shape" : "__listOf__string", "locationName" : "labels", - "documentation" : "

Labels to improve discovery of apps in search results.

Min Length=1. Max Length=127. Maximum number of labels: 10

Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";

" + "documentation" : "

Labels to improve discovery of apps in search results.

Minimum length=1. Maximum length=127. Maximum number of labels: 10

Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";

" }, "ReadmeBody" : { "shape" : "__string", "locationName" : "readmeBody", - "documentation" : "

A raw text Readme file that contains a more detailed description of the application and how it works in markdown language.

Max size 5 MB

" + "documentation" : "

A text readme file in Markdown language that contains a more detailed description of the application and how it works.

Maximum size 5 MB

" }, "ReadmeUrl" : { "shape" : "__string", "locationName" : "readmeUrl", - "documentation" : "

A link to the Readme file that contains a more detailed description of the application and how it works in markdown language.

Max size 5 MB

" + "documentation" : "

A link to the readme file in Markdown language that contains a more detailed description of the application and how it works.

Maximum size 5 MB

" } }, - "documentation" : "

Update application request.

" + "documentation" : "

Update the application request.

" }, "UpdateApplicationRequest" : { "type" : "structure", @@ -1375,17 +1376,17 @@ "shape" : "__string", "location" : "uri", "locationName" : "applicationId", - "documentation" : "

The ID of the application to get.

" + "documentation" : "

The Amazon Resource Name (ARN) of the application.

" }, "Author" : { "shape" : "__string", "locationName" : "author", - "documentation" : "

The name of the author publishing the app.

Min Length=1. Max Length=127.

Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";

" + "documentation" : "

The name of the author publishing the app.

Minimum length=1. Maximum length=127.

Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";

" }, "Description" : { "shape" : "__string", "locationName" : "description", - "documentation" : "

The description of the application.

Min Length=1. Max Length=256

" + "documentation" : "

The description of the application.

Minimum length=1. Maximum length=256

" }, "HomePageUrl" : { "shape" : "__string", @@ -1395,17 +1396,17 @@ "Labels" : { "shape" : "__listOf__string", "locationName" : "labels", - "documentation" : "

Labels to improve discovery of apps in search results.

Min Length=1. Max Length=127. Maximum number of labels: 10

Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";

" + "documentation" : "

Labels to improve discovery of apps in search results.

Minimum length=1. Maximum length=127. Maximum number of labels: 10

Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";

" }, "ReadmeBody" : { "shape" : "__string", "locationName" : "readmeBody", - "documentation" : "

A raw text Readme file that contains a more detailed description of the application and how it works in markdown language.

Max size 5 MB

" + "documentation" : "

A text readme file in Markdown language that contains a more detailed description of the application and how it works.

Maximum size 5 MB

" }, "ReadmeUrl" : { "shape" : "__string", "locationName" : "readmeUrl", - "documentation" : "

A link to the Readme file that contains a more detailed description of the application and how it works in markdown language.

Max size 5 MB

" + "documentation" : "

A link to the readme file in Markdown language that contains a more detailed description of the application and how it works.

Maximum size 5 MB

" } }, "required" : [ "ApplicationId" ] @@ -1421,17 +1422,17 @@ "Author" : { "shape" : "__string", "locationName" : "author", - "documentation" : "

The name of the author publishing the app.

Min Length=1. Max Length=127.

Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";

" + "documentation" : "

The name of the author publishing the app.

Minimum length=1. Maximum length=127.

Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";

" }, "CreationTime" : { "shape" : "__string", "locationName" : "creationTime", - "documentation" : "

The date/time this resource was created.

" + "documentation" : "

The date and time this resource was created.

" }, "Description" : { "shape" : "__string", "locationName" : "description", - "documentation" : "

The description of the application.

Min Length=1. Max Length=256

" + "documentation" : "

The description of the application.

Minimum length=1. Maximum length=256

" }, "HomePageUrl" : { "shape" : "__string", @@ -1441,22 +1442,22 @@ "Labels" : { "shape" : "__listOf__string", "locationName" : "labels", - "documentation" : "

Labels to improve discovery of apps in search results.

Min Length=1. Max Length=127. Maximum number of labels: 10

Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";

" + "documentation" : "

Labels to improve discovery of apps in search results.

Minimum length=1. Maximum length=127. Maximum number of labels: 10

Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";

" }, "LicenseUrl" : { "shape" : "__string", "locationName" : "licenseUrl", - "documentation" : "

A link to a license file of the app that matches the spdxLicenseID of your application.

Max size 5 MB

" + "documentation" : "

A link to a license file of the app that matches the spdxLicenseID value of your application.

Maximum size 5 MB

" }, "Name" : { "shape" : "__string", "locationName" : "name", - "documentation" : "

The name of the application.

Min Length=1. Max Length=140

Pattern: \"[a-zA-Z0-9\\\\-]+\";

" + "documentation" : "

The name of the application.

Minimum length=1. Maximum length=140

Pattern: \"[a-zA-Z0-9\\\\-]+\";

" }, "ReadmeUrl" : { "shape" : "__string", "locationName" : "readmeUrl", - "documentation" : "

A link to the readme file that contains a more detailed description of the application and how it works in Markdown language.

Max size 5 MB

" + "documentation" : "

A link to the readme file in Markdown language that contains a more detailed description of the application and how it works.

Maximum size 5 MB

" }, "SpdxLicenseId" : { "shape" : "__string", @@ -1481,12 +1482,12 @@ "CreationTime" : { "shape" : "__string", "locationName" : "creationTime", - "documentation" : "

The date/time this resource was created.

" + "documentation" : "

The date and time this resource was created.

" }, "ParameterDefinitions" : { "shape" : "__listOfParameterDefinition", "locationName" : "parameterDefinitions", - "documentation" : "

Array of parameter types supported by the application.

" + "documentation" : "

An array of parameter types supported by the application.

" }, "SemanticVersion" : { "shape" : "__string", @@ -1518,7 +1519,7 @@ "CreationTime" : { "shape" : "__string", "locationName" : "creationTime", - "documentation" : "

The date/time this resource was created.

" + "documentation" : "

The date and time this resource was created.

" }, "SemanticVersion" : { "shape" : "__string", @@ -1531,7 +1532,7 @@ "documentation" : "

A link to a public repository for the source code of your application.

" } }, - "documentation" : "

Application version summary.

", + "documentation" : "

An application version summary.

", "required" : [ "CreationTime", "ApplicationId", "SemanticVersion" ] }, "__boolean" : { @@ -1587,4 +1588,4 @@ } }, "documentation" : "

The AWS Serverless Application Repository makes it easy for developers and enterprises to quickly find\n and deploy serverless applications in the AWS Cloud. For more information about serverless applications,\n see Serverless Computing and Applications on the AWS website.

The AWS Serverless Application Repository is deeply integrated with the AWS Lambda console, so that developers of \n all levels can get started with serverless computing without needing to learn anything new. You can use category \n keywords to browse for applications such as web and mobile backends, data processing applications, or chatbots. \n You can also search for applications by name, publisher, or event source. To use an application, you simply choose it, \n configure any required fields, and deploy it with a few clicks.

You can also easily publish applications, sharing them publicly with the community at large, or privately\n within your team or across your organization. To publish a serverless application (or app), you can use the\n AWS Management Console, AWS Command Line Interface (AWS CLI), or AWS SDKs to upload the code. Along with the\n code, you upload a simple manifest file, also known as the AWS Serverless Application Model (AWS SAM) template.\n For more information about AWS SAM, see AWS Serverless Application Model (AWS SAM) on the AWS Labs\n GitHub repository.

The AWS Serverless Application Repository Developer Guide contains more information about the two developer\n experiences available:

    \n
  • \n

    Consuming Applications – Browse for applications and view information about them, including\n source code and readme files. Also install, configure, and deploy applications of your choosing.

    \n

    Publishing Applications – Configure and upload applications to make them available to other\n developers, and publish new versions of applications.

    \n
  • \n
" -} +} \ No newline at end of file diff --git a/botocore/data/servicecatalog/2015-12-10/service-2.json b/botocore/data/servicecatalog/2015-12-10/service-2.json index 96e5487a..2278c957 100644 --- a/botocore/data/servicecatalog/2015-12-10/service-2.json +++ b/botocore/data/servicecatalog/2015-12-10/service-2.json @@ -2260,6 +2260,10 @@ "PageSize":{ "shape":"PageSize", "documentation":"

The maximum number of items to return with this call.

" + }, + "PortfolioShareType":{ + "shape":"PortfolioShareType", + "documentation":"

The type of shared portfolios to list. The default is to list imported portfolios.

  • AWS_SERVICECATALOG - List default portfolios

  • IMPORTED - List imported portfolios

" } } }, @@ -2772,6 +2776,13 @@ "min":1 }, "PortfolioName":{"type":"string"}, + "PortfolioShareType":{ + "type":"string", + "enum":[ + "IMPORTED", + "AWS_SERVICECATALOG" + ] + }, "Principal":{ "type":"structure", "members":{ @@ -2814,7 +2825,8 @@ "enum":[ "CLOUD_FORMATION_TEMPLATE", "MARKETPLACE" - ] + ], + "max":8191 }, "ProductViewAggregationType":{"type":"string"}, "ProductViewAggregationValue":{ @@ -2886,9 +2898,18 @@ "key":{"shape":"ProductViewFilterBy"}, "value":{"shape":"ProductViewFilterValues"} }, - "ProductViewName":{"type":"string"}, - "ProductViewOwner":{"type":"string"}, - "ProductViewShortDescription":{"type":"string"}, + "ProductViewName":{ + "type":"string", + "max":8191 + }, + "ProductViewOwner":{ + "type":"string", + "max":8191 + }, + "ProductViewShortDescription":{ + "type":"string", + "max":8191 + }, "ProductViewSortBy":{ "type":"string", "enum":[ @@ -4021,9 +4042,18 @@ "type":"string", "pattern":"[\\u0009\\u000a\\u000d\\u0020-\\uD7FF\\uE000-\\uFFFD]*" }, - "SupportDescription":{"type":"string"}, - "SupportEmail":{"type":"string"}, - "SupportUrl":{"type":"string"}, + "SupportDescription":{ + "type":"string", + "max":8191 + }, + "SupportEmail":{ + "type":"string", + "max":254 + }, + "SupportUrl":{ + "type":"string", + "max":2083 + }, "Tag":{ "type":"structure", "required":[ diff --git a/botocore/data/ses/2010-12-01/service-2.json b/botocore/data/ses/2010-12-01/service-2.json index a3022516..ed07be63 100644 --- a/botocore/data/ses/2010-12-01/service-2.json +++ b/botocore/data/ses/2010-12-01/service-2.json @@ -68,7 +68,7 @@ {"shape":"InvalidSNSDestinationException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Creates a configuration set event destination.

When you create or update an event destination, you must provide one, and only one, destination. The destination can be Amazon CloudWatch, Amazon Kinesis Firehose, or Amazon Simple Notification Service (Amazon SNS).

An event destination is the AWS service to which Amazon SES publishes the email sending events associated with a configuration set. For information about using configuration sets, see the Amazon SES Developer Guide.

You can execute this operation no more than once per second.

" + "documentation":"

Creates a configuration set event destination.

When you create or update an event destination, you must provide one, and only one, destination. The destination can be CloudWatch, Amazon Kinesis Firehose, or Amazon Simple Notification Service (Amazon SNS).

An event destination is the AWS service to which Amazon SES publishes the email sending events associated with a configuration set. For information about using configuration sets, see the Amazon SES Developer Guide.

You can execute this operation no more than once per second.

" }, "CreateConfigurationSetTrackingOptions":{ "name":"CreateConfigurationSetTrackingOptions", @@ -86,7 +86,7 @@ {"shape":"TrackingOptionsAlreadyExistsException"}, {"shape":"InvalidTrackingOptionsException"} ], - "documentation":"

Creates an association between a configuration set and a custom domain for open and click event tracking.

By default, images and links used for tracking open and click events are hosted on domains operated by Amazon SES. You can configure a subdomain of your own to handle these events. For information about using configuration sets, see Configuring Custom Domains to Handle Open and Click Tracking in the Amazon SES Developer Guide.

" + "documentation":"

Creates an association between a configuration set and a custom domain for open and click event tracking.

By default, images and links used for tracking open and click events are hosted on domains operated by Amazon SES. You can configure a subdomain of your own to handle these events. For information about using custom domains, see the Amazon SES Developer Guide.

" }, "CreateCustomVerificationEmailTemplate":{ "name":"CreateCustomVerificationEmailTemplate", @@ -101,7 +101,7 @@ {"shape":"CustomVerificationEmailInvalidContentException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Creates a new custom verification email template.

For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

You can execute this operation no more than once per second.

" + "documentation":"

Creates a new custom verification email template.

For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

You can execute this operation no more than once per second.

" }, "CreateReceiptFilter":{ "name":"CreateReceiptFilter", @@ -225,7 +225,7 @@ {"shape":"ConfigurationSetDoesNotExistException"}, {"shape":"TrackingOptionsDoesNotExistException"} ], - "documentation":"

Deletes an association between a configuration set and a custom domain for open and click event tracking.

By default, images and links used for tracking open and click events are hosted on domains operated by Amazon SES. You can configure a subdomain of your own to handle these events. For information about using configuration sets, see Configuring Custom Domains to Handle Open and Click Tracking in the Amazon SES Developer Guide.

Deleting this kind of association will result in emails sent using the specified configuration set to capture open and click events using the standard, Amazon SES-operated domains.

" + "documentation":"

Deletes an association between a configuration set and a custom domain for open and click event tracking.

By default, images and links used for tracking open and click events are hosted on domains operated by Amazon SES. You can configure a subdomain of your own to handle these events. For information about using custom domains, see the Amazon SES Developer Guide.

Deleting this kind of association will result in emails sent using the specified configuration set to capture open and click events using the standard, Amazon SES-operated domains.

" }, "DeleteCustomVerificationEmailTemplate":{ "name":"DeleteCustomVerificationEmailTemplate", @@ -234,7 +234,7 @@ "requestUri":"/" }, "input":{"shape":"DeleteCustomVerificationEmailTemplateRequest"}, - "documentation":"

Deletes an existing custom verification email template.

For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

You can execute this operation no more than once per second.

" + "documentation":"

Deletes an existing custom verification email template.

For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

You can execute this operation no more than once per second.

" }, "DeleteIdentity":{ "name":"DeleteIdentity", @@ -401,7 +401,7 @@ "shape":"GetAccountSendingEnabledResponse", "resultWrapper":"GetAccountSendingEnabledResult" }, - "documentation":"

Returns the email sending status of the Amazon SES account.

You can execute this operation no more than once per second.

" + "documentation":"

Returns the email sending status of the Amazon SES account for the current region.

You can execute this operation no more than once per second.

" }, "GetCustomVerificationEmailTemplate":{ "name":"GetCustomVerificationEmailTemplate", @@ -417,7 +417,7 @@ "errors":[ {"shape":"CustomVerificationEmailTemplateDoesNotExistException"} ], - "documentation":"

Returns the custom email verification template for the template name you specify.

For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

You can execute this operation no more than once per second.

" + "documentation":"

Returns the custom email verification template for the template name you specify.

For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

You can execute this operation no more than once per second.

" }, "GetIdentityDkimAttributes":{ "name":"GetIdentityDkimAttributes", @@ -506,7 +506,7 @@ "shape":"GetSendStatisticsResponse", "resultWrapper":"GetSendStatisticsResult" }, - "documentation":"

Provides sending statistics for the Amazon SES account. The result is a list of data points, representing the last two weeks of sending activity. Each data point in the list contains statistics for a 15-minute period of time.

You can execute this operation no more than once per second.

" + "documentation":"

Provides sending statistics for the current AWS Region. The result is a list of data points, representing the last two weeks of sending activity. Each data point in the list contains statistics for a 15-minute period of time.

You can execute this operation no more than once per second.

" }, "GetTemplate":{ "name":"GetTemplate", @@ -535,7 +535,7 @@ "shape":"ListConfigurationSetsResponse", "resultWrapper":"ListConfigurationSetsResult" }, - "documentation":"

Provides a list of the configuration sets associated with your Amazon SES account. For information about using configuration sets, see Monitoring Your Amazon SES Sending Activity in the Amazon SES Developer Guide.

You can execute this operation no more than once per second. This operation will return up to 1,000 configuration sets each time it is run. If your Amazon SES account has more than 1,000 configuration sets, this operation will also return a NextToken element. You can then execute the ListConfigurationSets operation again, passing the NextToken parameter and the value of the NextToken element to retrieve additional results.

" + "documentation":"

Provides a list of the configuration sets associated with your Amazon SES account in the current AWS Region. For information about using configuration sets, see Monitoring Your Amazon SES Sending Activity in the Amazon SES Developer Guide.

You can execute this operation no more than once per second. This operation will return up to 1,000 configuration sets each time it is run. If your Amazon SES account has more than 1,000 configuration sets, this operation will also return a NextToken element. You can then execute the ListConfigurationSets operation again, passing the NextToken parameter and the value of the NextToken element to retrieve additional results.

" }, "ListCustomVerificationEmailTemplates":{ "name":"ListCustomVerificationEmailTemplates", @@ -548,7 +548,7 @@ "shape":"ListCustomVerificationEmailTemplatesResponse", "resultWrapper":"ListCustomVerificationEmailTemplatesResult" }, - "documentation":"

Lists the existing custom verification email templates for your account.

For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

You can execute this operation no more than once per second.

" + "documentation":"

Lists the existing custom verification email templates for your account in the current AWS Region.

For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

You can execute this operation no more than once per second.

" }, "ListIdentities":{ "name":"ListIdentities", @@ -561,7 +561,7 @@ "shape":"ListIdentitiesResponse", "resultWrapper":"ListIdentitiesResult" }, - "documentation":"

Returns a list containing all of the identities (email addresses and domains) for your AWS account, regardless of verification status.

You can execute this operation no more than once per second.

" + "documentation":"

Returns a list containing all of the identities (email addresses and domains) for your AWS account in the current AWS Region, regardless of verification status.

You can execute this operation no more than once per second.

" }, "ListIdentityPolicies":{ "name":"ListIdentityPolicies", @@ -587,7 +587,7 @@ "shape":"ListReceiptFiltersResponse", "resultWrapper":"ListReceiptFiltersResult" }, - "documentation":"

Lists the IP address filters associated with your AWS account.

For information about managing IP address filters, see the Amazon SES Developer Guide.

You can execute this operation no more than once per second.

" + "documentation":"

Lists the IP address filters associated with your AWS account in the current AWS Region.

For information about managing IP address filters, see the Amazon SES Developer Guide.

You can execute this operation no more than once per second.

" }, "ListReceiptRuleSets":{ "name":"ListReceiptRuleSets", @@ -600,7 +600,7 @@ "shape":"ListReceiptRuleSetsResponse", "resultWrapper":"ListReceiptRuleSetsResult" }, - "documentation":"

Lists the receipt rule sets that exist under your AWS account. If there are additional receipt rule sets to be retrieved, you will receive a NextToken that you can provide to the next call to ListReceiptRuleSets to retrieve the additional entries.

For information about managing receipt rule sets, see the Amazon SES Developer Guide.

You can execute this operation no more than once per second.

" + "documentation":"

Lists the receipt rule sets that exist under your AWS account in the current AWS Region. If there are additional receipt rule sets to be retrieved, you will receive a NextToken that you can provide to the next call to ListReceiptRuleSets to retrieve the additional entries.

For information about managing receipt rule sets, see the Amazon SES Developer Guide.

You can execute this operation no more than once per second.

" }, "ListTemplates":{ "name":"ListTemplates", @@ -613,7 +613,7 @@ "shape":"ListTemplatesResponse", "resultWrapper":"ListTemplatesResult" }, - "documentation":"

Lists the email templates present in your Amazon SES account.

You can execute this operation no more than once per second.

" + "documentation":"

Lists the email templates present in your Amazon SES account in the current AWS Region.

You can execute this operation no more than once per second.

" }, "ListVerifiedEmailAddresses":{ "name":"ListVerifiedEmailAddresses", @@ -715,7 +715,7 @@ {"shape":"FromEmailAddressNotVerifiedException"}, {"shape":"ProductionAccessNotGrantedException"} ], - "documentation":"

Adds an email address to the list of identities for your Amazon SES account and attempts to verify it. As a result of executing this operation, a customized verification email is sent to the specified address.

To use this operation, you must first create a custom verification email template. For more information about creating and using custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

You can execute this operation no more than once per second.

" + "documentation":"

Adds an email address to the list of identities for your Amazon SES account in the current AWS Region and attempts to verify it. As a result of executing this operation, a customized verification email is sent to the specified address.

To use this operation, you must first create a custom verification email template. For more information about creating and using custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

You can execute this operation no more than once per second.

" }, "SendEmail":{ "name":"SendEmail", @@ -776,7 +776,7 @@ {"shape":"ConfigurationSetSendingPausedException"}, {"shape":"AccountSendingPausedException"} ], - "documentation":"

Composes an email message using an email template and immediately queues it for sending.

In order to send email using the SendTemplatedEmail operation, your call to the API must meet the following requirements:

  • The call must refer to an existing email template. You can create email templates using the CreateTemplate operation.

  • The message must be sent from a verified email address or domain.

  • If your account is still in the Amazon SES sandbox, you may only send to verified addresses or domains, or to email addresses associated with the Amazon SES Mailbox Simulator. For more information, see Verifying Email Addresses and Domains in the Amazon SES Developer Guide.

  • The total size of the message, including attachments, must be less than 10 MB.

  • Calls to the SendTemplatedEmail operation may only include one Destination parameter. A destination is a set of recipients who will receive the same version of the email. The Destination parameter can include up to 50 recipients, across the To:, CC: and BCC: fields.

  • The Destination parameter must include at least one recipient email address. The recipient address can be a To: address, a CC: address, or a BCC: address. If a recipient email address is invalid (that is, it is not in the format UserName@[SubDomain.]Domain.TopLevelDomain), the entire message will be rejected, even if the message contains other recipients that are valid.

" + "documentation":"

Composes an email message using an email template and immediately queues it for sending.

In order to send email using the SendTemplatedEmail operation, your call to the API must meet the following requirements:

  • The call must refer to an existing email template. You can create email templates using the CreateTemplate operation.

  • The message must be sent from a verified email address or domain.

  • If your account is still in the Amazon SES sandbox, you may only send to verified addresses or domains, or to email addresses associated with the Amazon SES Mailbox Simulator. For more information, see Verifying Email Addresses and Domains in the Amazon SES Developer Guide.

  • The total size of the message, including attachments, must be less than 10 MB.

  • Calls to the SendTemplatedEmail operation may only include one Destination parameter. A destination is a set of recipients who will receive the same version of the email. The Destination parameter can include up to 50 recipients, across the To:, CC: and BCC: fields.

  • The Destination parameter must include at least one recipient email address. The recipient address can be a To: address, a CC: address, or a BCC: address. If a recipient email address is invalid (that is, it is not in the format UserName@[SubDomain.]Domain.TopLevelDomain), the entire message will be rejected, even if the message contains other recipients that are valid.

If your call to the SendTemplatedEmail operation includes all of the required parameters, Amazon SES accepts it and returns a Message ID. However, if Amazon SES can't render the email because the template contains errors, it doesn't send the email. Additionally, because it already accepted the message, Amazon SES doesn't return a message stating that it was unable to send the email.

For these reasons, we highly recommend that you set up Amazon SES to send you notifications when Rendering Failure events occur. For more information, see Sending Personalized Email Using the Amazon SES API in the Amazon Simple Email Service Developer Guide.

" }, "SetActiveReceiptRuleSet":{ "name":"SetActiveReceiptRuleSet", @@ -901,7 +901,7 @@ "requestUri":"/" }, "input":{"shape":"UpdateAccountSendingEnabledRequest"}, - "documentation":"

Enables or disables email sending across your entire Amazon SES account. You can use this operation in conjunction with Amazon CloudWatch alarms to temporarily pause email sending across your Amazon SES account when reputation metrics (such as your bounce on complaint rate) reach certain thresholds.

You can execute this operation no more than once per second.

" + "documentation":"

Enables or disables email sending across your entire Amazon SES account in the current AWS Region. You can use this operation in conjunction with Amazon CloudWatch alarms to temporarily pause email sending across your Amazon SES account in a given AWS Region when reputation metrics (such as your bounce or complaint rates) reach certain thresholds.

You can execute this operation no more than once per second.

" }, "UpdateConfigurationSetEventDestination":{ "name":"UpdateConfigurationSetEventDestination", @@ -933,7 +933,7 @@ "errors":[ {"shape":"ConfigurationSetDoesNotExistException"} ], - "documentation":"

Enables or disables the publishing of reputation metrics for emails sent using a specific configuration set. Reputation metrics include bounce and complaint rates. These metrics are published to Amazon CloudWatch. By using Amazon CloudWatch, you can create alarms when bounce or complaint rates exceed a certain threshold.

You can execute this operation no more than once per second.

" + "documentation":"

Enables or disables the publishing of reputation metrics for emails sent using a specific configuration set in a given AWS Region. Reputation metrics include bounce and complaint rates. These metrics are published to Amazon CloudWatch. By using CloudWatch, you can create alarms when bounce or complaint rates exceed certain thresholds.

You can execute this operation no more than once per second.

" }, "UpdateConfigurationSetSendingEnabled":{ "name":"UpdateConfigurationSetSendingEnabled", @@ -945,7 +945,7 @@ "errors":[ {"shape":"ConfigurationSetDoesNotExistException"} ], - "documentation":"

Enables or disables email sending for messages sent using a specific configuration set. You can use this operation in conjunction with Amazon CloudWatch alarms to temporarily pause email sending for a configuration set when the reputation metrics for that configuration set (such as your bounce on complaint rate) reach certain thresholds.

You can execute this operation no more than once per second.

" + "documentation":"

Enables or disables email sending for messages sent using a specific configuration set in a given AWS Region. You can use this operation in conjunction with Amazon CloudWatch alarms to temporarily pause email sending for a configuration set when the reputation metrics for that configuration set (such as your bounce or complaint rate) exceed certain thresholds.

You can execute this operation no more than once per second.

" }, "UpdateConfigurationSetTrackingOptions":{ "name":"UpdateConfigurationSetTrackingOptions", @@ -963,7 +963,7 @@ {"shape":"TrackingOptionsDoesNotExistException"}, {"shape":"InvalidTrackingOptionsException"} ], - "documentation":"

Modifies an association between a configuration set and a custom domain for open and click event tracking.

By default, images and links used for tracking open and click events are hosted on domains operated by Amazon SES. You can configure a subdomain of your own to handle these events. For information about using configuration sets, see Configuring Custom Domains to Handle Open and Click Tracking in the Amazon SES Developer Guide.

" + "documentation":"

Modifies an association between a configuration set and a custom domain for open and click event tracking.

By default, images and links used for tracking open and click events are hosted on domains operated by Amazon SES. You can configure a subdomain of your own to handle these events. For information about using custom domains, see the Amazon SES Developer Guide.

" }, "UpdateCustomVerificationEmailTemplate":{ "name":"UpdateCustomVerificationEmailTemplate", @@ -977,7 +977,7 @@ {"shape":"FromEmailAddressNotVerifiedException"}, {"shape":"CustomVerificationEmailInvalidContentException"} ], - "documentation":"

Updates an existing custom verification email template.

For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

You can execute this operation no more than once per second.

" + "documentation":"

Updates an existing custom verification email template.

For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

You can execute this operation no more than once per second.

" }, "UpdateReceiptRule":{ "name":"UpdateReceiptRule", @@ -1041,7 +1041,7 @@ "shape":"VerifyDomainIdentityResponse", "resultWrapper":"VerifyDomainIdentityResult" }, - "documentation":"

Adds a domain to the list of identities for your Amazon SES account and attempts to verify it. For more information about verifying domains, see Verifying Email Addresses and Domains in the Amazon SES Developer Guide.

You can execute this operation no more than once per second.

" + "documentation":"

Adds a domain to the list of identities for your Amazon SES account in the current AWS Region and attempts to verify it. For more information about verifying domains, see Verifying Email Addresses and Domains in the Amazon SES Developer Guide.

You can execute this operation no more than once per second.

" }, "VerifyEmailAddress":{ "name":"VerifyEmailAddress", @@ -1063,7 +1063,7 @@ "shape":"VerifyEmailIdentityResponse", "resultWrapper":"VerifyEmailIdentityResult" }, - "documentation":"

Adds an email address to the list of identities for your Amazon SES account and attempts to verify it. As a result of executing this operation, a verification email is sent to the specified address.

You can execute this operation no more than once per second.

" + "documentation":"

Adds an email address to the list of identities for your Amazon SES account in the current AWS Region and attempts to verify it. As a result of executing this operation, a verification email is sent to the specified address.

You can execute this operation no more than once per second.

" } }, "shapes":{ @@ -2205,10 +2205,10 @@ "members":{ "Enabled":{ "shape":"Enabled", - "documentation":"

Describes whether email sending is enabled or disabled for your Amazon SES account.

" + "documentation":"

Describes whether email sending is enabled or disabled for your Amazon SES account in the current AWS Region.

" } }, - "documentation":"

Represents a request to return the email sending status for your Amazon SES account.

" + "documentation":"

Represents a request to return the email sending status for your Amazon SES account in the current AWS Region.

" }, "GetCustomVerificationEmailTemplateRequest":{ "type":"structure", @@ -2677,7 +2677,7 @@ "members":{ "TemplateName":{"shape":"TemplateName"} }, - "documentation":"

Indicates that a template could not be created because it contained invalid JSON.

", + "documentation":"

Indicates that the template that you specified could not be rendered. This issue may occur when a template refers to a partial that does not exist.

", "error":{ "code":"InvalidTemplate", "httpStatusCode":400, @@ -2795,7 +2795,7 @@ "documentation":"

The maximum number of custom verification email templates to return. This value must be at least 1 and less than or equal to 50. If you do not specify a value, or if you specify a value less than 1 or greater than 50, the operation will return up to 50 results.

" } }, - "documentation":"

Represents a request to list the existing custom verification email templates for your account.

For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

" + "documentation":"

Represents a request to list the existing custom verification email templates for your account.

For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

" }, "ListCustomVerificationEmailTemplatesResponse":{ "type":"structure", @@ -3156,7 +3156,7 @@ }, "WorkmailAction":{ "shape":"WorkmailAction", - "documentation":"

Calls Amazon WorkMail and, optionally, publishes a notification to Amazon SNS.

" + "documentation":"

Calls Amazon WorkMail and, optionally, publishes a notification to Amazon SNS.

" }, "LambdaAction":{ "shape":"LambdaAction", @@ -3427,7 +3427,7 @@ }, "KmsKeyArn":{ "shape":"AmazonResourceName", - "documentation":"

The customer master key that Amazon SES should use to encrypt your emails before saving them to the Amazon S3 bucket. You can use the default master key or a custom master key you created in AWS KMS as follows:

  • To use the default master key, provide an ARN in the form of arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses. For example, if your AWS account ID is 123456789012 and you want to use the default master key in the US West (Oregon) region, the ARN of the default master key would be arn:aws:kms:us-west-2:123456789012:alias/aws/ses. If you use the default master key, you don't need to perform any extra steps to give Amazon SES permission to use the key.

  • To use a custom master key you created in AWS KMS, provide the ARN of the master key and ensure that you add a statement to your key's policy to give Amazon SES permission to use it. For more information about giving permissions, see the Amazon SES Developer Guide.

For more information about key policies, see the AWS KMS Developer Guide. If you do not specify a master key, Amazon SES will not encrypt your emails.

Your mail is encrypted by Amazon SES using the Amazon S3 encryption client before the mail is submitted to Amazon S3 for storage. It is not encrypted using Amazon S3 server-side encryption. This means that you must use the Amazon S3 encryption client to decrypt the email after retrieving it from Amazon S3, as the service has no access to use your AWS KMS keys for decryption. This encryption client is currently available with the AWS Java SDK and AWS Ruby SDK only. For more information about client-side encryption using AWS KMS master keys, see the Amazon S3 Developer Guide.

" + "documentation":"

The customer master key that Amazon SES should use to encrypt your emails before saving them to the Amazon S3 bucket. You can use the default master key or a custom master key you created in AWS KMS as follows:

  • To use the default master key, provide an ARN in the form of arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses. For example, if your AWS account ID is 123456789012 and you want to use the default master key in the US West (Oregon) region, the ARN of the default master key would be arn:aws:kms:us-west-2:123456789012:alias/aws/ses. If you use the default master key, you don't need to perform any extra steps to give Amazon SES permission to use the key.

  • To use a custom master key you created in AWS KMS, provide the ARN of the master key and ensure that you add a statement to your key's policy to give Amazon SES permission to use it. For more information about giving permissions, see the Amazon SES Developer Guide.

For more information about key policies, see the AWS KMS Developer Guide. If you do not specify a master key, Amazon SES will not encrypt your emails.

Your mail is encrypted by Amazon SES using the Amazon S3 encryption client before the mail is submitted to Amazon S3 for storage. It is not encrypted using Amazon S3 server-side encryption. This means that you must use the Amazon S3 encryption client to decrypt the email after retrieving it from Amazon S3, as the service has no access to use your AWS KMS keys for decryption. This encryption client is currently available with the AWS SDK for Java and AWS SDK for Ruby only. For more information about client-side encryption using AWS KMS master keys, see the Amazon S3 Developer Guide.

" } }, "documentation":"

When included in a receipt rule, this action saves the received message to an Amazon Simple Storage Service (Amazon S3) bucket and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS).

To enable Amazon SES to write emails to your Amazon S3 bucket, use an AWS KMS key to encrypt your emails, or publish to an Amazon SNS topic of another account, Amazon SES must have permission to access those resources. For information about giving permissions, see the Amazon SES Developer Guide.

When you save your emails to an Amazon S3 bucket, the maximum email size (including headers) is 30 MB. Emails larger than that will bounce.

For information about specifying Amazon S3 actions in receipt rules, see the Amazon SES Developer Guide.

" @@ -4113,7 +4113,7 @@ "documentation":"

The custom subdomain that will be used to redirect email recipients to the Amazon SES event tracking domain.

" } }, - "documentation":"

A domain that is used to redirect email recipients to an Amazon SES-operated domain. This domain captures open and click events generated by Amazon SES emails.

For more information, see Configuring Custom Domains to Handle Open and Click Tracking in the Amazon SES Developer Guide.

" + "documentation":"

A domain that is used to redirect email recipients to an Amazon SES-operated domain. This domain captures open and click events generated by Amazon SES emails.

For more information, see Configuring Custom Domains to Handle Open and Click Tracking in the Amazon SES Developer Guide.

" }, "TrackingOptionsAlreadyExistsException":{ "type":"structure", @@ -4152,7 +4152,7 @@ "members":{ "Enabled":{ "shape":"Enabled", - "documentation":"

Describes whether email sending is enabled or disabled for your Amazon SES account.

" + "documentation":"

Describes whether email sending is enabled or disabled for your Amazon SES account in the current AWS Region.

" } }, "documentation":"

Represents a request to enable or disable the email sending capabilities for your entire Amazon SES account.

" @@ -4413,5 +4413,5 @@ "documentation":"

When included in a receipt rule, this action calls Amazon WorkMail and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS). You will typically not use this action directly because Amazon WorkMail adds the rule automatically during its setup procedure.

For information about using a receipt rule to call Amazon WorkMail, see the Amazon SES Developer Guide.

" } }, - "documentation":"Amazon Simple Email Service

This is the API Reference for Amazon Simple Email Service (Amazon SES). This documentation is intended to be used in conjunction with the Amazon SES Developer Guide.

For a list of Amazon SES endpoints to use in service requests, see Regions and Amazon SES in the Amazon SES Developer Guide.

" + "documentation":"Amazon Simple Email Service

This document contains reference information for the Amazon Simple Email Service (Amazon SES) API, version 2010-12-01. This document is best used in conjunction with the Amazon SES Developer Guide.

For a list of Amazon SES endpoints to use in service requests, see Regions and Amazon SES in the Amazon SES Developer Guide.

" } diff --git a/botocore/data/shield/2016-06-02/paginators-1.json b/botocore/data/shield/2016-06-02/paginators-1.json index 6ff2244f..022e0dca 100644 --- a/botocore/data/shield/2016-06-02/paginators-1.json +++ b/botocore/data/shield/2016-06-02/paginators-1.json @@ -1,10 +1,10 @@ { - "pagination": { - "ListProtections": { - "input_token": "NextToken", - "output_token": "NextToken", - "limit_key": "MaxResults", - "result_key": "Protections" - } + "pagination": { + "ListProtections": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Protections" } + } } diff --git a/botocore/data/shield/2016-06-02/service-2.json b/botocore/data/shield/2016-06-02/service-2.json index 51861845..4d732920 100644 --- a/botocore/data/shield/2016-06-02/service-2.json +++ b/botocore/data/shield/2016-06-02/service-2.json @@ -7,11 +7,50 @@ "protocol":"json", "serviceAbbreviation":"AWS Shield", "serviceFullName":"AWS Shield", + "serviceId":"Shield", "signatureVersion":"v4", "targetPrefix":"AWSShield_20160616", "uid":"shield-2016-06-02" }, "operations":{ + "AssociateDRTLogBucket":{ + "name":"AssociateDRTLogBucket", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateDRTLogBucketRequest"}, + "output":{"shape":"AssociateDRTLogBucketResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"InvalidOperationException"}, + {"shape":"NoAssociatedRoleException"}, + {"shape":"LimitsExceededException"}, + {"shape":"InvalidParameterException"}, + {"shape":"AccessDeniedForDependencyException"}, + {"shape":"OptimisticLockException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Authorizes the DDoS Response team (DRT) to access the specified Amazon S3 bucket containing your flow logs. You can associate up to 10 Amazon S3 buckets with your subscription.

To use the services of the DRT and make an AssociateDRTLogBucket request, you must be subscribed to the Business Support plan or the Enterprise Support plan.

" + }, + "AssociateDRTRole":{ + "name":"AssociateDRTRole", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateDRTRoleRequest"}, + "output":{"shape":"AssociateDRTRoleResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"InvalidOperationException"}, + {"shape":"InvalidParameterException"}, + {"shape":"AccessDeniedForDependencyException"}, + {"shape":"OptimisticLockException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Authorizes the DDoS Response team (DRT), using the specified role, to access your AWS account to assist with DDoS attack mitigation during potential attacks. This enables the DRT to inspect your AWS WAF configuration and create or update AWS WAF rules and web ACLs.

You can associate only one RoleArn with your subscription. If you submit an AssociateDRTRole request for an account that already has an associated role, the new RoleArn will replace the existing RoleArn.

Prior to making the AssociateDRTRole request, you must attach the AWSShieldDRTAccessPolicy managed policy to the role you will specify in the request. For more information see Attaching and Detaching IAM Policies. The role must also trust the service principal drt.shield.amazonaws.com. For more information, see IAM JSON Policy Elements: Principal.

The DRT will have access only to your AWS WAF and Shield resources. By submitting this request, you authorize the DRT to inspect your AWS WAF and Shield configuration and create and update AWS WAF rules and web ACLs on your behalf. The DRT takes these actions only if explicitly authorized by you.

You must have the iam:PassRole permission to make an AssociateDRTRole request. For more information, see Granting a User Permissions to Pass a Role to an AWS Service.

To use the services of the DRT and make an AssociateDRTRole request, you must be subscribed to the Business Support plan or the Enterprise Support plan.

" + }, "CreateProtection":{ "name":"CreateProtection", "http":{ @@ -29,7 +68,7 @@ {"shape":"OptimisticLockException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Enables AWS Shield Advanced for a specific AWS resource. The resource can be an Amazon CloudFront distribution, Elastic Load Balancing load balancer, Elastic IP Address, or an Amazon Route 53 hosted zone.

" + "documentation":"

Enables AWS Shield Advanced for a specific AWS resource. The resource can be an Amazon CloudFront distribution, Elastic Load Balancing load balancer, Elastic IP Address, or an Amazon Route 53 hosted zone.

You can add protection to only a single resource with each CreateProtection request. If you want to add protection to multiple resources at once, use the AWS WAF console. For more information see Getting Started with AWS Shield Advanced and Add AWS Shield Advanced Protection to more AWS Resources.

" }, "CreateSubscription":{ "name":"CreateSubscription", @@ -43,7 +82,7 @@ {"shape":"InternalErrorException"}, {"shape":"ResourceAlreadyExistsException"} ], - "documentation":"

Activates AWS Shield Advanced for an account.

" + "documentation":"

Activates AWS Shield Advanced for an account.

As part of this request you can specify EmergencySettings that automatically grant the DDoS response team (DRT) needed permissions to assist you during a suspected DDoS attack. For more information see Authorize the DDoS Response Team to Create Rules and Web ACLs on Your Behalf.

When you initially create a subscription, your subscription is set to be automatically renewed at the end of the existing subscription period. You can change this by submitting an UpdateSubscription request.

" }, "DeleteProtection":{ "name":"DeleteProtection", @@ -73,7 +112,8 @@ {"shape":"LockedSubscriptionException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Removes AWS Shield Advanced from an account. AWS Shield Advanced requires a 1-year subscription commitment. You cannot delete a subscription prior to the completion of that commitment.

" + "documentation":"

Removes AWS Shield Advanced from an account. AWS Shield Advanced requires a 1-year subscription commitment. You cannot delete a subscription prior to the completion of that commitment.

", + "deprecated":true }, "DescribeAttack":{ "name":"DescribeAttack", @@ -89,6 +129,34 @@ ], "documentation":"

Describes the details of a DDoS attack.

" }, + "DescribeDRTAccess":{ + "name":"DescribeDRTAccess", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDRTAccessRequest"}, + "output":{"shape":"DescribeDRTAccessResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns the current role and list of Amazon S3 log buckets used by the DDoS Response team (DRT) to access your AWS account while assisting with attack mitigation.

" + }, + "DescribeEmergencyContactSettings":{ + "name":"DescribeEmergencyContactSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEmergencyContactSettingsRequest"}, + "output":{"shape":"DescribeEmergencyContactSettingsResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Lists the email addresses that the DRT can use to contact you during a suspected attack.

" + }, "DescribeProtection":{ "name":"DescribeProtection", "http":{ @@ -117,6 +185,40 @@ ], "documentation":"

Provides details about the AWS Shield Advanced subscription for an account.

" }, + "DisassociateDRTLogBucket":{ + "name":"DisassociateDRTLogBucket", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateDRTLogBucketRequest"}, + "output":{"shape":"DisassociateDRTLogBucketResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"InvalidOperationException"}, + {"shape":"NoAssociatedRoleException"}, + {"shape":"AccessDeniedForDependencyException"}, + {"shape":"OptimisticLockException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Removes the DDoS Response team's (DRT) access to the specified Amazon S3 bucket containing your flow logs.

To make a DisassociateDRTLogBucket request, you must be subscribed to the Business Support plan or the Enterprise Support plan. However, if you are not subscribed to one of these support plans, but had been previously and had granted the DRT access to your account, you can submit a DisassociateDRTLogBucket request to remove this access.

" + }, + "DisassociateDRTRole":{ + "name":"DisassociateDRTRole", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateDRTRoleRequest"}, + "output":{"shape":"DisassociateDRTRoleResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"InvalidOperationException"}, + {"shape":"OptimisticLockException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Removes the DDoS Response team's (DRT) access to your AWS account.

To make a DisassociateDRTRole request, you must be subscribed to the Business Support plan or the Enterprise Support plan. However, if you are not subscribed to one of these support plans, but had been previously and had granted the DRT access to your account, you can submit a DisassociateDRTRole request to remove this access.

" + }, "GetSubscriptionState":{ "name":"GetSubscriptionState", "http":{ @@ -155,12 +257,84 @@ "output":{"shape":"ListProtectionsResponse"}, "errors":[ {"shape":"InternalErrorException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidPaginationTokenException"} ], "documentation":"

Lists all Protection objects for the account.

" + }, + "UpdateEmergencyContactSettings":{ + "name":"UpdateEmergencyContactSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateEmergencyContactSettingsRequest"}, + "output":{"shape":"UpdateEmergencyContactSettingsResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"InvalidParameterException"}, + {"shape":"OptimisticLockException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Updates the details of the list of email addresses that the DRT can use to contact you during a suspected attack.

" + }, + "UpdateSubscription":{ + "name":"UpdateSubscription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateSubscriptionRequest"}, + "output":{"shape":"UpdateSubscriptionResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"LockedSubscriptionException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"OptimisticLockException"} + ], + "documentation":"

Updates the details of an existing subscription. Only enter values for parameters you want to change. Empty parameters are not updated.

" } }, "shapes":{ + "AccessDeniedForDependencyException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "documentation":"

In order to grant the necessary access to the DDoS Response Team, the user submitting AssociateDRTRole must have the iam:PassRole permission. This error indicates the user did not have the appropriate permissions. For more information, see Granting a User Permissions to Pass a Role to an AWS Service.

", + "exception":true + }, + "AssociateDRTLogBucketRequest":{ + "type":"structure", + "required":["LogBucket"], + "members":{ + "LogBucket":{ + "shape":"LogBucket", + "documentation":"

The Amazon S3 bucket that contains your flow logs.

" + } + } + }, + "AssociateDRTLogBucketResponse":{ + "type":"structure", + "members":{ + } + }, + "AssociateDRTRoleRequest":{ + "type":"structure", + "required":["RoleArn"], + "members":{ + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the role the DRT will use to access your AWS account.

Prior to making the AssociateDRTRole request, you must attach the AWSShieldDRTAccessPolicy managed policy to this role. For more information see Attaching and Detaching IAM Policies.

" + } + } + }, + "AssociateDRTRoleResponse":{ + "type":"structure", + "members":{ + } + }, "AttackDetail":{ "type":"structure", "members":{ @@ -299,6 +473,13 @@ "type":"list", "member":{"shape":"AttackVectorDescription"} }, + "AutoRenew":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "Contributor":{ "type":"structure", "members":{ @@ -367,12 +548,14 @@ "DeleteSubscriptionRequest":{ "type":"structure", "members":{ - } + }, + "deprecated":true }, "DeleteSubscriptionResponse":{ "type":"structure", "members":{ - } + }, + "deprecated":true }, "DescribeAttackRequest":{ "type":"structure", @@ -393,6 +576,38 @@ } } }, + "DescribeDRTAccessRequest":{ + "type":"structure", + "members":{ + } + }, + "DescribeDRTAccessResponse":{ + "type":"structure", + "members":{ + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the role the DRT used to access your AWS account.

" + }, + "LogBucketList":{ + "shape":"LogBucketList", + "documentation":"

The list of Amazon S3 buckets accessed by the DRT.

" + } + } + }, + "DescribeEmergencyContactSettingsRequest":{ + "type":"structure", + "members":{ + } + }, + "DescribeEmergencyContactSettingsResponse":{ + "type":"structure", + "members":{ + "EmergencyContactList":{ + "shape":"EmergencyContactList", + "documentation":"

A list of email addresses that the DRT can use to contact you during a suspected attack.

" + } + } + }, "DescribeProtectionRequest":{ "type":"structure", "required":["ProtectionId"], @@ -426,11 +641,57 @@ } } }, + "DisassociateDRTLogBucketRequest":{ + "type":"structure", + "required":["LogBucket"], + "members":{ + "LogBucket":{ + "shape":"LogBucket", + "documentation":"

The Amazon S3 bucket that contains your flow logs.

" + } + } + }, + "DisassociateDRTLogBucketResponse":{ + "type":"structure", + "members":{ + } + }, + "DisassociateDRTRoleRequest":{ + "type":"structure", + "members":{ + } + }, + "DisassociateDRTRoleResponse":{ + "type":"structure", + "members":{ + } + }, "Double":{"type":"double"}, "DurationInSeconds":{ "type":"long", "min":0 }, + "EmailAddress":{ + "type":"string", + "pattern":"^\\S+@\\S+\\.\\S+$" + }, + "EmergencyContact":{ + "type":"structure", + "required":["EmailAddress"], + "members":{ + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

An email address that the DRT can use to contact you during a suspected attack.

" + } + }, + "documentation":"

Contact information that the DRT can use to contact you during a suspected attack.

" + }, + "EmergencyContactList":{ + "type":"list", + "member":{"shape":"EmergencyContact"}, + "max":10, + "min":0 + }, "GetSubscriptionStateRequest":{ "type":"structure", "members":{ @@ -464,6 +725,14 @@ "documentation":"

Exception that indicates that the operation would not cause any change to occur.

", "exception":true }, + "InvalidPaginationTokenException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "documentation":"

Exception that indicates that the NextToken specified in the request is invalid. Submit the request using the NextToken value that was returned in the response.

", + "exception":true + }, "InvalidParameterException":{ "type":"structure", "members":{ @@ -480,8 +749,26 @@ "documentation":"

Exception that indicates that the resource is invalid. You might not have access to the resource, or the resource might not exist.

", "exception":true }, + "Limit":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"String", + "documentation":"

The type of protection.

" + }, + "Max":{ + "shape":"Long", + "documentation":"

The maximum number of protections that can be created for the specified Type.

" + } + }, + "documentation":"

Specifies how many protections of a given type you can create.

" + }, "LimitNumber":{"type":"long"}, "LimitType":{"type":"string"}, + "Limits":{ + "type":"list", + "member":{"shape":"Limit"} + }, "LimitsExceededException":{ "type":"structure", "members":{ @@ -561,9 +848,21 @@ "members":{ "message":{"shape":"errorMessage"} }, - "documentation":"

Exception that indicates that the subscription you are trying to delete has not yet completed the 1-year commitment. You cannot delete this subscription.

", + "documentation":"

You are trying to update a subscription that has not yet completed the 1-year commitment. You can change the AutoRenew parameter during the last 30 days of your subscription. This exception indicates that you are attempting to change AutoRenew prior to that period.

", "exception":true }, + "LogBucket":{ + "type":"string", + "max":63, + "min":3, + "pattern":"^([a-z]|(\\d(?!\\d{0,2}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})))([a-z\\d]|(\\.(?!(\\.|-)))|(-(?!\\.))){1,61}[a-z\\d]$" + }, + "LogBucketList":{ + "type":"list", + "member":{"shape":"LogBucket"}, + "max":10, + "min":0 + }, "Long":{"type":"long"}, "MaxResults":{ "type":"integer", @@ -585,6 +884,14 @@ "type":"list", "member":{"shape":"Mitigation"} }, + "NoAssociatedRoleException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "documentation":"

The ARN of the role that you specified does not exist.

", + "exception":true + }, "OptimisticLockException":{ "type":"structure", "members":{ @@ -651,6 +958,11 @@ "documentation":"

Exception indicating the specified resource does not exist.

", "exception":true }, + "RoleArn":{ + "type":"string", + "max":96, + "pattern":"^arn:aws:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+" + }, "String":{"type":"string"}, "SubResourceSummary":{ "type":"structure", @@ -692,9 +1004,21 @@ "shape":"Timestamp", "documentation":"

The start time of the subscription, in Unix time in seconds. For more information see timestamp.

" }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The date and time your subscription will end.

" + }, "TimeCommitmentInSeconds":{ "shape":"DurationInSeconds", "documentation":"

The length, in seconds, of the AWS Shield Advanced subscription for the account.

" + }, + "AutoRenew":{ + "shape":"AutoRenew", + "documentation":"

If ENABLED, the subscription will be automatically renewed at the end of the existing subscription period.

When you initially create a subscription, AutoRenew is set to ENABLED. You can change this by submitting an UpdateSubscription request. If the UpdateSubscription request does not include a value for AutoRenew, the existing value for AutoRenew remains unchanged.

" + }, + "Limits":{ + "shape":"Limits", + "documentation":"

Specifies how many protections of a given type you can create.

" } }, "documentation":"

Information about the AWS Shield Advanced subscription for an account.

" @@ -791,6 +1115,34 @@ "REQUESTS" ] }, + "UpdateEmergencyContactSettingsRequest":{ + "type":"structure", + "members":{ + "EmergencyContactList":{ + "shape":"EmergencyContactList", + "documentation":"

A list of email addresses that the DRT can use to contact you during a suspected attack.

" + } + } + }, + "UpdateEmergencyContactSettingsResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateSubscriptionRequest":{ + "type":"structure", + "members":{ + "AutoRenew":{ + "shape":"AutoRenew", + "documentation":"

When you initially create a subscription, AutoRenew is set to ENABLED. If ENABLED, the subscription will be automatically renewed at the end of the existing subscription period. You can change this by submitting an UpdateSubscription request. If the UpdateSubscription request does not include a value for AutoRenew, the existing value for AutoRenew remains unchanged.

" + } + } + }, + "UpdateSubscriptionResponse":{ + "type":"structure", + "members":{ + } + }, "errorMessage":{"type":"string"} }, "documentation":"AWS Shield Advanced

This is the AWS Shield Advanced API Reference. This guide is for developers who need detailed information about the AWS Shield Advanced API actions, data types, and errors. For detailed information about AWS WAF and AWS Shield Advanced features and an overview of how to use the AWS WAF and AWS Shield Advanced APIs, see the AWS WAF and AWS Shield Developer Guide.

" diff --git a/botocore/data/sms/2016-10-24/service-2.json b/botocore/data/sms/2016-10-24/service-2.json index fd6a5901..5de6fb15 100644 --- a/botocore/data/sms/2016-10-24/service-2.json +++ b/botocore/data/sms/2016-10-24/service-2.json @@ -8,6 +8,7 @@ "protocol":"json", "serviceAbbreviation":"SMS", "serviceFullName":"AWS Server Migration Service", + "serviceId":"SMS", "signatureVersion":"v4", "targetPrefix":"AWSServerMigrationService_V2016_10_24" }, diff --git a/botocore/data/snowball/2016-06-30/service-2.json b/botocore/data/snowball/2016-06-30/service-2.json index 50ee13be..5257b757 100644 --- a/botocore/data/snowball/2016-06-30/service-2.json +++ b/botocore/data/snowball/2016-06-30/service-2.json @@ -7,6 +7,7 @@ "protocol":"json", "serviceAbbreviation":"Amazon Snowball", "serviceFullName":"Amazon Import/Export Snowball", + "serviceId":"Snowball", "signatureVersion":"v4", "targetPrefix":"AWSIESnowballJobManagementService", "uid":"snowball-2016-06-30" diff --git a/botocore/data/sns/2010-03-31/service-2.json b/botocore/data/sns/2010-03-31/service-2.json index 129d5720..5fb59881 100644 --- a/botocore/data/sns/2010-03-31/service-2.json +++ b/botocore/data/sns/2010-03-31/service-2.json @@ -1,13 +1,14 @@ { "version":"2.0", "metadata":{ - "uid":"sns-2010-03-31", "apiVersion":"2010-03-31", "endpointPrefix":"sns", "protocol":"query", "serviceAbbreviation":"Amazon SNS", "serviceFullName":"Amazon Simple Notification Service", + "serviceId":"SNS", "signatureVersion":"v4", + "uid":"sns-2010-03-31", "xmlNamespace":"http://sns.amazonaws.com/doc/2010-03-31/" }, "operations":{ @@ -276,7 +277,7 @@ {"shape":"AuthorizationErrorException"}, {"shape":"NotFoundException"} ], - "documentation":"

Lists the endpoints and endpoint attributes for devices in a supported push notification service, such as GCM and APNS. The results for ListEndpointsByPlatformApplication are paginated and return a limited list of endpoints, up to 100. If additional records are available after the first page results, then a NextToken string will be returned. To receive the next page, you call ListEndpointsByPlatformApplication again using the NextToken string received from the previous call. When there are no more records to return, NextToken will be null. For more information, see Using Amazon SNS Mobile Push Notifications.

" + "documentation":"

Lists the endpoints and endpoint attributes for devices in a supported push notification service, such as GCM and APNS. The results for ListEndpointsByPlatformApplication are paginated and return a limited list of endpoints, up to 100. If additional records are available after the first page results, then a NextToken string will be returned. To receive the next page, you call ListEndpointsByPlatformApplication again using the NextToken string received from the previous call. When there are no more records to return, NextToken will be null. For more information, see Using Amazon SNS Mobile Push Notifications.

This action is throttled at 30 transactions per second (TPS).

" }, "ListPhoneNumbersOptedOut":{ "name":"ListPhoneNumbersOptedOut", @@ -313,7 +314,7 @@ {"shape":"InternalErrorException"}, {"shape":"AuthorizationErrorException"} ], - "documentation":"

Lists the platform application objects for the supported push notification services, such as APNS and GCM. The results for ListPlatformApplications are paginated and return a limited list of applications, up to 100. If additional records are available after the first page results, then a NextToken string will be returned. To receive the next page, you call ListPlatformApplications using the NextToken string received from the previous call. When there are no more records to return, NextToken will be null. For more information, see Using Amazon SNS Mobile Push Notifications.

" + "documentation":"

Lists the platform application objects for the supported push notification services, such as APNS and GCM. The results for ListPlatformApplications are paginated and return a limited list of applications, up to 100. If additional records are available after the first page results, then a NextToken string will be returned. To receive the next page, you call ListPlatformApplications using the NextToken string received from the previous call. When there are no more records to return, NextToken will be null. For more information, see Using Amazon SNS Mobile Push Notifications.

This action is throttled at 15 transactions per second (TPS).

" }, "ListSubscriptions":{ "name":"ListSubscriptions", @@ -331,7 +332,7 @@ {"shape":"InternalErrorException"}, {"shape":"AuthorizationErrorException"} ], - "documentation":"

Returns a list of the requester's subscriptions. Each call returns a limited list of subscriptions, up to 100. If there are more subscriptions, a NextToken is also returned. Use the NextToken parameter in a new ListSubscriptions call to get further results.

" + "documentation":"

Returns a list of the requester's subscriptions. Each call returns a limited list of subscriptions, up to 100. If there are more subscriptions, a NextToken is also returned. Use the NextToken parameter in a new ListSubscriptions call to get further results.

This action is throttled at 30 transactions per second (TPS).

" }, "ListSubscriptionsByTopic":{ "name":"ListSubscriptionsByTopic", @@ -350,7 +351,7 @@ {"shape":"NotFoundException"}, {"shape":"AuthorizationErrorException"} ], - "documentation":"

Returns a list of the subscriptions to a specific topic. Each call returns a limited list of subscriptions, up to 100. If there are more subscriptions, a NextToken is also returned. Use the NextToken parameter in a new ListSubscriptionsByTopic call to get further results.

" + "documentation":"

Returns a list of the subscriptions to a specific topic. Each call returns a limited list of subscriptions, up to 100. If there are more subscriptions, a NextToken is also returned. Use the NextToken parameter in a new ListSubscriptionsByTopic call to get further results.

This action is throttled at 30 transactions per second (TPS).

" }, "ListTopics":{ "name":"ListTopics", @@ -368,7 +369,7 @@ {"shape":"InternalErrorException"}, {"shape":"AuthorizationErrorException"} ], - "documentation":"

Returns a list of the requester's topics. Each call returns a limited list of topics, up to 100. If there are more topics, a NextToken is also returned. Use the NextToken parameter in a new ListTopics call to get further results.

" + "documentation":"

Returns a list of the requester's topics. Each call returns a limited list of topics, up to 100. If there are more topics, a NextToken is also returned. Use the NextToken parameter in a new ListTopics call to get further results.

This action is throttled at 30 transactions per second (TPS).

" }, "OptInPhoneNumber":{ "name":"OptInPhoneNumber", @@ -409,7 +410,7 @@ {"shape":"PlatformApplicationDisabledException"}, {"shape":"AuthorizationErrorException"} ], - "documentation":"

Sends a message to all of a topic's subscribed endpoints. When a messageId is returned, the message has been saved and Amazon SNS will attempt to deliver it to the topic's subscribers shortly. The format of the outgoing message to each subscribed endpoint depends on the notification protocol.

To use the Publish action for sending a message to a mobile endpoint, such as an app on a Kindle device or mobile phone, you must specify the EndpointArn for the TargetArn parameter. The EndpointArn is returned when making a call with the CreatePlatformEndpoint action.

For more information about formatting messages, see Send Custom Platform-Specific Payloads in Messages to Mobile Devices.

" + "documentation":"

Sends a message to an Amazon SNS topic or sends a text message (SMS message) directly to a phone number.

If you send a message to a topic, Amazon SNS delivers the message to each endpoint that is subscribed to the topic. The format of the message depends on the notification protocol for each subscribed endpoint.

When a messageId is returned, the message has been saved and Amazon SNS will attempt to deliver it shortly.

To use the Publish action for sending a message to a mobile endpoint, such as an app on a Kindle device or mobile phone, you must specify the EndpointArn for the TargetArn parameter. The EndpointArn is returned when making a call with the CreatePlatformEndpoint action.

For more information about formatting messages, see Send Custom Platform-Specific Payloads in Messages to Mobile Devices.

" }, "RemovePermission":{ "name":"RemovePermission", @@ -484,11 +485,12 @@ "input":{"shape":"SetSubscriptionAttributesInput"}, "errors":[ {"shape":"InvalidParameterException"}, + {"shape":"FilterPolicyLimitExceededException"}, {"shape":"InternalErrorException"}, {"shape":"NotFoundException"}, {"shape":"AuthorizationErrorException"} ], - "documentation":"

Allows a subscription owner to set an attribute of the topic to a new value.

" + "documentation":"

Allows a subscription owner to set an attribute of the subscription to a new value.

" }, "SetTopicAttributes":{ "name":"SetTopicAttributes", @@ -518,12 +520,13 @@ }, "errors":[ {"shape":"SubscriptionLimitExceededException"}, + {"shape":"FilterPolicyLimitExceededException"}, {"shape":"InvalidParameterException"}, {"shape":"InternalErrorException"}, {"shape":"NotFoundException"}, {"shape":"AuthorizationErrorException"} ], - "documentation":"

Prepares to subscribe an endpoint by sending the endpoint a confirmation message. To actually create a subscription, the endpoint owner must call the ConfirmSubscription action with the token from the confirmation message. Confirmation tokens are valid for three days.

" + "documentation":"

Prepares to subscribe an endpoint by sending the endpoint a confirmation message. To actually create a subscription, the endpoint owner must call the ConfirmSubscription action with the token from the confirmation message. Confirmation tokens are valid for three days.

This action is throttled at 100 transactions per second (TPS).

" }, "Unsubscribe":{ "name":"Unsubscribe", @@ -538,7 +541,7 @@ {"shape":"AuthorizationErrorException"}, {"shape":"NotFoundException"} ], - "documentation":"

Deletes a subscription. If the subscription requires authentication for deletion, only the owner of the subscription or the topic's owner can unsubscribe, and an AWS signature is required. If the Unsubscribe call does not require authentication and the requester is not the subscription owner, a final cancellation message is delivered to the endpoint, so that the endpoint owner can easily resubscribe to the topic if the Unsubscribe request was unintended.

" + "documentation":"

Deletes a subscription. If the subscription requires authentication for deletion, only the owner of the subscription or the topic's owner can unsubscribe, and an AWS signature is required. If the Unsubscribe call does not require authentication and the requester is not the subscription owner, a final cancellation message is delivered to the endpoint, so that the endpoint owner can easily resubscribe to the topic if the Unsubscribe request was unintended.

This action is throttled at 100 transactions per second (TPS).

" } }, "shapes":{ @@ -796,6 +799,19 @@ }, "exception":true }, + "FilterPolicyLimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"string"} + }, + "documentation":"

Indicates that the number of filter policies in your AWS account exceeds the limit. To add more filter policies, submit an SNS Limit Increase case in the AWS Support Center.

", + "error":{ + "code":"FilterPolicyLimitExceeded", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, "GetEndpointAttributesInput":{ "type":"structure", "required":["EndpointArn"], @@ -874,7 +890,7 @@ "members":{ "Attributes":{ "shape":"SubscriptionAttributesMap", - "documentation":"

A map of the subscription's attributes. Attributes in this map include the following:

  • SubscriptionArn -- the subscription's ARN

  • TopicArn -- the topic ARN that the subscription is associated with

  • Owner -- the AWS account ID of the subscription's owner

  • ConfirmationWasAuthenticated -- true if the subscription confirmation request was authenticated

  • DeliveryPolicy -- the JSON serialization of the subscription's delivery policy

  • EffectiveDeliveryPolicy -- the JSON serialization of the effective delivery policy that takes into account the topic delivery policy and account system defaults

" + "documentation":"

A map of the subscription's attributes. Attributes in this map include the following:

  • ConfirmationWasAuthenticated -- true if the subscription confirmation request was authenticated.

  • DeliveryPolicy -- The JSON serialization of the subscription's delivery policy.

  • EffectiveDeliveryPolicy -- The JSON serialization of the effective delivery policy that takes into account the topic delivery policy and account system defaults.

  • FilterPolicy -- The filter policy JSON that is assigned to the subscription.

  • Owner -- The AWS account ID of the subscription's owner.

  • PendingConfirmation -- true if the subscription hasn't been confirmed. To confirm a pending subscription, call the ConfirmSubscription action with a confirmation token.

  • RawMessageDelivery -- true if raw message delivery is enabled for the subscription. Raw messages are free of JSON formatting and can be sent to HTTP/S and Amazon SQS endpoints.

  • SubscriptionArn -- The subscription's ARN.

  • TopicArn -- The topic ARN that the subscription is associated with.

" } }, "documentation":"

Response for GetSubscriptionAttributes action.

" @@ -1129,7 +1145,7 @@ "members":{ "DataType":{ "shape":"String", - "documentation":"

Amazon SNS supports the following logical data types: String, Number, and Binary. For more information, see Message Attribute Data Types.

" + "documentation":"

Amazon SNS supports the following logical data types: String, String.Array, Number, and Binary. For more information, see Message Attribute Data Types.

" }, "StringValue":{ "shape":"String", @@ -1225,7 +1241,7 @@ }, "Message":{ "shape":"message", - "documentation":"

The message you want to send to the topic.

If you want to send the same message to all transport protocols, include the text of the message as a String value.

If you want to send different messages for each transport protocol, set the value of the MessageStructure parameter to json and use a JSON object for the Message parameter.

Constraints: Messages must be UTF-8 encoded strings at most 256 KB in size (262144 bytes, not 262144 characters).

JSON-specific constraints:

  • Keys in the JSON object that correspond to supported transport protocols must have simple JSON string values.

  • The values will be parsed (unescaped) before they are used in outgoing messages.

  • Outbound notifications are JSON encoded (meaning that the characters will be reescaped for sending).

  • Values have a minimum length of 0 (the empty string, \"\", is allowed).

  • Values have a maximum length bounded by the overall message size (so, including multiple protocols may limit message sizes).

  • Non-string values will cause the key to be ignored.

  • Keys that do not correspond to supported transport protocols are ignored.

  • Duplicate keys are not allowed.

  • Failure to parse or validate any key or value in the message will cause the Publish call to return an error (no partial delivery).

" + "documentation":"

The message you want to send.

If you are publishing to a topic and you want to send the same message to all transport protocols, include the text of the message as a String value. If you want to send different messages for each transport protocol, set the value of the MessageStructure parameter to json and use a JSON object for the Message parameter.

Constraints:

  • With the exception of SMS, messages must be UTF-8 encoded strings and at most 256 KB in size (262144 bytes, not 262144 characters).

  • For SMS, each message can contain up to 140 bytes, and the character limit depends on the encoding scheme. For example, an SMS message can contain 160 GSM characters, 140 ASCII characters, or 70 UCS-2 characters. If you publish a message that exceeds the size limit, Amazon SNS sends it as multiple messages, each fitting within the size limit. Messages are not cut off in the middle of a word but on whole-word boundaries. The total size limit for a single SMS publish action is 1600 bytes.

JSON-specific constraints:

  • Keys in the JSON object that correspond to supported transport protocols must have simple JSON string values.

  • The values will be parsed (unescaped) before they are used in outgoing messages.

  • Outbound notifications are JSON encoded (meaning that the characters will be reescaped for sending).

  • Values have a minimum length of 0 (the empty string, \"\", is allowed).

  • Values have a maximum length bounded by the overall message size (so, including multiple protocols may limit message sizes).

  • Non-string values will cause the key to be ignored.

  • Keys that do not correspond to supported transport protocols are ignored.

  • Duplicate keys are not allowed.

  • Failure to parse or validate any key or value in the message will cause the Publish call to return an error (no partial delivery).

" }, "Subject":{ "shape":"subject", @@ -1312,7 +1328,7 @@ "members":{ "attributes":{ "shape":"MapStringToString", - "documentation":"

The default settings for sending SMS messages from your account. You can set values for the following attribute names:

MonthlySpendLimit – The maximum amount in USD that you are willing to spend each month to send SMS messages. When Amazon SNS determines that sending an SMS message would incur a cost that exceeds this limit, it stops sending SMS messages within minutes.

Amazon SNS stops sending SMS messages within minutes of the limit being crossed. During that interval, if you continue to send SMS messages, you will incur costs that exceed your limit.

By default, the spend limit is set to the maximum allowed by Amazon SNS. If you want to exceed the maximum, contact AWS Support or your AWS sales representative for a service limit increase.

DeliveryStatusIAMRole – The ARN of the IAM role that allows Amazon SNS to write logs about SMS deliveries in CloudWatch Logs. For each SMS message that you send, Amazon SNS writes a log that includes the message price, the success or failure status, the reason for failure (if the message failed), the message dwell time, and other information.

DeliveryStatusSuccessSamplingRate – The percentage of successful SMS deliveries for which Amazon SNS will write logs in CloudWatch Logs. The value can be an integer from 0 - 100. For example, to write logs only for failed deliveries, set this value to 0. To write logs for 10% of your successful deliveries, set it to 10.

DefaultSenderID – A string, such as your business brand, that is displayed as the sender on the receiving device. Support for sender IDs varies by country. The sender ID can be 1 - 11 alphanumeric characters, and it must contain at least one letter.

DefaultSMSType – The type of SMS message that you will send by default. You can assign the following values:

  • Promotional – (Default) Noncritical messages, such as marketing messages. Amazon SNS optimizes the message delivery to incur the lowest cost.

  • Transactional – Critical messages that support customer transactions, such as one-time passcodes for multi-factor authentication. Amazon SNS optimizes the message delivery to achieve the highest reliability.

UsageReportS3Bucket – The name of the Amazon S3 bucket to receive daily SMS usage reports from Amazon SNS. Each day, Amazon SNS will deliver a usage report as a CSV file to the bucket. The report includes the following information for each SMS message that was successfully delivered by your account:

  • Time that the message was published (in UTC)

  • Message ID

  • Destination phone number

  • Message type

  • Delivery status

  • Message price (in USD)

  • Part number (a message is split into multiple parts if it is too long for a single message)

  • Total number of parts

To receive the report, the bucket must have a policy that allows the Amazon SNS service principal to perform the s3:PutObject and s3:GetBucketLocation actions.

For an example bucket policy and usage report, see Monitoring SMS Activity in the Amazon SNS Developer Guide.

" + "documentation":"

The default settings for sending SMS messages from your account. You can set values for the following attribute names:

MonthlySpendLimit – The maximum amount in USD that you are willing to spend each month to send SMS messages. When Amazon SNS determines that sending an SMS message would incur a cost that exceeds this limit, it stops sending SMS messages within minutes.

Amazon SNS stops sending SMS messages within minutes of the limit being crossed. During that interval, if you continue to send SMS messages, you will incur costs that exceed your limit.

By default, the spend limit is set to the maximum allowed by Amazon SNS. If you want to raise the limit, submit an SNS Limit Increase case. For New limit value, enter your desired monthly spend limit. In the Use Case Description field, explain that you are requesting an SMS monthly spend limit increase.

DeliveryStatusIAMRole – The ARN of the IAM role that allows Amazon SNS to write logs about SMS deliveries in CloudWatch Logs. For each SMS message that you send, Amazon SNS writes a log that includes the message price, the success or failure status, the reason for failure (if the message failed), the message dwell time, and other information.

DeliveryStatusSuccessSamplingRate – The percentage of successful SMS deliveries for which Amazon SNS will write logs in CloudWatch Logs. The value can be an integer from 0 - 100. For example, to write logs only for failed deliveries, set this value to 0. To write logs for 10% of your successful deliveries, set it to 10.

DefaultSenderID – A string, such as your business brand, that is displayed as the sender on the receiving device. Support for sender IDs varies by country. The sender ID can be 1 - 11 alphanumeric characters, and it must contain at least one letter.

DefaultSMSType – The type of SMS message that you will send by default. You can assign the following values:

  • Promotional – (Default) Noncritical messages, such as marketing messages. Amazon SNS optimizes the message delivery to incur the lowest cost.

  • Transactional – Critical messages that support customer transactions, such as one-time passcodes for multi-factor authentication. Amazon SNS optimizes the message delivery to achieve the highest reliability.

UsageReportS3Bucket – The name of the Amazon S3 bucket to receive daily SMS usage reports from Amazon SNS. Each day, Amazon SNS will deliver a usage report as a CSV file to the bucket. The report includes the following information for each SMS message that was successfully delivered by your account:

  • Time that the message was published (in UTC)

  • Message ID

  • Destination phone number

  • Message type

  • Delivery status

  • Message price (in USD)

  • Part number (a message is split into multiple parts if it is too long for a single message)

  • Total number of parts

To receive the report, the bucket must have a policy that allows the Amazon SNS service principal to perform the s3:PutObject and s3:GetBucketLocation actions.

For an example bucket policy and usage report, see Monitoring SMS Activity in the Amazon SNS Developer Guide.

" } }, "documentation":"

The input for the SetSMSAttributes action.

" @@ -1336,7 +1352,7 @@ }, "AttributeName":{ "shape":"attributeName", - "documentation":"

The name of the attribute you want to set. Only a subset of the subscriptions attributes are mutable.

Valid values: DeliveryPolicy | RawMessageDelivery

" + "documentation":"

The name of the attribute you want to set. Only a subset of the subscriptions attributes are mutable.

Valid values: DeliveryPolicy | FilterPolicy | RawMessageDelivery

" }, "AttributeValue":{ "shape":"attributeValue", @@ -1386,6 +1402,14 @@ "Endpoint":{ "shape":"endpoint", "documentation":"

The endpoint that you want to receive notifications. Endpoints vary by protocol:

  • For the http protocol, the endpoint is an URL beginning with \"http://\"

  • For the https protocol, the endpoint is a URL beginning with \"https://\"

  • For the email protocol, the endpoint is an email address

  • For the email-json protocol, the endpoint is an email address

  • For the sms protocol, the endpoint is a phone number of an SMS-enabled device

  • For the sqs protocol, the endpoint is the ARN of an Amazon SQS queue

  • For the application protocol, the endpoint is the EndpointArn of a mobile app and device.

  • For the lambda protocol, the endpoint is the ARN of an AWS Lambda function.

" + }, + "Attributes":{ + "shape":"SubscriptionAttributesMap", + "documentation":"

Assigns attributes to the subscription as a map of key-value pairs. You can assign any attribute that is supported by the SetSubscriptionAttributes action.

" + }, + "ReturnSubscriptionArn":{ + "shape":"boolean", + "documentation":"

Sets whether the response from the Subscribe request includes the subscription ARN, even if the subscription is not yet confirmed.

If you set this parameter to false, the response includes the ARN for confirmed subscriptions, but it includes an ARN value of \"pending subscription\" for subscriptions that are not yet confirmed. A subscription becomes confirmed when the subscriber calls the ConfirmSubscription action with a confirmation token.

If you set this parameter to true, the response includes the ARN in all cases, even if the subscription is not yet confirmed.

The default value is false.

" } }, "documentation":"

Input for Subscribe action.

" @@ -1395,7 +1419,7 @@ "members":{ "SubscriptionArn":{ "shape":"subscriptionARN", - "documentation":"

The ARN of the subscription, if the service was able to create a subscription immediately (without requiring endpoint owner confirmation).

" + "documentation":"

The ARN of the subscription if it is confirmed, or the string \"pending confirmation\" if the subscription requires confirmation. However, if the API request parameter ReturnSubscriptionArn is true, then the value is always the subscription ARN, even if the subscription requires confirmation.

" } }, "documentation":"

Response for Subscribe action.

" diff --git a/botocore/data/sqs/2012-11-05/service-2.json b/botocore/data/sqs/2012-11-05/service-2.json index 35e6ad0b..2301a77a 100644 --- a/botocore/data/sqs/2012-11-05/service-2.json +++ b/botocore/data/sqs/2012-11-05/service-2.json @@ -6,6 +6,7 @@ "protocol":"query", "serviceAbbreviation":"Amazon SQS", "serviceFullName":"Amazon Simple Queue Service", + "serviceId":"SQS", "signatureVersion":"v4", "uid":"sqs-2012-11-05", "xmlNamespace":"http://queue.amazonaws.com/doc/2012-11-05/" diff --git a/botocore/data/ssm/2014-11-06/service-2.json b/botocore/data/ssm/2014-11-06/service-2.json index d1bc10f7..a05aea2a 100644 --- a/botocore/data/ssm/2014-11-06/service-2.json +++ b/botocore/data/ssm/2014-11-06/service-2.json @@ -167,7 +167,7 @@ {"shape":"ResourceDataSyncAlreadyExistsException"}, {"shape":"ResourceDataSyncInvalidConfigurationException"} ], - "documentation":"

Creates a resource data sync configuration to a single bucket in Amazon S3. This is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data to the Amazon S3 bucket. To check the status of the sync, use the ListResourceDataSync.

By default, data is not encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy. To view an example of a restrictive Amazon S3 bucket policy for Resource Data Sync, see Configuring Resource Data Sync for Inventory.

" + "documentation":"

Creates a resource data sync configuration to a single bucket in Amazon S3. This is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data to the Amazon S3 bucket. To check the status of the sync, use the ListResourceDataSync.

By default, data is not encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy. To view an example of a restrictive Amazon S3 bucket policy for Resource Data Sync, see Create a Resource Data Sync for Inventory in the AWS Systems Manager User Guide.

" }, "DeleteActivation":{ "name":"DeleteActivation", @@ -315,7 +315,7 @@ {"shape":"InvalidInstanceId"}, {"shape":"InternalServerError"} ], - "documentation":"

Removes the server or virtual machine from the list of registered servers. You can reregister the instance again at any time. If you don't plan to use Run Command on the server, we suggest uninstalling the SSM Agent first.

" + "documentation":"

Removes the server or virtual machine from the list of registered servers. You can reregister the instance again at any time. If you don't plan to use Run Command on the server, we suggest uninstalling SSM Agent first.

" }, "DeregisterPatchBaselineForPatchGroup":{ "name":"DeregisterPatchBaselineForPatchGroup", @@ -392,6 +392,37 @@ ], "documentation":"

Describes the association for the specified target or instance. If you created the association by using the Targets parameter, then you must retrieve the association by using the association ID. If you created the association by specifying an instance ID and a Systems Manager document, then you retrieve the association by specifying the document name and the instance ID.

" }, + "DescribeAssociationExecutionTargets":{ + "name":"DescribeAssociationExecutionTargets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAssociationExecutionTargetsRequest"}, + "output":{"shape":"DescribeAssociationExecutionTargetsResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"AssociationDoesNotExist"}, + {"shape":"InvalidNextToken"}, + {"shape":"AssociationExecutionDoesNotExist"} + ], + "documentation":"

Use this API action to view information about a specific execution of a specific association.

" + }, + "DescribeAssociationExecutions":{ + "name":"DescribeAssociationExecutions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAssociationExecutionsRequest"}, + "output":{"shape":"DescribeAssociationExecutionsResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"AssociationDoesNotExist"}, + {"shape":"InvalidNextToken"} + ], + "documentation":"

Use this API action to view all executions for a specific association ID.

" + }, "DescribeAutomationExecutions":{ "name":"DescribeAutomationExecutions", "http":{ @@ -529,7 +560,7 @@ {"shape":"InvalidInstanceInformationFilterValue"}, {"shape":"InvalidFilterKey"} ], - "documentation":"

Describes one or more of your instances. You can use this to get information about instances like the operating system platform, the SSM Agent version (Linux), status etc. If you specify one or more instance IDs, it returns information for those instances. If you do not specify instance IDs, it returns information for all your instances. If you specify an instance ID that is not valid or an instance that you do not own, you receive an error.

" + "documentation":"

Describes one or more of your instances. You can use this to get information about instances like the operating system platform, the SSM Agent version (Linux), status etc. If you specify one or more instance IDs, it returns information for those instances. If you do not specify instance IDs, it returns information for all your instances. If you specify an instance ID that is not valid or an instance that you do not own, you receive an error.

The IamRole field for this API action is the Amazon Identity and Access Management (IAM) role assigned to on-premises instances. This call does not return the IAM role for Amazon EC2 instances.

" }, "DescribeInstancePatchStates":{ "name":"DescribeInstancePatchStates", @@ -919,7 +950,7 @@ {"shape":"ParameterNotFound"}, {"shape":"ParameterVersionNotFound"} ], - "documentation":"

Get information about a parameter by using the parameter name.

" + "documentation":"

Get information about a parameter by using the parameter name. Don't confuse this API action with the GetParameters API action.

" }, "GetParameterHistory":{ "name":"GetParameterHistory", @@ -949,7 +980,7 @@ {"shape":"InvalidKeyId"}, {"shape":"InternalServerError"} ], - "documentation":"

Get details of a parameter.

" + "documentation":"

Get details of a parameter. Don't confuse this API action with the GetParameter API action.

" }, "GetParametersByPath":{ "name":"GetParametersByPath", @@ -967,7 +998,7 @@ {"shape":"InvalidKeyId"}, {"shape":"InvalidNextToken"} ], - "documentation":"

Retrieve parameters in a specific hierarchy. For more information, see Working with Systems Manager Parameters.

Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results.

This API action doesn't support filtering by tags.

" + "documentation":"

Retrieve parameters in a specific hierarchy. For more information, see Working with Systems Manager Parameters in the AWS Systems Manager User Guide.

Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results.

This API action doesn't support filtering by tags.

" }, "GetPatchBaseline":{ "name":"GetPatchBaseline", @@ -1386,6 +1417,20 @@ ], "documentation":"

Executes commands on one or more managed instances.

" }, + "StartAssociationsOnce":{ + "name":"StartAssociationsOnce", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartAssociationsOnceRequest"}, + "output":{"shape":"StartAssociationsOnceResult"}, + "errors":[ + {"shape":"InvalidAssociation"}, + {"shape":"AssociationDoesNotExist"} + ], + "documentation":"

Use this API action to execute an association immediately and only one time. This action can be helpful when troubleshooting associations.

" + }, "StartAutomationExecution":{ "name":"StartAutomationExecution", "http":{ @@ -1831,6 +1876,181 @@ "documentation":"

The specified association does not exist.

", "exception":true }, + "AssociationExecution":{ + "type":"structure", + "members":{ + "AssociationId":{ + "shape":"AssociationId", + "documentation":"

The association ID.

" + }, + "AssociationVersion":{ + "shape":"AssociationVersion", + "documentation":"

The association version.

" + }, + "ExecutionId":{ + "shape":"AssociationExecutionId", + "documentation":"

The execution ID for the association. If the association does not run at intervals or according to a schedule, then the ExecutionID is the same as the AssociationID.

" + }, + "Status":{ + "shape":"StatusName", + "documentation":"

The status of the association execution.

" + }, + "DetailedStatus":{ + "shape":"StatusName", + "documentation":"

Detailed status information about the execution.

" + }, + "CreatedTime":{ + "shape":"DateTime", + "documentation":"

The time the execution started.

" + }, + "LastExecutionDate":{ + "shape":"DateTime", + "documentation":"

The date of the last execution.

" + }, + "ResourceCountByStatus":{ + "shape":"ResourceCountByStatus", + "documentation":"

An aggregate status of the resources in the execution based on the status type.

" + } + }, + "documentation":"

Includes information about the specified association.

" + }, + "AssociationExecutionDoesNotExist":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The specified execution ID does not exist. Verify the ID number and try again.

", + "exception":true + }, + "AssociationExecutionFilter":{ + "type":"structure", + "required":[ + "Key", + "Value", + "Type" + ], + "members":{ + "Key":{ + "shape":"AssociationExecutionFilterKey", + "documentation":"

The key value used in the request.

" + }, + "Value":{ + "shape":"AssociationExecutionFilterValue", + "documentation":"

The value specified for the key.

" + }, + "Type":{ + "shape":"AssociationFilterOperatorType", + "documentation":"

The filter type specified in the request.

" + } + }, + "documentation":"

Filters used in the request.

" + }, + "AssociationExecutionFilterKey":{ + "type":"string", + "enum":[ + "ExecutionId", + "Status", + "CreatedTime" + ] + }, + "AssociationExecutionFilterList":{ + "type":"list", + "member":{"shape":"AssociationExecutionFilter"}, + "min":1 + }, + "AssociationExecutionFilterValue":{ + "type":"string", + "min":1 + }, + "AssociationExecutionId":{ + "type":"string", + "pattern":"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}" + }, + "AssociationExecutionTarget":{ + "type":"structure", + "members":{ + "AssociationId":{ + "shape":"AssociationId", + "documentation":"

The association ID.

" + }, + "AssociationVersion":{ + "shape":"AssociationVersion", + "documentation":"

The association version.

" + }, + "ExecutionId":{ + "shape":"AssociationExecutionId", + "documentation":"

The execution ID. If the association does not run at intervals or according to a schedule, then the ExecutionID is the same as the AssociationID.

" + }, + "ResourceId":{ + "shape":"AssociationResourceId", + "documentation":"

The resource ID, for example, the instance ID where the association ran.

" + }, + "ResourceType":{ + "shape":"AssociationResourceType", + "documentation":"

The resource type, for example, instance.

" + }, + "Status":{ + "shape":"StatusName", + "documentation":"

The association execution status.

" + }, + "DetailedStatus":{ + "shape":"StatusName", + "documentation":"

Detailed information about the execution status.

" + }, + "LastExecutionDate":{ + "shape":"DateTime", + "documentation":"

The date of the last execution.

" + }, + "OutputSource":{ + "shape":"OutputSource", + "documentation":"

The location where the association details are saved.

" + } + }, + "documentation":"

Includes information about the specified association execution.

" + }, + "AssociationExecutionTargetsFilter":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"AssociationExecutionTargetsFilterKey", + "documentation":"

The key value used in the request.

" + }, + "Value":{ + "shape":"AssociationExecutionTargetsFilterValue", + "documentation":"

The value specified for the key.

" + } + }, + "documentation":"

Filters for the association execution.

" + }, + "AssociationExecutionTargetsFilterKey":{ + "type":"string", + "enum":[ + "Status", + "ResourceId", + "ResourceType" + ] + }, + "AssociationExecutionTargetsFilterList":{ + "type":"list", + "member":{"shape":"AssociationExecutionTargetsFilter"}, + "min":1 + }, + "AssociationExecutionTargetsFilterValue":{ + "type":"string", + "min":1 + }, + "AssociationExecutionTargetsList":{ + "type":"list", + "member":{"shape":"AssociationExecutionTarget"} + }, + "AssociationExecutionsList":{ + "type":"list", + "member":{"shape":"AssociationExecution"} + }, "AssociationFilter":{ "type":"structure", "required":[ @@ -1866,6 +2086,14 @@ "member":{"shape":"AssociationFilter"}, "min":1 }, + "AssociationFilterOperatorType":{ + "type":"string", + "enum":[ + "EQUAL", + "LESS_THAN", + "GREATER_THAN" + ] + }, "AssociationFilterValue":{ "type":"string", "min":1 @@ -1874,6 +2102,12 @@ "type":"string", "pattern":"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}" }, + "AssociationIdList":{ + "type":"list", + "member":{"shape":"AssociationId"}, + "max":10, + "min":1 + }, "AssociationLimitExceeded":{ "type":"structure", "members":{ @@ -1907,6 +2141,16 @@ }, "documentation":"

Information about the association.

" }, + "AssociationResourceId":{ + "type":"string", + "max":100, + "min":1 + }, + "AssociationResourceType":{ + "type":"string", + "max":50, + "min":1 + }, "AssociationStatus":{ "type":"structure", "required":[ @@ -2381,6 +2625,27 @@ "max":64, "min":1 }, + "CloudWatchLogGroupName":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[\\.\\-_/#A-Za-z0-9]+" + }, + "CloudWatchOutputConfig":{ + "type":"structure", + "members":{ + "CloudWatchLogGroupName":{ + "shape":"CloudWatchLogGroupName", + "documentation":"

The name of the CloudWatch log group where you want to send command output. If you don't specify a group name, Systems Manager automatically creates a log group for you. The log group uses the following naming format: aws/ssm/SystemsManagerDocumentName.

" + }, + "CloudWatchOutputEnabled":{ + "shape":"CloudWatchOutputEnabled", + "documentation":"

Enables Systems Manager to send command output to CloudWatch Logs.

" + } + }, + "documentation":"

Configuration options for sending command output to CloudWatch Logs.

" + }, + "CloudWatchOutputEnabled":{"type":"boolean"}, "Command":{ "type":"structure", "members":{ @@ -2426,7 +2691,7 @@ }, "StatusDetails":{ "shape":"StatusDetails", - "documentation":"

A detailed status of the command execution. StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Run Command Status. StatusDetails can be one of the following values:

  • Pending: The command has not been sent to any instances.

  • In Progress: The command has been sent to at least one instance but has not reached a final state on all instances.

  • Success: The command successfully executed on all invocations. This is a terminal state.

  • Delivery Timed Out: The value of MaxErrors or more command invocations shows a status of Delivery Timed Out. This is a terminal state.

  • Execution Timed Out: The value of MaxErrors or more command invocations shows a status of Execution Timed Out. This is a terminal state.

  • Failed: The value of MaxErrors or more command invocations shows a status of Failed. This is a terminal state.

  • Incomplete: The command was attempted on all instances and one or more invocations does not have a value of Success but not enough invocations failed for the status to be Failed. This is a terminal state.

  • Canceled: The command was terminated before it was completed. This is a terminal state.

  • Rate Exceeded: The number of instances targeted by the command exceeded the account limit for pending invocations. The system has canceled the command before executing it on any instance. This is a terminal state.

" + "documentation":"

A detailed status of the command execution. StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding Command Statuses in the AWS Systems Manager User Guide. StatusDetails can be one of the following values:

  • Pending: The command has not been sent to any instances.

  • In Progress: The command has been sent to at least one instance but has not reached a final state on all instances.

  • Success: The command successfully executed on all invocations. This is a terminal state.

  • Delivery Timed Out: The value of MaxErrors or more command invocations shows a status of Delivery Timed Out. This is a terminal state.

  • Execution Timed Out: The value of MaxErrors or more command invocations shows a status of Execution Timed Out. This is a terminal state.

  • Failed: The value of MaxErrors or more command invocations shows a status of Failed. This is a terminal state.

  • Incomplete: The command was attempted on all instances and one or more invocations does not have a value of Success but not enough invocations failed for the status to be Failed. This is a terminal state.

  • Canceled: The command was terminated before it was completed. This is a terminal state.

  • Rate Exceeded: The number of instances targeted by the command exceeded the account limit for pending invocations. The system has canceled the command before executing it on any instance. This is a terminal state.

" }, "OutputS3Region":{ "shape":"S3Region", @@ -2442,11 +2707,11 @@ }, "MaxConcurrency":{ "shape":"MaxConcurrency", - "documentation":"

The maximum number of instances that are allowed to execute the command at the same time. You can specify a number of instances, such as 10, or a percentage of instances, such as 10%. The default value is 50. For more information about how to use MaxConcurrency, see Executing a Command Using Systems Manager Run Command.

" + "documentation":"

The maximum number of instances that are allowed to execute the command at the same time. You can specify a number of instances, such as 10, or a percentage of instances, such as 10%. The default value is 50. For more information about how to use MaxConcurrency, see Executing Commands Using Systems Manager Run Command in the AWS Systems Manager User Guide.

" }, "MaxErrors":{ "shape":"MaxErrors", - "documentation":"

The maximum number of errors allowed before the system stops sending the command to additional targets. You can specify a number of errors, such as 10, or a percentage or errors, such as 10%. The default value is 0. For more information about how to use MaxErrors, see Executing a Command Using Systems Manager Run Command.

" + "documentation":"

The maximum number of errors allowed before the system stops sending the command to additional targets. You can specify a number of errors, such as 10, or a percentage of errors, such as 10%. The default value is 0. For more information about how to use MaxErrors, see Executing Commands Using Systems Manager Run Command in the AWS Systems Manager User Guide.

" }, "TargetCount":{ "shape":"TargetCount", @@ -2460,6 +2725,10 @@ "shape":"ErrorCount", "documentation":"

The number of targets for which the status is Failed or Execution Timed Out.

" }, + "DeliveryTimedOutCount":{ + "shape":"DeliveryTimedOutCount", + "documentation":"

The number of targets for which the status is Delivery Timed Out.

" + }, "ServiceRole":{ "shape":"ServiceRole", "documentation":"

The IAM service role that Run Command uses to act on your behalf when sending notifications about command status changes.

" @@ -2467,6 +2736,10 @@ "NotificationConfig":{ "shape":"NotificationConfig", "documentation":"

Configurations for sending notifications about command status changes.

" + }, + "CloudWatchOutputConfig":{ + "shape":"CloudWatchOutputConfig", + "documentation":"

CloudWatch Logs information where you want Systems Manager to send the command output.

" } }, "documentation":"

Describes a command request.

" @@ -2549,7 +2822,7 @@ }, "StatusDetails":{ "shape":"StatusDetails", - "documentation":"

A detailed status of the command execution for each invocation (each instance targeted by the command). StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Run Command Status. StatusDetails can be one of the following values:

  • Pending: The command has not been sent to the instance.

  • In Progress: The command has been sent to the instance but has not reached a terminal state.

  • Success: The execution of the command or plugin was successfully completed. This is a terminal state.

  • Delivery Timed Out: The command was not delivered to the instance before the delivery timeout expired. Delivery timeouts do not count against the parent command's MaxErrors limit, but they do contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Execution Timed Out: Command execution started on the instance, but the execution was not complete before the execution timeout expired. Execution timeouts count against the MaxErrors limit of the parent command. This is a terminal state.

  • Failed: The command was not successful on the instance. For a plugin, this indicates that the result code was not zero. For a command invocation, this indicates that the result code for one or more plugins was not zero. Invocation failures count against the MaxErrors limit of the parent command. This is a terminal state.

  • Canceled: The command was terminated before it was completed. This is a terminal state.

  • Undeliverable: The command can't be delivered to the instance. The instance might not exist or might not be responding. Undeliverable invocations don't count against the parent command's MaxErrors limit and don't contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Terminated: The parent command exceeded its MaxErrors limit and subsequent command invocations were canceled by the system. This is a terminal state.

" + "documentation":"

A detailed status of the command execution for each invocation (each instance targeted by the command). StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding Command Statuses in the AWS Systems Manager User Guide. StatusDetails can be one of the following values:

  • Pending: The command has not been sent to the instance.

  • In Progress: The command has been sent to the instance but has not reached a terminal state.

  • Success: The execution of the command or plugin was successfully completed. This is a terminal state.

  • Delivery Timed Out: The command was not delivered to the instance before the delivery timeout expired. Delivery timeouts do not count against the parent command's MaxErrors limit, but they do contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Execution Timed Out: Command execution started on the instance, but the execution was not complete before the execution timeout expired. Execution timeouts count against the MaxErrors limit of the parent command. This is a terminal state.

  • Failed: The command was not successful on the instance. For a plugin, this indicates that the result code was not zero. For a command invocation, this indicates that the result code for one or more plugins was not zero. Invocation failures count against the MaxErrors limit of the parent command. This is a terminal state.

  • Canceled: The command was terminated before it was completed. This is a terminal state.

  • Undeliverable: The command can't be delivered to the instance. The instance might not exist or might not be responding. Undeliverable invocations don't count against the parent command's MaxErrors limit and don't contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Terminated: The parent command exceeded its MaxErrors limit and subsequent command invocations were canceled by the system. This is a terminal state.

" }, "TraceOutput":{ "shape":"InvocationTraceOutput", @@ -2571,6 +2844,10 @@ "NotificationConfig":{ "shape":"NotificationConfig", "documentation":"

Configurations for sending notifications about command status changes on a per instance basis.

" + }, + "CloudWatchOutputConfig":{ + "shape":"CloudWatchOutputConfig", + "documentation":"

CloudWatch Logs information where you want Systems Manager to send the command output.

" } }, "documentation":"

An invocation is a copy of a command sent to a specific instance. A command can apply to one or more instances. A command invocation applies to one instance. For example, if a user executes SendCommand against three instances, then a command invocation is created for each requested instance ID. A command invocation returns status and detail information about a command you executed.

" @@ -2614,7 +2891,7 @@ }, "StatusDetails":{ "shape":"StatusDetails", - "documentation":"

A detailed status of the plugin execution. StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Run Command Status. StatusDetails can be one of the following values:

  • Pending: The command has not been sent to the instance.

  • In Progress: The command has been sent to the instance but has not reached a terminal state.

  • Success: The execution of the command or plugin was successfully completed. This is a terminal state.

  • Delivery Timed Out: The command was not delivered to the instance before the delivery timeout expired. Delivery timeouts do not count against the parent command's MaxErrors limit, but they do contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Execution Timed Out: Command execution started on the instance, but the execution was not complete before the execution timeout expired. Execution timeouts count against the MaxErrors limit of the parent command. This is a terminal state.

  • Failed: The command was not successful on the instance. For a plugin, this indicates that the result code was not zero. For a command invocation, this indicates that the result code for one or more plugins was not zero. Invocation failures count against the MaxErrors limit of the parent command. This is a terminal state.

  • Canceled: The command was terminated before it was completed. This is a terminal state.

  • Undeliverable: The command can't be delivered to the instance. The instance might not exist, or it might not be responding. Undeliverable invocations don't count against the parent command's MaxErrors limit, and they don't contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Terminated: The parent command exceeded its MaxErrors limit and subsequent command invocations were canceled by the system. This is a terminal state.

" + "documentation":"

A detailed status of the plugin execution. StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding Command Statuses in the AWS Systems Manager User Guide. StatusDetails can be one of the following values:

  • Pending: The command has not been sent to the instance.

  • In Progress: The command has been sent to the instance but has not reached a terminal state.

  • Success: The execution of the command or plugin was successfully completed. This is a terminal state.

  • Delivery Timed Out: The command was not delivered to the instance before the delivery timeout expired. Delivery timeouts do not count against the parent command's MaxErrors limit, but they do contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Execution Timed Out: Command execution started on the instance, but the execution was not complete before the execution timeout expired. Execution timeouts count against the MaxErrors limit of the parent command. This is a terminal state.

  • Failed: The command was not successful on the instance. For a plugin, this indicates that the result code was not zero. For a command invocation, this indicates that the result code for one or more plugins was not zero. Invocation failures count against the MaxErrors limit of the parent command. This is a terminal state.

  • Canceled: The command was terminated before it was completed. This is a terminal state.

  • Undeliverable: The command can't be delivered to the instance. The instance might not exist, or it might not be responding. Undeliverable invocations don't count against the parent command's MaxErrors limit, and they don't contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Terminated: The parent command exceeded its MaxErrors limit and subsequent command invocations were canceled by the system. This is a terminal state.

" }, "ResponseCode":{ "shape":"ResponseCode", @@ -3390,7 +3667,7 @@ }, "DeletionSummary":{ "shape":"InventoryDeletionSummary", - "documentation":"

A summary of the delete operation. For more information about this summary, see Understanding the Delete Inventory Summary.

" + "documentation":"

A summary of the delete operation. For more information about this summary, see Understanding the Delete Inventory Summary in the AWS Systems Manager User Guide.

" } } }, @@ -3485,6 +3762,7 @@ "members":{ } }, + "DeliveryTimedOutCount":{"type":"integer"}, "DeregisterManagedInstanceRequest":{ "type":"structure", "required":["InstanceId"], @@ -3652,6 +3930,85 @@ } } }, + "DescribeAssociationExecutionTargetsRequest":{ + "type":"structure", + "required":[ + "AssociationId", + "ExecutionId" + ], + "members":{ + "AssociationId":{ + "shape":"AssociationId", + "documentation":"

The association ID that includes the execution for which you want to view details.

" + }, + "ExecutionId":{ + "shape":"AssociationExecutionId", + "documentation":"

The execution ID for which you want to view details.

" + }, + "Filters":{ + "shape":"AssociationExecutionTargetsFilterList", + "documentation":"

Filters for the request. You can specify the following filters and values.

Status (EQUAL)

ResourceId (EQUAL)

ResourceType (EQUAL)

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.

", + "box":true + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token to start the list. Use this token to get the next set of results.

" + } + } + }, + "DescribeAssociationExecutionTargetsResult":{ + "type":"structure", + "members":{ + "AssociationExecutionTargets":{ + "shape":"AssociationExecutionTargetsList", + "documentation":"

Information about the execution.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of items to return. Use this token to get the next set of results.

" + } + } + }, + "DescribeAssociationExecutionsRequest":{ + "type":"structure", + "required":["AssociationId"], + "members":{ + "AssociationId":{ + "shape":"AssociationId", + "documentation":"

The association ID for which you want to view execution history details.

" + }, + "Filters":{ + "shape":"AssociationExecutionFilterList", + "documentation":"

Filters for the request. You can specify the following filters and values.

ExecutionId (EQUAL)

Status (EQUAL)

CreatedTime (EQUAL, GREATER_THAN, LESS_THAN)

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.

", + "box":true + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token to start the list. Use this token to get the next set of results.

" + } + } + }, + "DescribeAssociationExecutionsResult":{ + "type":"structure", + "members":{ + "AssociationExecutions":{ + "shape":"AssociationExecutionsList", + "documentation":"

A list of the executions for the specified association ID.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of items to return. Use this token to get the next set of results.

" + } + } + }, "DescribeAssociationRequest":{ "type":"structure", "members":{ @@ -5028,7 +5385,7 @@ }, "StatusDetails":{ "shape":"StatusDetails", - "documentation":"

A detailed status of the command execution for an invocation. StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Run Command Status. StatusDetails can be one of the following values:

  • Pending: The command has not been sent to the instance.

  • In Progress: The command has been sent to the instance but has not reached a terminal state.

  • Delayed: The system attempted to send the command to the target, but the target was not available. The instance might not be available because of network issues, the instance was stopped, etc. The system will try to deliver the command again.

  • Success: The command or plugin was executed successfully. This is a terminal state.

  • Delivery Timed Out: The command was not delivered to the instance before the delivery timeout expired. Delivery timeouts do not count against the parent command's MaxErrors limit, but they do contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Execution Timed Out: The command started to execute on the instance, but the execution was not complete before the timeout expired. Execution timeouts count against the MaxErrors limit of the parent command. This is a terminal state.

  • Failed: The command wasn't executed successfully on the instance. For a plugin, this indicates that the result code was not zero. For a command invocation, this indicates that the result code for one or more plugins was not zero. Invocation failures count against the MaxErrors limit of the parent command. This is a terminal state.

  • Canceled: The command was terminated before it was completed. This is a terminal state.

  • Undeliverable: The command can't be delivered to the instance. The instance might not exist or might not be responding. Undeliverable invocations don't count against the parent command's MaxErrors limit and don't contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Terminated: The parent command exceeded its MaxErrors limit and subsequent command invocations were canceled by the system. This is a terminal state.

" + "documentation":"

A detailed status of the command execution for an invocation. StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding Command Statuses in the AWS Systems Manager User Guide. StatusDetails can be one of the following values:

  • Pending: The command has not been sent to the instance.

  • In Progress: The command has been sent to the instance but has not reached a terminal state.

  • Delayed: The system attempted to send the command to the target, but the target was not available. The instance might not be available because of network issues, the instance was stopped, etc. The system will try to deliver the command again.

  • Success: The command or plugin was executed successfully. This is a terminal state.

  • Delivery Timed Out: The command was not delivered to the instance before the delivery timeout expired. Delivery timeouts do not count against the parent command's MaxErrors limit, but they do contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Execution Timed Out: The command started to execute on the instance, but the execution was not complete before the timeout expired. Execution timeouts count against the MaxErrors limit of the parent command. This is a terminal state.

  • Failed: The command wasn't executed successfully on the instance. For a plugin, this indicates that the result code was not zero. For a command invocation, this indicates that the result code for one or more plugins was not zero. Invocation failures count against the MaxErrors limit of the parent command. This is a terminal state.

  • Canceled: The command was terminated before it was completed. This is a terminal state.

  • Undeliverable: The command can't be delivered to the instance. The instance might not exist or might not be responding. Undeliverable invocations don't count against the parent command's MaxErrors limit and don't contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Terminated: The parent command exceeded its MaxErrors limit and subsequent command invocations were canceled by the system. This is a terminal state.

" }, "StandardOutputContent":{ "shape":"StandardOutputContent", @@ -5045,6 +5402,10 @@ "StandardErrorUrl":{ "shape":"Url", "documentation":"

The URL for the complete text written by the plugin to stderr. If the command has not finished executing, then this string is empty.

" + }, + "CloudWatchOutputConfig":{ + "shape":"CloudWatchOutputConfig", + "documentation":"

CloudWatch Logs information where Systems Manager sent the command output.

" } } }, @@ -5628,7 +5989,7 @@ "members":{ "Path":{ "shape":"PSParameterName", - "documentation":"

The hierarchy for the parameter. Hierarchies start with a forward slash (/) and end with the parameter name. A hierarchy can have a maximum of 15 levels. Here is an example of a hierarchy: /Finance/Prod/IAD/WinServ2016/license33

" + "documentation":"

The hierarchy for the parameter. Hierarchies start with a forward slash (/) and end with the parameter name. A parameter name hierarchy can have a maximum of 15 levels. Here is an example of a hierarchy: /Finance/Prod/IAD/WinServ2016/license33

" }, "Recursive":{ "shape":"Boolean", @@ -5804,10 +6165,10 @@ "members":{ "message":{ "shape":"String", - "documentation":"

A hierarchy can have a maximum of 15 levels. For more information, see Working with Systems Manager Parameters.

" + "documentation":"

A hierarchy can have a maximum of 15 levels. For more information, see Requirements and Constraints for Parameter Names in the AWS Systems Manager User Guide.

" } }, - "documentation":"

A hierarchy can have a maximum of 15 levels. For more information, see Working with Systems Manager Parameters.

", + "documentation":"

A hierarchy can have a maximum of 15 levels. For more information, see Requirements and Constraints for Parameter Names in the AWS Systems Manager User Guide.

", "exception":true }, "HierarchyTypeMismatchException":{ @@ -5992,7 +6353,7 @@ }, "PingStatus":{ "shape":"PingStatus", - "documentation":"

Connection status of the SSM Agent.

" + "documentation":"

Connection status of SSM Agent.

" }, "LastPingDateTime":{ "shape":"DateTime", @@ -6001,11 +6362,11 @@ }, "AgentVersion":{ "shape":"Version", - "documentation":"

The version of the SSM Agent running on your Linux instance.

" + "documentation":"

The version of SSM Agent running on your Linux instance.

" }, "IsLatestVersion":{ "shape":"Boolean", - "documentation":"

Indicates whether latest version of the SSM Agent is running on your instance. Some older versions of Windows Server use the EC2Config service to process SSM requests. For this reason, this field does not indicate whether or not the latest version is installed on Windows managed instances.

", + "documentation":"

Indicates whether latest version of SSM Agent is running on your instance. Some older versions of Windows Server use the EC2Config service to process SSM requests. For this reason, this field does not indicate whether or not the latest version is installed on Windows managed instances.

", "box":true }, "PlatformType":{ @@ -6026,7 +6387,7 @@ }, "IamRole":{ "shape":"IamRole", - "documentation":"

The Amazon Identity and Access Management (IAM) role assigned to EC2 instances or managed instances.

" + "documentation":"

The Amazon Identity and Access Management (IAM) role assigned to the on-premises Systems Manager managed instances. This call does not return the IAM role for Amazon EC2 instances.

" }, "RegistrationDate":{ "shape":"DateTime", @@ -6311,6 +6672,14 @@ "documentation":"

The request does not meet the regular expression requirement.

", "exception":true }, + "InvalidAssociation":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The association is not valid or does not exist.

", + "exception":true + }, "InvalidAssociationVersion":{ "type":"structure", "members":{ @@ -6450,7 +6819,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The following problems can cause this exception:

You do not have permission to access the instance.

The SSM Agent is not running. On managed instances and Linux instances, verify that the SSM Agent is running. On EC2 Windows instances, verify that the EC2Config service is running.

The SSM Agent or EC2Config service is not registered to the SSM endpoint. Try reinstalling the SSM Agent or EC2Config service.

The instance is not in valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

", + "documentation":"

The following problems can cause this exception:

You do not have permission to access the instance.

SSM Agent is not running. On managed instances and Linux instances, verify that the SSM Agent is running. On EC2 Windows instances, verify that the EC2Config service is running.

SSM Agent or EC2Config service is not registered to the SSM endpoint. Try reinstalling SSM Agent or EC2Config service.

The instance is not in a valid state. Valid states are: Running, Pending, Stopped, Stopping. Invalid states are: Shutting-down and Terminated.

", "exception":true }, "InvalidInstanceInformationFilterValue":{ @@ -6685,7 +7054,7 @@ }, "DeletionSummary":{ "shape":"InventoryDeletionSummary", - "documentation":"

Information about the delete operation. For more information about this summary, see Understanding the Delete Inventory Summary.

" + "documentation":"

Information about the delete operation. For more information about this summary, see Understanding the Delete Inventory Summary in the AWS Systems Manager User Guide.

" }, "LastStatusUpdateTime":{ "shape":"InventoryDeletionLastStatusUpdateTime", @@ -8177,7 +8546,7 @@ }, "NotificationEvents":{ "shape":"NotificationEventList", - "documentation":"

The different events for which you can receive notifications. These events include the following: All (events), InProgress, Success, TimedOut, Cancelled, Failed. To learn more about these events, see Setting Up Events and Notifications in the AWS Systems Manager User Guide.

" + "documentation":"

The different events for which you can receive notifications. These events include the following: All (events), InProgress, Success, TimedOut, Cancelled, Failed. To learn more about these events, see Configuring Amazon SNS Notifications for Run Command in the AWS Systems Manager User Guide.

" }, "NotificationType":{ "shape":"NotificationType", @@ -8213,12 +8582,33 @@ "enum":[ "WINDOWS", "AMAZON_LINUX", + "AMAZON_LINUX_2", "UBUNTU", "REDHAT_ENTERPRISE_LINUX", "SUSE", "CENTOS" ] }, + "OutputSource":{ + "type":"structure", + "members":{ + "OutputSourceId":{ + "shape":"OutputSourceId", + "documentation":"

The ID of the output source, for example, the URL of an Amazon S3 bucket.

" + }, + "OutputSourceType":{ + "shape":"OutputSourceType", + "documentation":"

The type of source where the association execution details are stored, for example, Amazon S3.

" + } + }, + "documentation":"

Information about the source where the association execution details are stored.

" + }, + "OutputSourceId":{ + "type":"string", + "max":36, + "min":36 + }, + "OutputSourceType":{"type":"string"}, "OwnerInformation":{ "type":"string", "max":128, @@ -8428,7 +8818,7 @@ "documentation":"

The value you want to search for.

" } }, - "documentation":"

One or more filters. Use a filter to return a more specific list of results.

" + "documentation":"

One or more filters. Use a filter to return a more specific list of results.

The Name field can't be used with the GetParametersByPath API action.

" }, "ParameterStringFilterKey":{ "type":"string", @@ -8713,7 +9103,7 @@ "documentation":"

The value for the filter key.

See PatchFilter for lists of valid values for each key based on operating system type.

" } }, - "documentation":"

Defines a patch filter.

A patch filter consists of key/value pairs, but not all keys are valid for all operating system types. For example, the key PRODUCT is valid for all supported operating system types. The key MSRC_SEVERITY, however, is valid only for Windows operating systems, and the key SECTION is valid only for Ubuntu operating systems.

Refer to the following sections for information about which keys may be used with each major operating system, and which values are valid for each key.

Windows Operating Systems

The supported keys for Windows operating systems are PRODUCT, CLASSIFICATION, and MSRC_SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • Windows7

  • Windows8

  • Windows8.1

  • Windows8Embedded

  • Windows10

  • Windows10LTSB

  • WindowsServer2008

  • WindowsServer2008R2

  • WindowsServer2012

  • WindowsServer2012R2

  • WindowsServer2016

Supported key: CLASSIFICATION

Supported values:

  • CriticalUpdates

  • DefinitionUpdates

  • Drivers

  • FeaturePacks

  • SecurityUpdates

  • ServicePacks

  • Tools

  • UpdateRollups

  • Updates

  • Upgrades

Supported key: MSRC_SEVERITY

Supported values:

  • Critical

  • Important

  • Moderate

  • Low

  • Unspecified

Ubuntu Operating Systems

The supported keys for Ubuntu operating systems are PRODUCT, PRIORITY, and SECTION. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • Ubuntu14.04

  • Ubuntu16.04

Supported key: PRIORITY

Supported values:

  • Required

  • Important

  • Standard

  • Optional

  • Extra

Supported key: SECTION

Only the length of the key value is validated. Minimum length is 1. Maximum length is 64.

Amazon Linux Operating Systems

The supported keys for Amazon Linux operating systems are PRODUCT, CLASSIFICATION, and SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • AmazonLinux2012.03

  • AmazonLinux2012.09

  • AmazonLinux2013.03

  • AmazonLinux2013.09

  • AmazonLinux2014.03

  • AmazonLinux2014.09

  • AmazonLinux2015.03

  • AmazonLinux2015.09

  • AmazonLinux2016.03

  • AmazonLinux2016.09

  • AmazonLinux2017.03

  • AmazonLinux2017.09

Supported key: CLASSIFICATION

Supported values:

  • Security

  • Bugfix

  • Enhancement

  • Recommended

  • Newpackage

Supported key: SEVERITY

Supported values:

  • Critical

  • Important

  • Medium

  • Low

RedHat Enterprise Linux (RHEL) Operating Systems

The supported keys for RedHat Enterprise Linux operating systems are PRODUCT, CLASSIFICATION, and SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • RedhatEnterpriseLinux6.5

  • RedhatEnterpriseLinux6.6

  • RedhatEnterpriseLinux6.7

  • RedhatEnterpriseLinux6.8

  • RedhatEnterpriseLinux6.9

  • RedhatEnterpriseLinux7.0

  • RedhatEnterpriseLinux7.1

  • RedhatEnterpriseLinux7.2

  • RedhatEnterpriseLinux7.3

  • RedhatEnterpriseLinux7.4

Supported key: CLASSIFICATION

Supported values:

  • Security

  • Bugfix

  • Enhancement

  • Recommended

  • Newpackage

Supported key: SEVERITY

Supported values:

  • Critical

  • Important

  • Medium

  • Low

SUSE Linux Enterprise Server (SLES) Operating Systems

The supported keys for SLES operating systems are PRODUCT, CLASSIFICATION, and SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • Suse12.0

  • Suse12.1

  • Suse12.2

  • Suse12.3

  • Suse12.4

  • Suse12.5

  • Suse12.6

  • Suse12.7

  • Suse12.8

  • Suse12.9

Supported key: CLASSIFICATION

Supported values:

  • Security

  • Recommended

  • Optional

  • Feature

  • Document

  • Yast

Supported key: SEVERITY

Supported values:

  • Critical

  • Important

  • Moderate

  • Low

CentOS Operating Systems

The supported keys for CentOS operating systems are PRODUCT, CLASSIFICATION, and SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • CentOS6.5

  • CentOS6.6

  • CentOS6.7

  • CentOS6.8

  • CentOS6.9

  • CentOS7.0

  • CentOS7.1

  • CentOS7.2

  • CentOS7.3

  • CentOS7.4

Supported key: CLASSIFICATION

Supported values:

  • Security

  • Bugfix

  • Enhancement

  • Recommended

  • Newpackage

Supported key: SEVERITY

Supported values:

  • Critical

  • Important

  • Medium

  • Low

" + "documentation":"

Defines a patch filter.

A patch filter consists of key/value pairs, but not all keys are valid for all operating system types. For example, the key PRODUCT is valid for all supported operating system types. The key MSRC_SEVERITY, however, is valid only for Windows operating systems, and the key SECTION is valid only for Ubuntu operating systems.

Refer to the following sections for information about which keys may be used with each major operating system, and which values are valid for each key.

Windows Operating Systems

The supported keys for Windows operating systems are PRODUCT, CLASSIFICATION, and MSRC_SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • Windows7

  • Windows8

  • Windows8.1

  • Windows8Embedded

  • Windows10

  • Windows10LTSB

  • WindowsServer2008

  • WindowsServer2008R2

  • WindowsServer2012

  • WindowsServer2012R2

  • WindowsServer2016

  • *

    Use a wildcard character (*) to target all supported operating system versions.

Supported key: CLASSIFICATION

Supported values:

  • CriticalUpdates

  • DefinitionUpdates

  • Drivers

  • FeaturePacks

  • SecurityUpdates

  • ServicePacks

  • Tools

  • UpdateRollups

  • Updates

  • Upgrades

Supported key: MSRC_SEVERITY

Supported values:

  • Critical

  • Important

  • Moderate

  • Low

  • Unspecified

Ubuntu Operating Systems

The supported keys for Ubuntu operating systems are PRODUCT, PRIORITY, and SECTION. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • Ubuntu14.04

  • Ubuntu16.04

  • *

    Use a wildcard character (*) to target all supported operating system versions.

Supported key: PRIORITY

Supported values:

  • Required

  • Important

  • Standard

  • Optional

  • Extra

Supported key: SECTION

Only the length of the key value is validated. Minimum length is 1. Maximum length is 64.

Amazon Linux Operating Systems

The supported keys for Amazon Linux operating systems are PRODUCT, CLASSIFICATION, and SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • AmazonLinux2012.03

  • AmazonLinux2012.09

  • AmazonLinux2013.03

  • AmazonLinux2013.09

  • AmazonLinux2014.03

  • AmazonLinux2014.09

  • AmazonLinux2015.03

  • AmazonLinux2015.09

  • AmazonLinux2016.03

  • AmazonLinux2016.09

  • AmazonLinux2017.03

  • AmazonLinux2017.09

  • *

    Use a wildcard character (*) to target all supported operating system versions.

Supported key: CLASSIFICATION

Supported values:

  • Security

  • Bugfix

  • Enhancement

  • Recommended

  • Newpackage

Supported key: SEVERITY

Supported values:

  • Critical

  • Important

  • Medium

  • Low

Amazon Linux 2 Operating Systems

The supported keys for Amazon Linux 2 operating systems are PRODUCT, CLASSIFICATION, and SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • AmazonLinux2

  • AmazonLinux2.0

  • *

    Use a wildcard character (*) to target all supported operating system versions.

Supported key: CLASSIFICATION

Supported values:

  • Security

  • Bugfix

  • Enhancement

  • Recommended

  • Newpackage

Supported key: SEVERITY

Supported values:

  • Critical

  • Important

  • Medium

  • Low

RedHat Enterprise Linux (RHEL) Operating Systems

The supported keys for RedHat Enterprise Linux operating systems are PRODUCT, CLASSIFICATION, and SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • RedhatEnterpriseLinux6.5

  • RedhatEnterpriseLinux6.6

  • RedhatEnterpriseLinux6.7

  • RedhatEnterpriseLinux6.8

  • RedhatEnterpriseLinux6.9

  • RedhatEnterpriseLinux7.0

  • RedhatEnterpriseLinux7.1

  • RedhatEnterpriseLinux7.2

  • RedhatEnterpriseLinux7.3

  • RedhatEnterpriseLinux7.4

  • *

    Use a wildcard character (*) to target all supported operating system versions.

Supported key: CLASSIFICATION

Supported values:

  • Security

  • Bugfix

  • Enhancement

  • Recommended

  • Newpackage

Supported key: SEVERITY

Supported values:

  • Critical

  • Important

  • Medium

  • Low

SUSE Linux Enterprise Server (SLES) Operating Systems

The supported keys for SLES operating systems are PRODUCT, CLASSIFICATION, and SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • Suse12.0

  • Suse12.1

  • Suse12.2

  • Suse12.3

  • Suse12.4

  • Suse12.5

  • Suse12.6

  • Suse12.7

  • Suse12.8

  • Suse12.9

  • *

    Use a wildcard character (*) to target all supported operating system versions.

Supported key: CLASSIFICATION

Supported values:

  • Security

  • Recommended

  • Optional

  • Feature

  • Document

  • Yast

Supported key: SEVERITY

Supported values:

  • Critical

  • Important

  • Moderate

  • Low

CentOS Operating Systems

The supported keys for CentOS operating systems are PRODUCT, CLASSIFICATION, and SEVERITY. See the following lists for valid values for each of these keys.

Supported key: PRODUCT

Supported values:

  • CentOS6.5

  • CentOS6.6

  • CentOS6.7

  • CentOS6.8

  • CentOS6.9

  • CentOS7.0

  • CentOS7.1

  • CentOS7.2

  • CentOS7.3

  • CentOS7.4

  • *

    Use a wildcard character (*) to target all supported operating system versions.

Supported key: CLASSIFICATION

Supported values:

  • Security

  • Bugfix

  • Enhancement

  • Recommended

  • Newpackage

Supported key: SEVERITY

Supported values:

  • Critical

  • Important

  • Medium

  • Low

" }, "PatchFilterGroup":{ "type":"structure", @@ -8866,7 +9256,7 @@ }, "ApproveAfterDays":{ "shape":"ApproveAfterDays", - "documentation":"

The number of days after the release date of each patch matched by the rule the patch is marked as approved in the patch baseline.

", + "documentation":"

The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of 7 means that patches are approved seven days after they are released.

", "box":true }, "EnableNonSecurity":{ @@ -8913,7 +9303,7 @@ }, "Configuration":{ "shape":"PatchSourceConfiguration", - "documentation":"

The value of the yum repo configuration. For example:

cachedir=/var/cache/yum/$basesearch

$releasever

keepcache=0

debualevel=2

" + "documentation":"

The value of the yum repo configuration. For example:

cachedir=/var/cache/yum/$basesearch

$releasever

keepcache=0

debuglevel=2

" } }, "documentation":"

Information about the patches to use to update the instances, including target operating systems and source repository. Applies to Linux instances only.

" @@ -9063,11 +9453,11 @@ "members":{ "Name":{ "shape":"PSParameterName", - "documentation":"

The fully qualified name of the parameter that you want to add to the system. The fully qualified name includes the complete hierarchy of the parameter path and name. For example: /Dev/DBServer/MySQL/db-string13

For information about parameter name requirements and restrictions, see About Creating Systems Manager Parameters in the AWS Systems Manager User Guide.

The maximum length constraint listed below includes capacity for additional system attributes that are not part of the name. The maximum length for the fully qualified parameter name is 1011 characters.

" + "documentation":"

The fully qualified name of the parameter that you want to add to the system. The fully qualified name includes the complete hierarchy of the parameter path and name. For example: /Dev/DBServer/MySQL/db-string13

Naming Constraints:

  • Parameter names are case sensitive.

  • A parameter name must be unique within an AWS Region.

  • A parameter name can't be prefixed with \"aws\" or \"ssm\" (case-insensitive).

  • Parameter names can include only the following symbols and letters: a-zA-Z0-9_.-/

  • A parameter name can't include spaces.

  • Parameter hierarchies are limited to a maximum depth of fifteen levels.

For additional information about valid values for parameter names, see Requirements and Constraints for Parameter Names in the AWS Systems Manager User Guide.

The maximum length constraint listed below includes capacity for additional system attributes that are not part of the name. The maximum length for the fully qualified parameter name is 1011 characters.

" }, "Description":{ "shape":"ParameterDescription", - "documentation":"

Information about the parameter that you want to add to the system.

Do not enter personally identifiable information in this field.

" + "documentation":"

Information about the parameter that you want to add to the system. Optional but recommended.

Do not enter personally identifiable information in this field.

" }, "Value":{ "shape":"PSParameterValue", @@ -9075,11 +9465,11 @@ }, "Type":{ "shape":"ParameterType", - "documentation":"

The type of parameter that you want to add to the system.

" + "documentation":"

The type of parameter that you want to add to the system.

Items in a StringList must be separated by a comma (,). You can't use other punctuation or special characters to escape items in the list. If you have a parameter value that requires a comma, then use the String data type.

SecureString is not currently supported for AWS CloudFormation templates or in the China Regions.

" }, "KeyId":{ "shape":"ParameterKeyId", - "documentation":"

The KMS Key ID that you want to use to encrypt a parameter when you choose the SecureString data type. If you don't specify a key ID, the system uses the default key associated with your AWS account.

" + "documentation":"

The KMS Key ID that you want to use to encrypt a parameter. Either the default AWS Key Management Service (AWS KMS) key automatically assigned to your AWS account or a custom key. Required for parameters that use the SecureString data type.

If you don't specify a key ID, the system uses the default key associated with your AWS account.

  • To use your default AWS KMS key, choose the SecureString data type, and do not specify the Key ID when you create the parameter. The system automatically populates Key ID with your default KMS key.

  • To use a custom KMS key, choose the SecureString data type with the Key ID parameter.

" }, "Overwrite":{ "shape":"Boolean", @@ -9374,6 +9764,7 @@ "member":{"shape":"ResourceComplianceSummaryItem"} }, "ResourceCount":{"type":"integer"}, + "ResourceCountByStatus":{"type":"string"}, "ResourceDataSyncAWSKMSKeyARN":{ "type":"string", "max":512, @@ -9638,11 +10029,11 @@ "members":{ "InstanceIds":{ "shape":"InstanceIdList", - "documentation":"

The instance IDs where the command should execute. You can specify a maximum of 50 IDs. If you prefer not to list individual instance IDs, you can instead send commands to a fleet of instances using the Targets parameter, which accepts EC2 tags. For more information about how to use Targets, see Sending Commands to a Fleet.

" + "documentation":"

The instance IDs where the command should execute. You can specify a maximum of 50 IDs. If you prefer not to list individual instance IDs, you can instead send commands to a fleet of instances using the Targets parameter, which accepts EC2 tags. For more information about how to use Targets, see Sending Commands to a Fleet in the AWS Systems Manager User Guide.

" }, "Targets":{ "shape":"Targets", - "documentation":"

(Optional) An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call. For more information about how to use Targets, see Sending Commands to a Fleet.

" + "documentation":"

(Optional) An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call. For more information about how to use Targets, see Sending Commands to a Fleet in the AWS Systems Manager User Guide.

" }, "DocumentName":{ "shape":"DocumentARN", @@ -9650,7 +10041,7 @@ }, "DocumentVersion":{ "shape":"DocumentVersion", - "documentation":"

The SSM document version to use in the request. You can specify Default, Latest, or a specific version number.

" + "documentation":"

The SSM document version to use in the request. You can specify $DEFAULT, $LATEST, or a specific version number. If you execute commands by using the AWS CLI, then you must escape the first two options by using a backslash. If you specify a version number, then you don't need to use the backslash. For example:

--document-version \"\\$DEFAULT\"

--document-version \"\\$LATEST\"

--document-version \"3\"

" }, "DocumentHash":{ "shape":"DocumentHash", @@ -9687,11 +10078,11 @@ }, "MaxConcurrency":{ "shape":"MaxConcurrency", - "documentation":"

(Optional) The maximum number of instances that are allowed to execute the command at the same time. You can specify a number such as 10 or a percentage such as 10%. The default value is 50. For more information about how to use MaxConcurrency, see Using Concurrency Controls.

" + "documentation":"

(Optional) The maximum number of instances that are allowed to execute the command at the same time. You can specify a number such as 10 or a percentage such as 10%. The default value is 50. For more information about how to use MaxConcurrency, see Using Concurrency Controls in the AWS Systems Manager User Guide.

" }, "MaxErrors":{ "shape":"MaxErrors", - "documentation":"

The maximum number of errors allowed without the command failing. When the command fails one more time beyond the value of MaxErrors, the systems stops sending the command to additional targets. You can specify a number like 10 or a percentage like 10%. The default value is 0. For more information about how to use MaxErrors, see Using Error Controls.

" + "documentation":"

The maximum number of errors allowed without the command failing. When the command fails one more time beyond the value of MaxErrors, the system stops sending the command to additional targets. You can specify a number like 10 or a percentage like 10%. The default value is 0. For more information about how to use MaxErrors, see Using Error Controls in the AWS Systems Manager User Guide.

" }, "ServiceRoleArn":{ "shape":"ServiceRole", @@ -9700,6 +10091,10 @@ "NotificationConfig":{ "shape":"NotificationConfig", "documentation":"

Configurations for sending notifications.

" + }, + "CloudWatchOutputConfig":{ + "shape":"CloudWatchOutputConfig", + "documentation":"

Enables Systems Manager to send Run Command output to Amazon CloudWatch Logs.

" } } }, @@ -9768,6 +10163,21 @@ "type":"string", "max":24000 }, + "StartAssociationsOnceRequest":{ + "type":"structure", + "required":["AssociationIds"], + "members":{ + "AssociationIds":{ + "shape":"AssociationIdList", + "documentation":"

The association IDs that you want to execute immediately and only one time.

" + } + } + }, + "StartAssociationsOnceResult":{ + "type":"structure", + "members":{ + } + }, "StartAutomationExecutionRequest":{ "type":"structure", "required":["DocumentName"], @@ -10046,11 +10456,11 @@ "members":{ "Key":{ "shape":"TargetKey", - "documentation":"

User-defined criteria for sending commands that target instances that meet the criteria. Key can be tag:<Amazon EC2 tag> or InstanceIds. For more information about how to send commands that target instances using Key,Value parameters, see Executing a Command Using Systems Manager Run Command.

" + "documentation":"

User-defined criteria for sending commands that target instances that meet the criteria. Key can be tag:<Amazon EC2 tag> or InstanceIds. For more information about how to send commands that target instances using Key,Value parameters, see Targeting Multiple Instances in the AWS Systems Manager User Guide.

" }, "Values":{ "shape":"TargetValues", - "documentation":"

User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, you could specify value:WebServer to execute a command on instances that include Amazon EC2 tags of ServerRole,WebServer. For more information about how to send commands that target instances using Key,Value parameters, see Executing a Command Using Systems Manager Run Command.

" + "documentation":"

User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, you could specify value:WebServer to execute a command on instances that include Amazon EC2 tags of ServerRole,WebServer. For more information about how to send commands that target instances using Key,Value parameters, see Sending Commands to a Fleet in the AWS Systems Manager User Guide.

" } }, "documentation":"

An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call.

" @@ -10721,5 +11131,5 @@ "pattern":"^[0-9]{1,6}(\\.[0-9]{1,6}){2,3}$" } }, - "documentation":"AWS Systems Manager

AWS Systems Manager is a collection of capabilities that helps you automate management tasks such as collecting system inventory, applying operating system (OS) patches, automating the creation of Amazon Machine Images (AMIs), and configuring operating systems (OSs) and applications at scale. Systems Manager lets you remotely and securely manage the configuration of your managed instances. A managed instance is any Amazon EC2 instance or on-premises machine in your hybrid environment that has been configured for Systems Manager.

This reference is intended to be used with the AWS Systems Manager User Guide.

To get started, verify prerequisites and configure managed instances. For more information, see Systems Manager Prerequisites.

For information about other API actions you can perform on Amazon EC2 instances, see the Amazon EC2 API Reference. For information about how to use a Query API, see Making API Requests.

" + "documentation":"AWS Systems Manager

AWS Systems Manager is a collection of capabilities that helps you automate management tasks such as collecting system inventory, applying operating system (OS) patches, automating the creation of Amazon Machine Images (AMIs), and configuring operating systems (OSs) and applications at scale. Systems Manager lets you remotely and securely manage the configuration of your managed instances. A managed instance is any Amazon EC2 instance or on-premises machine in your hybrid environment that has been configured for Systems Manager.

This reference is intended to be used with the AWS Systems Manager User Guide.

To get started, verify prerequisites and configure managed instances. For more information, see Systems Manager Prerequisites in the AWS Systems Manager User Guide.

For information about other API actions you can perform on Amazon EC2 instances, see the Amazon EC2 API Reference. For information about how to use a Query API, see Making API Requests.

" } diff --git a/botocore/data/storagegateway/2013-06-30/service-2.json b/botocore/data/storagegateway/2013-06-30/service-2.json index 187d1b0b..26a29a9a 100644 --- a/botocore/data/storagegateway/2013-06-30/service-2.json +++ b/botocore/data/storagegateway/2013-06-30/service-2.json @@ -136,7 +136,21 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Creates a file share on an existing file gateway. In Storage Gateway, a file share is a file system mount point backed by Amazon S3 cloud storage. Storage Gateway exposes file shares using a Network File System (NFS) interface. This operation is only supported in the file gateway type.

File gateway requires AWS Security Token Service (AWS STS) to be activated to enable you create a file share. Make sure AWS STS is activated in the region you are creating your file gateway in. If AWS STS is not activated in the region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateway does not support creating hard or symbolic links on a file share.

" + "documentation":"

Creates a Network File System (NFS) file share on an existing file gateway. In Storage Gateway, a file share is a file system mount point backed by Amazon S3 cloud storage. Storage Gateway exposes file shares using an NFS interface. This operation is only supported in the file gateway type.

File gateway requires AWS Security Token Service (AWS STS) to be activated to enable you to create a file share. Make sure AWS STS is activated in the region you are creating your file gateway in. If AWS STS is not activated in the region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateway does not support creating hard or symbolic links on a file share.

" + }, + "CreateSMBFileShare":{ + "name":"CreateSMBFileShare", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSMBFileShareInput"}, + "output":{"shape":"CreateSMBFileShareOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Creates a Server Message Block (SMB) file share on an existing file gateway. In Storage Gateway, a file share is a file system mount point backed by Amazon S3 cloud storage. Storage Gateway exposes file shares using an SMB interface. This operation is only supported in the file gateway type.

File gateway requires AWS Security Token Service (AWS STS) to be activated to enable you to create a file share. Make sure AWS STS is activated in the region you are creating your file gateway in. If AWS STS is not activated in the region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateway does not support creating hard or symbolic links on a file share.

" }, "CreateSnapshot":{ "name":"CreateSnapshot", @@ -418,7 +432,35 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Gets a description for one or more file shares from a file gateway. This operation is only supported in the file gateway type.

" + "documentation":"

Gets a description for one or more Network File System (NFS) file shares from a file gateway. This operation is only supported in the file gateway type.

" + }, + "DescribeSMBFileShares":{ + "name":"DescribeSMBFileShares", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSMBFileSharesInput"}, + "output":{"shape":"DescribeSMBFileSharesOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Gets a description for one or more Server Message Block (SMB) file shares from a file gateway. This operation is only supported in the file gateway type.

" + }, + "DescribeSMBSettings":{ + "name":"DescribeSMBSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSMBSettingsInput"}, + "output":{"shape":"DescribeSMBSettingsOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Gets a description of the Server Message Block (SMB) file share settings from a file gateway. This operation is only supported in the file gateway type.

" }, "DescribeSnapshotSchedule":{ "name":"DescribeSnapshotSchedule", @@ -546,6 +588,20 @@ ], "documentation":"

Disables a tape gateway when the gateway is no longer functioning. For example, if your gateway VM is damaged, you can disable the gateway so you can recover virtual tapes.

Use this operation for a tape gateway that is not reachable or not functioning. This operation is only supported in the tape gateway type.

Once a gateway is disabled it cannot be enabled.

" }, + "JoinDomain":{ + "name":"JoinDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"JoinDomainInput"}, + "output":{"shape":"JoinDomainOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Adds a file gateway to an Active Directory domain. This operation is only supported in the file gateway type that supports the SMB file protocol.

" + }, "ListFileShares":{ "name":"ListFileShares", "http":{ @@ -756,6 +812,20 @@ ], "documentation":"

Sets the password for your VM local console. When you log in to the local console for the first time, you log in to the VM with the default credentials. We recommend that you set a new password. You don't need to know the default password to set a new password.

" }, + "SetSMBGuestPassword":{ + "name":"SetSMBGuestPassword", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetSMBGuestPasswordInput"}, + "output":{"shape":"SetSMBGuestPasswordOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Sets the password for the guest user \"smbguest\". \"smbguest\" is the user when the Authentication method for the file share is \"GuestAccess\".

" + }, "ShutdownGateway":{ "name":"ShutdownGateway", "http":{ @@ -866,7 +936,21 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Updates a file share. This operation is only supported in the file gateway type.

To leave a file share field unchanged, set the corresponding input field to null.

Updates the following file share setting:

  • Default storage class for your S3 bucket

  • Metadata defaults for your S3 bucket

  • Allowed NFS clients for your file share

  • Squash settings

  • Write status of your file share

To leave a file share field unchanged, set the corresponding input field to null. This operation is only supported in file gateways.

" + "documentation":"

Updates a Network File System (NFS) file share. This operation is only supported in the file gateway type.

To leave a file share field unchanged, set the corresponding input field to null.

Updates the following file share setting:

  • Default storage class for your S3 bucket

  • Metadata defaults for your S3 bucket

  • Allowed NFS clients for your file share

  • Squash settings

  • Write status of your file share

To leave a file share field unchanged, set the corresponding input field to null. This operation is only supported in file gateways.

" + }, + "UpdateSMBFileShare":{ + "name":"UpdateSMBFileShare", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateSMBFileShareInput"}, + "output":{"shape":"UpdateSMBFileShareOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Updates a Server Message Block (SMB) file share. This operation is only supported in the file gateway type.

To leave a file share field unchanged, set the corresponding input field to null. This operation is only supported in the file gateway type.

File gateway requires AWS Security Token Service (AWS STS) to be activated to enable you to create a file share. Make sure AWS STS is activated in the region you are creating your file gateway in. If AWS STS is not activated in the region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateway does not support creating hard or symbolic links on a file share.

" }, "UpdateSnapshotSchedule":{ "name":"UpdateSnapshotSchedule", @@ -1034,6 +1118,12 @@ }, "documentation":"

A JSON object containing the of the gateway for which working storage was configured.

" }, + "Authentication":{ + "type":"string", + "documentation":"

The authentication method of the file share. Valid values: \"ActiveDirectory\" or \"GuestAccess\". The default is \"ActiveDirectory\".

", + "max":15, + "min":5 + }, "BandwidthDownloadRateLimit":{ "type":"long", "min":102400 @@ -1090,7 +1180,8 @@ "VolumeUsedInBytes":{ "shape":"VolumeUsedInBytes", "documentation":"

The size of the data stored on the volume in bytes.

This value is not available for volumes created prior to May 13, 2015, until you store data on the volume.

" - } + }, + "KMSKey":{"shape":"KMSKey"} }, "documentation":"

Describes an iSCSI cached volume.

" }, @@ -1195,22 +1286,51 @@ ], "members":{ "GatewayARN":{"shape":"GatewayARN"}, - "VolumeSizeInBytes":{"shape":"long"}, - "SnapshotId":{"shape":"SnapshotId"}, - "TargetName":{"shape":"TargetName"}, + "VolumeSizeInBytes":{ + "shape":"long", + "documentation":"

The size of the volume in bytes.

" + }, + "SnapshotId":{ + "shape":"SnapshotId", + "documentation":"

The snapshot ID (e.g. \"snap-1122aabb\") of the snapshot to restore as the new cached volume. Specify this field if you want to create the iSCSI storage volume from a snapshot otherwise do not include this field. To list snapshots for your account use DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.

" + }, + "TargetName":{ + "shape":"TargetName", + "documentation":"

The name of the iSCSI target used by initiators to connect to the target and as a suffix for the target ARN. For example, specifying TargetName as myvolume results in the target ARN of arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume. The target name must be unique across all volumes of a gateway.

" + }, "SourceVolumeARN":{ "shape":"VolumeARN", "documentation":"

The ARN for an existing volume. Specifying this ARN makes the new volume into an exact copy of the specified existing volume's latest recovery point. The VolumeSizeInBytes value for this new volume must be equal to or larger than the size of the existing volume, in bytes.

" }, - "NetworkInterfaceId":{"shape":"NetworkInterfaceId"}, - "ClientToken":{"shape":"ClientToken"} + "NetworkInterfaceId":{ + "shape":"NetworkInterfaceId", + "documentation":"

The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a list of the network interfaces available on a gateway.

Valid Values: A valid IP address.

" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

A unique identifier that you use to retry a request. If you retry a request, use the same ClientToken you specified in the initial request.

" + }, + "KMSEncrypted":{ + "shape":"Boolean", + "documentation":"

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" + }, + "KMSKey":{ + "shape":"KMSKey", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + } } }, "CreateCachediSCSIVolumeOutput":{ "type":"structure", "members":{ - "VolumeARN":{"shape":"VolumeARN"}, - "TargetARN":{"shape":"TargetARN"} + "VolumeARN":{ + "shape":"VolumeARN", + "documentation":"

The Amazon Resource Name (ARN) of the configured volume.

" + }, + "TargetARN":{ + "shape":"TargetARN", + "documentation":"

The Amazon Resource Name (ARN) of the volume target that includes the iSCSI name that initiators can use to connect to the target.

" + } } }, "CreateNFSFileShareInput":{ @@ -1240,7 +1360,7 @@ }, "KMSKey":{ "shape":"KMSKey", - "documentation":"

The KMS key used for Amazon S3 server side encryption. This value can only be set when KmsEncrypted is true. Optional.

" + "documentation":"

The Amazon Resource Name (ARN) KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" }, "Role":{ "shape":"Role", @@ -1252,7 +1372,7 @@ }, "DefaultStorageClass":{ "shape":"StorageClass", - "documentation":"

The default storage class for objects put into an Amazon S3 bucket by file gateway. Possible values are S3_STANDARD or S3_STANDARD_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" + "documentation":"

The default storage class for objects put into an Amazon S3 bucket by file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" }, "ObjectACL":{ "shape":"ObjectACL", @@ -1291,6 +1411,84 @@ }, "documentation":"

CreateNFSFileShareOutput

" }, + "CreateSMBFileShareInput":{ + "type":"structure", + "required":[ + "ClientToken", + "GatewayARN", + "Role", + "LocationARN" + ], + "members":{ + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

A unique string value that you supply that is used by file gateway to ensure idempotent file share creation.

" + }, + "GatewayARN":{ + "shape":"GatewayARN", + "documentation":"

The Amazon Resource Name (ARN) of the file gateway on which you want to create a file share.

" + }, + "KMSEncrypted":{ + "shape":"Boolean", + "documentation":"

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" + }, + "KMSKey":{ + "shape":"KMSKey", + "documentation":"

The Amazon Resource Name (ARN) KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + }, + "Role":{ + "shape":"Role", + "documentation":"

The ARN of the AWS Identity and Access Management (IAM) role that a file gateway assumes when it accesses the underlying storage.

" + }, + "LocationARN":{ + "shape":"LocationARN", + "documentation":"

The ARN of the backed storage used for storing file data.

" + }, + "DefaultStorageClass":{ + "shape":"StorageClass", + "documentation":"

The default storage class for objects put into an Amazon S3 bucket by file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" + }, + "ObjectACL":{ + "shape":"ObjectACL", + "documentation":"

Sets the access control list permission for objects in the Amazon S3 bucket that a file gateway puts objects into. The default value is \"private\".

" + }, + "ReadOnly":{ + "shape":"Boolean", + "documentation":"

Sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" + }, + "GuessMIMETypeEnabled":{ + "shape":"Boolean", + "documentation":"

Enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" + }, + "RequesterPays":{ + "shape":"Boolean", + "documentation":"

Sets who pays the cost of the request and the data download from the Amazon S3 bucket. Set this value to true if you want the requester to pay instead of the bucket owner, and otherwise to false.

" + }, + "ValidUserList":{ + "shape":"FileShareUserList", + "documentation":"

A list of users in the Active Directory that are allowed to access the file share. Can only be set if Authentication is set to \"ActiveDirectory\".

" + }, + "InvalidUserList":{ + "shape":"FileShareUserList", + "documentation":"

A list of users in the Active Directory that are not allowed to access the file share. Can only be set if Authentication is set to \"ActiveDirectory\".

" + }, + "Authentication":{ + "shape":"Authentication", + "documentation":"

The authentication method that users use to access the file share.

Valid values: \"ActiveDirectory\" or \"GuestAccess\". The default is \"ActiveDirectory\".

" + } + }, + "documentation":"

CreateSMBFileShareInput

" + }, + "CreateSMBFileShareOutput":{ + "type":"structure", + "members":{ + "FileShareARN":{ + "shape":"FileShareARN", + "documentation":"

The Amazon Resource Name (ARN) of the newly created file share.

" + } + }, + "documentation":"

CreateSMBFileShareOutput

" + }, "CreateSnapshotFromVolumeRecoveryPointInput":{ "type":"structure", "required":[ @@ -1413,6 +1611,14 @@ "TapeBarcode":{ "shape":"TapeBarcode", "documentation":"

The barcode that you want to assign to the tape.

Barcodes cannot be reused. This includes barcodes used for tapes that have been deleted.

" + }, + "KMSEncrypted":{ + "shape":"Boolean", + "documentation":"

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" + }, + "KMSKey":{ + "shape":"KMSKey", + "documentation":"

The Amazon Resource Name (ARN) of the KMS Key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" } }, "documentation":"

CreateTapeWithBarcodeInput

" @@ -1456,6 +1662,14 @@ "TapeBarcodePrefix":{ "shape":"TapeBarcodePrefix", "documentation":"

A prefix that you append to the barcode of the virtual tape you are creating. This prefix makes the barcode unique.

The prefix must be 1 to 4 characters in length and must be one of the uppercase letters from A to Z.

" + }, + "KMSEncrypted":{ + "shape":"Boolean", + "documentation":"

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" + }, + "KMSKey":{ + "shape":"KMSKey", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" } }, "documentation":"

CreateTapesInput

" @@ -1829,6 +2043,48 @@ }, "documentation":"

DescribeNFSFileSharesOutput

" }, + "DescribeSMBFileSharesInput":{ + "type":"structure", + "required":["FileShareARNList"], + "members":{ + "FileShareARNList":{ + "shape":"FileShareARNList", + "documentation":"

An array containing the Amazon Resource Name (ARN) of each file share to be described.

" + } + }, + "documentation":"

DescribeSMBFileSharesInput

" + }, + "DescribeSMBFileSharesOutput":{ + "type":"structure", + "members":{ + "SMBFileShareInfoList":{ + "shape":"SMBFileShareInfoList", + "documentation":"

An array containing a description for each requested file share.

" + } + }, + "documentation":"

DescribeSMBFileSharesOutput

" + }, + "DescribeSMBSettingsInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "DescribeSMBSettingsOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "DomainName":{ + "shape":"DomainName", + "documentation":"

The name of the domain that the gateway is joined to.

" + }, + "SMBGuestPasswordSet":{ + "shape":"Boolean", + "documentation":"

This value is true if a password for the guest user “smbguest” is set, and otherwise false.

" + } + } + }, "DescribeSnapshotScheduleInput":{ "type":"structure", "required":["VolumeARN"], @@ -2122,6 +2378,19 @@ "type":"list", "member":{"shape":"Disk"} }, + "DomainName":{ + "type":"string", + "pattern":"^([a-z0-9]+(-[a-z0-9]+)*\\.)+[a-z]{2,}$" + }, + "DomainUserName":{ + "type":"string", + "pattern":"^\\w[\\w\\.\\- ]*$" + }, + "DomainUserPassword":{ + "type":"string", + "pattern":"^[ -~]+$", + "sensitive":true + }, "DoubleObject":{"type":"double"}, "ErrorCode":{ "type":"string", @@ -2217,6 +2486,7 @@ "FileShareInfo":{ "type":"structure", "members":{ + "FileShareType":{"shape":"FileShareType"}, "FileShareARN":{"shape":"FileShareARN"}, "FileShareId":{"shape":"FileShareId"}, "FileShareStatus":{"shape":"FileShareStatus"}, @@ -2230,10 +2500,29 @@ }, "FileShareStatus":{ "type":"string", - "documentation":"

The status of the file share. Possible values are CREATING, UPDATING, AVAILABLE and DELETING.

", + "documentation":"

The status of the file share. Possible values are CREATING, UPDATING, AVAILABLE and DELETING.

", "max":50, "min":3 }, + "FileShareType":{ + "type":"string", + "documentation":"

The type of the file share.

", + "enum":[ + "NFS", + "SMB" + ] + }, + "FileShareUser":{ + "type":"string", + "max":64, + "min":1 + }, + "FileShareUserList":{ + "type":"list", + "member":{"shape":"FileShareUser"}, + "max":100, + "min":0 + }, "GatewayARN":{ "type":"string", "documentation":"

The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.

", @@ -2360,9 +2649,47 @@ "min":1, "pattern":"[0-9a-z:.-]+" }, + "JoinDomainInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "DomainName", + "UserName", + "Password" + ], + "members":{ + "GatewayARN":{ + "shape":"GatewayARN", + "documentation":"

The unique Amazon Resource Name of the file gateway you want to add to the Active Directory domain.

" + }, + "DomainName":{ + "shape":"DomainName", + "documentation":"

The name of the domain that you want the gateway to join.

" + }, + "UserName":{ + "shape":"DomainUserName", + "documentation":"

Sets the user name of user who has permission to add the gateway to the Active Directory domain.

" + }, + "Password":{ + "shape":"DomainUserPassword", + "documentation":"

Sets the password of the user who has permission to add the gateway to the Active Directory domain.

" + } + }, + "documentation":"

JoinDomainInput

" + }, + "JoinDomainOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{ + "shape":"GatewayARN", + "documentation":"

The unique Amazon Resource Name of the gateway that joined the domain.

" + } + }, + "documentation":"

JoinDomainOutput

" + }, "KMSKey":{ "type":"string", - "documentation":"

The ARN of the KMS key used for Amazon S3 server side encryption.

", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

", "max":2048, "min":20 }, @@ -2612,7 +2939,7 @@ "documentation":"

The default owner ID for files in the file share (unless the files have another owner ID specified). The default value is nfsnobody.

" } }, - "documentation":"

Describes file share default values. Files and folders stored as Amazon S3 objects in S3 buckets don't, by default, have Unix file permissions assigned to them. Upon discovery in an S3 bucket by Storage Gateway, the S3 objects that represent files and folders are assigned these default Unix permissions. This operation is only supported in the file gateway type.

" + "documentation":"

Describes Network File System (NFS) file share default values. Files and folders stored as Amazon S3 objects in S3 buckets don't, by default, have Unix file permissions assigned to them. Upon discovery in an S3 bucket by Storage Gateway, the S3 objects that represent files and folders are assigned these default Unix permissions. This operation is only supported in the file gateway type.

" }, "NFSFileShareInfo":{ "type":"structure", @@ -2632,7 +2959,7 @@ "LocationARN":{"shape":"LocationARN"}, "DefaultStorageClass":{ "shape":"StorageClass", - "documentation":"

The default storage class for objects put into an Amazon S3 bucket by file gateway. Possible values are S3_STANDARD or S3_STANDARD_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" + "documentation":"

The default storage class for objects put into an Amazon S3 bucket by file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" }, "ObjectACL":{"shape":"ObjectACL"}, "ClientList":{"shape":"FileShareClientList"}, @@ -2868,6 +3195,64 @@ "max":2048, "min":20 }, + "SMBFileShareInfo":{ + "type":"structure", + "members":{ + "FileShareARN":{"shape":"FileShareARN"}, + "FileShareId":{"shape":"FileShareId"}, + "FileShareStatus":{"shape":"FileShareStatus"}, + "GatewayARN":{"shape":"GatewayARN"}, + "KMSEncrypted":{ + "shape":"boolean", + "documentation":"

True to use Amazon S3 server side encryption with your own KMS key, or false to use a key managed by Amazon S3. Optional.

" + }, + "KMSKey":{"shape":"KMSKey"}, + "Path":{ + "shape":"Path", + "documentation":"

The file share path used by the SMB client to identify the mount point.

" + }, + "Role":{"shape":"Role"}, + "LocationARN":{"shape":"LocationARN"}, + "DefaultStorageClass":{ + "shape":"StorageClass", + "documentation":"

The default storage class for objects put into an Amazon S3 bucket by file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" + }, + "ObjectACL":{"shape":"ObjectACL"}, + "ReadOnly":{ + "shape":"Boolean", + "documentation":"

Sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" + }, + "GuessMIMETypeEnabled":{ + "shape":"Boolean", + "documentation":"

Enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" + }, + "RequesterPays":{ + "shape":"Boolean", + "documentation":"

Sets who pays the cost of the request and the data download from the Amazon S3 bucket. Set this value to true if you want the requester to pay instead of the bucket owner, and otherwise to false.

" + }, + "ValidUserList":{ + "shape":"FileShareUserList", + "documentation":"

A list of users in the Active Directory that are allowed to access the file share. Can only be set if Authentication is set to \"ActiveDirectory\".

" + }, + "InvalidUserList":{ + "shape":"FileShareUserList", + "documentation":"

A list of users in the Active Directory that are not allowed to access the file share. Can only be set if Authentication is set to \"ActiveDirectory\".

" + }, + "Authentication":{"shape":"Authentication"} + }, + "documentation":"

The Windows file permissions and ownership information assigned, by default, to native S3 objects when file gateway discovers them in S3 buckets. This operation is only supported in file gateways.

" + }, + "SMBFileShareInfoList":{ + "type":"list", + "member":{"shape":"SMBFileShareInfo"} + }, + "SMBGuestPassword":{ + "type":"string", + "max":512, + "min":6, + "pattern":"^[ -~]+$", + "sensitive":true + }, "ServiceUnavailableError":{ "type":"structure", "members":{ @@ -2904,6 +3289,30 @@ "GatewayARN":{"shape":"GatewayARN"} } }, + "SetSMBGuestPasswordInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "Password" + ], + "members":{ + "GatewayARN":{ + "shape":"GatewayARN", + "documentation":"

The Amazon Resource Name (ARN) of the file gateway the SMB file share is associated with.

" + }, + "Password":{ + "shape":"SMBGuestPassword", + "documentation":"

The password you want to set for your SMB Server.

" + } + }, + "documentation":"

SetSMBGuestPasswordInput

" + }, + "SetSMBGuestPasswordOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, "ShutdownGatewayInput":{ "type":"structure", "required":["GatewayARN"], @@ -3042,7 +3451,7 @@ "type":"string", "max":128, "min":1, - "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-%@]*)$" + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" }, "TagKeys":{ "type":"list", @@ -3090,7 +3499,8 @@ "TapeUsedInBytes":{ "shape":"TapeUsage", "documentation":"

The size, in bytes, of data stored on the virtual tape.

This value is not available for tapes created prior to May 13, 2015.

" - } + }, + "KMSKey":{"shape":"KMSKey"} }, "documentation":"

Describes a virtual tape object.

" }, @@ -3098,7 +3508,7 @@ "type":"string", "max":500, "min":50, - "pattern":"^arn:(aws|aws-cn):storagegateway:[a-z\\-0-9]+:[0-9]+:tape\\/[0-9A-Z]{7,16}$" + "pattern":"^arn:(aws|aws-cn|aws-us-gov):storagegateway:[a-z\\-0-9]+:[0-9]+:tape\\/[0-9A-Z]{7,16}$" }, "TapeARNs":{ "type":"list", @@ -3116,14 +3526,17 @@ "shape":"TapeBarcode", "documentation":"

The barcode that identifies the archived virtual tape.

" }, - "TapeCreatedDate":{"shape":"Time"}, + "TapeCreatedDate":{ + "shape":"Time", + "documentation":"

The date the virtual tape was created.

" + }, "TapeSizeInBytes":{ "shape":"TapeSize", "documentation":"

The size, in bytes, of the archived virtual tape.

" }, "CompletionTime":{ "shape":"Time", - "documentation":"

The time that the archiving of the virtual tape was completed.

The string format of the completion time is in the ISO8601 extended YYYY-MM-DD'T'HH:MM:SS'Z' format.

" + "documentation":"

The time that the archiving of the virtual tape was completed.

The default time stamp format is in the ISO8601 extended YYYY-MM-DD'T'HH:MM:SS'Z' format.

" }, "RetrievedTo":{ "shape":"GatewayARN", @@ -3136,7 +3549,8 @@ "TapeUsedInBytes":{ "shape":"TapeUsage", "documentation":"

The size, in bytes, of data stored on the virtual tape.

This value is not available for tapes created prior to May 13, 2015.

" - } + }, + "KMSKey":{"shape":"KMSKey"} }, "documentation":"

Represents a virtual tape that is archived in the virtual tape shelf (VTS).

" }, @@ -3202,7 +3616,7 @@ }, "TapeRecoveryPointTime":{ "shape":"Time", - "documentation":"

The time when the point-in-time view of the virtual tape was replicated for later recovery.

The string format of the tape recovery point time is in the ISO8601 extended YYYY-MM-DD'T'HH:MM:SS'Z' format.

" + "documentation":"

The time when the point-in-time view of the virtual tape was replicated for later recovery.

The default time stamp format of the tape recovery point time is in the ISO8601 extended YYYY-MM-DD'T'HH:MM:SS'Z' format.

" }, "TapeSizeInBytes":{ "shape":"TapeSize", @@ -3378,7 +3792,7 @@ }, "KMSKey":{ "shape":"KMSKey", - "documentation":"

The KMS key used for Amazon S3 server side encryption. This value can only be set when KmsEncrypted is true. Optional.

" + "documentation":"

The Amazon Resource Name (ARN) of the KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" }, "NFSFileShareDefaults":{ "shape":"NFSFileShareDefaults", @@ -3386,7 +3800,7 @@ }, "DefaultStorageClass":{ "shape":"StorageClass", - "documentation":"

The default storage class for objects put into an Amazon S3 bucket by a file gateway. Possible values are S3_STANDARD or S3_STANDARD_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" + "documentation":"

The default storage class for objects put into an Amazon S3 bucket by a file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" }, "ObjectACL":{ "shape":"ObjectACL", @@ -3425,6 +3839,63 @@ }, "documentation":"

UpdateNFSFileShareOutput

" }, + "UpdateSMBFileShareInput":{ + "type":"structure", + "required":["FileShareARN"], + "members":{ + "FileShareARN":{ + "shape":"FileShareARN", + "documentation":"

The Amazon Resource Name (ARN) of the SMB file share you want to update.

" + }, + "KMSEncrypted":{ + "shape":"Boolean", + "documentation":"

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" + }, + "KMSKey":{ + "shape":"KMSKey", + "documentation":"

The Amazon Resource Name (ARN) KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + }, + "DefaultStorageClass":{ + "shape":"StorageClass", + "documentation":"

The default storage class for objects put into an Amazon S3 bucket by file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" + }, + "ObjectACL":{ + "shape":"ObjectACL", + "documentation":"

Sets the access control list permission for objects in the Amazon S3 bucket that a file gateway puts objects into. The default value is \"private\".

" + }, + "ReadOnly":{ + "shape":"Boolean", + "documentation":"

Sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" + }, + "GuessMIMETypeEnabled":{ + "shape":"Boolean", + "documentation":"

Enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" + }, + "RequesterPays":{ + "shape":"Boolean", + "documentation":"

Sets who pays the cost of the request and the data download from the Amazon S3 bucket. Set this value to true if you want the requester to pay instead of the bucket owner, and otherwise to false.

" + }, + "ValidUserList":{ + "shape":"FileShareUserList", + "documentation":"

A list of users in the Active Directory that are allowed to access the file share. Can only be set if Authentication is set to \"ActiveDirectory\".

" + }, + "InvalidUserList":{ + "shape":"FileShareUserList", + "documentation":"

A list of users in the Active Directory that are not allowed to access the file share. Can only be set if Authentication is set to \"ActiveDirectory\".

" + } + }, + "documentation":"

UpdateSMBFileShareInput

" + }, + "UpdateSMBFileShareOutput":{ + "type":"structure", + "members":{ + "FileShareARN":{ + "shape":"FileShareARN", + "documentation":"

The Amazon Resource Name (ARN) of the updated SMB file share.

" + } + }, + "documentation":"

UpdateSMBFileShareOutput

" + }, "UpdateSnapshotScheduleInput":{ "type":"structure", "required":[ @@ -3625,5 +4096,5 @@ "long":{"type":"long"}, "string":{"type":"string"} }, - "documentation":"AWS Storage Gateway Service

AWS Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and AWS's storage infrastructure. The service enables you to securely upload data to the AWS cloud for cost effective backup and rapid disaster recovery.

Use the following links to get started using the AWS Storage Gateway Service API Reference:

AWS Storage Gateway resource IDs are in uppercase. When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected.

IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS Resource IDs.

For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following:

arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG.

A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee.

For more information, see Announcement: Heads-up – Longer AWS Storage Gateway volume and snapshot IDs coming in 2016.

" + "documentation":"AWS Storage Gateway Service

AWS Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and AWS's storage infrastructure. The service enables you to securely upload data to the AWS cloud for cost effective backup and rapid disaster recovery.

Use the following links to get started using the AWS Storage Gateway Service API Reference:

AWS Storage Gateway resource IDs are in uppercase. When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected.

IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS Resource IDs.

For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following:

arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG.

A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee.

For more information, see Announcement: Heads-up – Longer AWS Storage Gateway volume and snapshot IDs coming in 2016.

" } diff --git a/botocore/data/support/2013-04-15/service-2.json b/botocore/data/support/2013-04-15/service-2.json index 1cf125a9..e86bf787 100644 --- a/botocore/data/support/2013-04-15/service-2.json +++ b/botocore/data/support/2013-04-15/service-2.json @@ -7,6 +7,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceFullName":"AWS Support", + "serviceId":"Support", "signatureVersion":"v4", "targetPrefix":"AWSSupport_20130415" }, diff --git a/botocore/data/swf/2012-01-25/service-2.json b/botocore/data/swf/2012-01-25/service-2.json index b9b2aaea..c7238593 100644 --- a/botocore/data/swf/2012-01-25/service-2.json +++ b/botocore/data/swf/2012-01-25/service-2.json @@ -7,6 +7,7 @@ "protocol":"json", "serviceAbbreviation":"Amazon SWF", "serviceFullName":"Amazon Simple Workflow Service", + "serviceId":"SWF", "signatureVersion":"v4", "targetPrefix":"SimpleWorkflowService", "timestampFormat":"unixTimestamp", diff --git a/botocore/data/transcribe/2017-10-26/service-2.json b/botocore/data/transcribe/2017-10-26/service-2.json index e909759a..ce08faa6 100644 --- a/botocore/data/transcribe/2017-10-26/service-2.json +++ b/botocore/data/transcribe/2017-10-26/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceFullName":"Amazon Transcribe Service", + "serviceId":"Transcribe", "signatureVersion":"v4", "signingName":"transcribe", "targetPrefix":"Transcribe", @@ -38,6 +39,7 @@ "errors":[ {"shape":"NotFoundException"}, {"shape":"LimitExceededException"}, + {"shape":"BadRequestException"}, {"shape":"InternalFailureException"} ], "documentation":"

Deletes a vocabulary from Amazon Transcribe.

" @@ -132,9 +134,10 @@ {"shape":"BadRequestException"}, {"shape":"LimitExceededException"}, {"shape":"InternalFailureException"}, - {"shape":"NotFoundException"} + {"shape":"NotFoundException"}, + {"shape":"ConflictException"} ], - "documentation":"

Updates an existing vocabulary with new values.

" + "documentation":"

Updates an existing vocabulary with new values. The UpdateVocabulary operation overwrites all of the existing information with the values that you provide in the request.

" } }, "shapes":{ @@ -143,7 +146,7 @@ "members":{ "Message":{"shape":"FailureReason"} }, - "documentation":"

Your request didn't pass one or more validation tests. For example, a name already exists when createing a resource or a name may not exist when getting a transcription job or custom vocabulary. See the exception Message field for more information.

", + "documentation":"

Your request didn't pass one or more validation tests. For example, a name already exists when creating a resource or a name may not exist when getting a transcription job or custom vocabulary. See the exception Message field for more information.

", "exception":true }, "Boolean":{"type":"boolean"}, @@ -152,7 +155,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The JobName field is a duplicate of a previously entered job name. Resend your request with a different name.

", + "documentation":"

When you are using the StartTranscriptionJob operation, the JobName field is a duplicate of a previously entered job name. Resend your request with a different name.

When you are using the UpdateVocabulary operation, there are two jobs running at the same time. Resend the second request later.

", "exception":true }, "CreateVocabularyRequest":{ @@ -415,9 +418,20 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

We can't find the requested transcription job or custom vocabulary. Check the name and try your request again.

", + "documentation":"

We can't find the requested resource. Check the name and try your request again.

", "exception":true }, + "OutputBucketName":{ + "type":"string", + "pattern":"[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9]" + }, + "OutputLocationType":{ + "type":"string", + "enum":[ + "CUSTOMER_BUCKET", + "SERVICE_BUCKET" + ] + }, "Phrase":{ "type":"string", "max":256, @@ -456,7 +470,7 @@ "members":{ "TranscriptionJobName":{ "shape":"TranscriptionJobName", - "documentation":"

The name of the job. The name must be unique within an AWS account.

" + "documentation":"

The name of the job. You can't use the strings \".\" or \"..\" in the job name. The name must be unique within an AWS account.

" }, "LanguageCode":{ "shape":"LanguageCode", @@ -474,6 +488,10 @@ "shape":"Media", "documentation":"

An object that describes the input media for a transcription job.

" }, + "OutputBucketName":{ + "shape":"OutputBucketName", + "documentation":"

The location where the transcription is stored.

If you set the OutputBucketName, Amazon Transcribe puts the transcription in the specified S3 bucket. When you call the GetTranscriptionJob operation, the operation returns this location in the TranscriptFileUri field. The S3 bucket must have permissions that allow Amazon Transcribe to put files in the bucket. For more information, see Permissions Required for IAM User Roles.

If you don't set the OutputBucketName, Amazon Transcribe generates a pre-signed URL, a shareable URL that provides secure access to your transcription, and returns it in the TranscriptFileUri field. Use this URL to download the transcription.

" + }, "Settings":{ "shape":"Settings", "documentation":"

A Settings object that provides optional settings for a transcription job.

" @@ -495,17 +513,17 @@ "members":{ "TranscriptFileUri":{ "shape":"Uri", - "documentation":"

The S3 location where the transcription result is stored. Use this URI to access the results of the transcription job.

" + "documentation":"

The location where the transcription is stored.

Use this URI to access the transcription. If you specified an S3 bucket in the OutputBucketName field when you created the job, this is the URI of that bucket. If you chose to store the transcription in Amazon Transcribe, this is a shareable URL that provides secure access to that location.

" } }, - "documentation":"

Describes the output of a transcription job.

" + "documentation":"

Identifies the location of a transcription.

" }, "TranscriptionJob":{ "type":"structure", "members":{ "TranscriptionJobName":{ "shape":"TranscriptionJobName", - "documentation":"

A name to identify the transcription job.

" + "documentation":"

The name of the transcription job.

" }, "TranscriptionJobStatus":{ "shape":"TranscriptionJobStatus", @@ -525,7 +543,7 @@ }, "Media":{ "shape":"Media", - "documentation":"

An object that describes the input media for a transcription job.

" + "documentation":"

An object that describes the input media for the transcription job.

" }, "Transcript":{ "shape":"Transcript", @@ -533,11 +551,11 @@ }, "CreationTime":{ "shape":"DateTime", - "documentation":"

Timestamp of the date and time that the job was created.

" + "documentation":"

A timestamp that shows when the job was created.

" }, "CompletionTime":{ "shape":"DateTime", - "documentation":"

Timestamp of the date and time that the job completed.

" + "documentation":"

A timestamp that shows when the job was completed.

" }, "FailureReason":{ "shape":"FailureReason", @@ -545,7 +563,7 @@ }, "Settings":{ "shape":"Settings", - "documentation":"

Optional settings for the transcription job.

" + "documentation":"

Optional settings for the transcription job. Use these settings to turn on speaker recognition, to set the maximum number of speakers that should be identified and to specify a custom vocabulary to use when processing the transcription job.

" } }, "documentation":"

Describes an asynchronous transcription job that was created with the StartTranscriptionJob operation.

" @@ -573,15 +591,15 @@ "members":{ "TranscriptionJobName":{ "shape":"TranscriptionJobName", - "documentation":"

The name assigned to the transcription job when it was created.

" + "documentation":"

The name of the transcription job.

" }, "CreationTime":{ "shape":"DateTime", - "documentation":"

Timestamp of the date and time that the job was created.

" + "documentation":"

A timestamp that shows when the job was created.

" }, "CompletionTime":{ "shape":"DateTime", - "documentation":"

Timestamp of the date and time that the job completed.

" + "documentation":"

A timestamp that shows when the job was completed.

" }, "LanguageCode":{ "shape":"LanguageCode", @@ -593,7 +611,11 @@ }, "FailureReason":{ "shape":"FailureReason", - "documentation":"

If the TranscriptionJobStatus field is FAILED, this field contains a description of the error.

" + "documentation":"

If the TranscriptionJobStatus field is FAILED, a description of the error.

" + }, + "OutputLocationType":{ + "shape":"OutputLocationType", + "documentation":"

Indicates the location of the output of the transcription job.

If the value is CUSTOMER_BUCKET then the location is the S3 bucket specified in the outputBucketName field when the transcription job was started with the StartTranscriptionJob operation.

If the value is SERVICE_BUCKET then the output is stored by Amazon Transcribe and can be retrieved using the URI in the GetTranscriptionJob response's TranscriptFileUri field.

" } }, "documentation":"

Provides a summary of information about a transcription job.

" diff --git a/botocore/data/workdocs/2016-05-01/service-2.json b/botocore/data/workdocs/2016-05-01/service-2.json index 1775b8d7..be8ff5d1 100644 --- a/botocore/data/workdocs/2016-05-01/service-2.json +++ b/botocore/data/workdocs/2016-05-01/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"rest-json", "serviceFullName":"Amazon WorkDocs", + "serviceId":"WorkDocs", "signatureVersion":"v4", "uid":"workdocs-2016-05-01" }, diff --git a/botocore/data/workmail/2017-10-01/service-2.json b/botocore/data/workmail/2017-10-01/service-2.json index 934f650d..d0ca5c1e 100644 --- a/botocore/data/workmail/2017-10-01/service-2.json +++ b/botocore/data/workmail/2017-10-01/service-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceFullName":"Amazon WorkMail", + "serviceId":"WorkMail", "signatureVersion":"v4", "targetPrefix":"WorkMailService", "uid":"workmail-2017-10-01" diff --git a/botocore/data/xray/2016-04-12/service-2.json b/botocore/data/xray/2016-04-12/service-2.json index 9a83a978..292a35d8 100644 --- a/botocore/data/xray/2016-04-12/service-2.json +++ b/botocore/data/xray/2016-04-12/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"xray", "protocol":"rest-json", "serviceFullName":"AWS X-Ray", + "serviceId":"XRay", "signatureVersion":"v4", "uid":"xray-2016-04-12" }, diff --git a/botocore/model.py b/botocore/model.py index cf58128b..873530da 100644 --- a/botocore/model.py +++ b/botocore/model.py @@ -283,6 +283,10 @@ class ServiceModel(object): else: return self.endpoint_prefix + @CachedProperty + def service_id(self): + return self._get_metadata_property('serviceId') + @CachedProperty def signing_name(self): """The name to use when computing signatures. diff --git a/botocore/response.py b/botocore/response.py index 3a436ef3..7b58c9e7 100644 --- a/botocore/response.py +++ b/botocore/response.py @@ -38,6 +38,8 @@ class StreamingBody(object): is raised. 
""" + _DEFAULT_CHUNK_SIZE = 1024 + def __init__(self, raw_stream, content_length): self._raw_stream = raw_stream self._content_length = content_length @@ -80,6 +82,47 @@ class StreamingBody(object): self._verify_content_length() return chunk + def __iter__(self): + """Return an iterator to yield 1k chunks from the raw stream. + """ + return self.iter_chunks(self._DEFAULT_CHUNK_SIZE) + + def iter_lines(self, chunk_size=1024): + """Return an iterator to yield lines from the raw stream. + + This is achieved by reading chunk of bytes (of size chunk_size) at a + time from the raw stream, and then yielding lines from there. + """ + pending = None + for chunk in self.iter_chunks(chunk_size): + if pending is not None: + chunk = pending + chunk + + lines = chunk.splitlines() + + if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]: + # We might be in the 'middle' of a line. Hence we keep the + # last line as pending. + pending = lines.pop() + else: + pending = None + + for line in lines: + yield line + + if pending is not None: + yield pending + + def iter_chunks(self, chunk_size=_DEFAULT_CHUNK_SIZE): + """Return an iterator to yield chunks of chunk_size bytes from the raw + stream. 
+ """ + while True: + current_chunk = self.read(chunk_size) + if current_chunk == b"": + break + yield current_chunk + def _verify_content_length(self): # See: https://github.com/kennethreitz/requests/issues/1855 # Basically, our http library doesn't do this for us, so we have diff --git a/botocore/utils.py b/botocore/utils.py index b490ace2..0de8e578 100644 --- a/botocore/utils.py +++ b/botocore/utils.py @@ -161,7 +161,7 @@ def set_value_from_jmespath(source, expression, value, is_first=True): class InstanceMetadataFetcher(object): def __init__(self, timeout=DEFAULT_METADATA_SERVICE_TIMEOUT, num_attempts=1, url=METADATA_SECURITY_CREDENTIALS_URL, - env=None): + env=None, user_agent=None): self._timeout = timeout self._num_attempts = num_attempts self._url = url @@ -169,15 +169,20 @@ class InstanceMetadataFetcher(object): env = os.environ.copy() self._disabled = env.get('AWS_EC2_METADATA_DISABLED', 'false').lower() self._disabled = self._disabled == 'true' + self._user_agent = user_agent def _get_request(self, url, timeout, num_attempts=1): if self._disabled: logger.debug("Access to EC2 metadata has been disabled.") raise _RetriesExceededError() + headers = {} + if self._user_agent is not None: + headers['User-Agent'] = self._user_agent + for i in range(num_attempts): try: - response = requests.get(url, timeout=timeout) + response = requests.get(url, timeout=timeout, headers=headers) except RETRYABLE_HTTP_ERRORS as e: logger.debug("Caught exception while trying to retrieve " "credentials: %s", e, exc_info=True) @@ -1020,6 +1025,8 @@ class ContainerMetadataFetcher(object): def __init__(self, session=None, sleep=time.sleep): if session is None: session = requests.Session() + session.trust_env = False + session.proxies = {} self._session = session self._sleep = sleep diff --git a/docs/source/conf.py b/docs/source/conf.py index 49c5c6b4..2e3c6ec9 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -54,7 +54,7 @@ copyright = u'2013, Mitch Garnaat' # The short 
X.Y version. version = '1.10.' # The full version, including alpha/beta/rc tags. -release = '1.10.15' +release = '1.10.55' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/tests/functional/test_endpoints.py b/tests/functional/test_endpoints.py index 804f47b8..03a081f4 100644 --- a/tests/functional/test_endpoints.py +++ b/tests/functional/test_endpoints.py @@ -10,72 +10,134 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -import os -import json from nose.tools import assert_equal from botocore.session import get_session -# Several services have names that don't match for one reason or another. SERVICE_RENAMES = { - 'application-autoscaling': 'autoscaling', - 'appstream': 'appstream2', - 'autoscaling-plans': 'autoscaling', - 'dynamodbstreams': 'streams.dynamodb', - 'cloudwatch': 'monitoring', - 'efs': 'elasticfilesystem', - 'elb': 'elasticloadbalancing', - 'elbv2': 'elasticloadbalancing', - 'emr': 'elasticmapreduce', - 'iot-data': 'data.iot', - 'meteringmarketplace': 'metering.marketplace', - 'opsworkscm': 'opsworks-cm', - 'ses': 'email', - 'stepfunctions': 'states', - 'lex-runtime': 'runtime.lex', - 'mturk': 'mturk-requester', - 'resourcegroupstaggingapi': 'tagging', - 'sagemaker-runtime': 'runtime.sagemaker', - 'lex-models': 'models.lex', - 'marketplace-entitlement': 'entitlement.marketplace', - 'pricing': 'api.pricing', - 'mediastore-data': 'data.mediastore', - 'iot-jobs-data': 'data.jobs.iot', - 'kinesis-video-media': 'kinesisvideo', - 'kinesis-video-archived-media': 'kinesisvideo', - 'alexaforbusiness': 'a4b', + # Actual service name we use -> Allowed computed service name. 
+ 'alexaforbusiness': 'alexa-for-business', + 'apigateway': 'api-gateway', + 'application-autoscaling': 'application-auto-scaling', + 'autoscaling': 'auto-scaling', + 'autoscaling-plans': 'auto-scaling-plans', + 'ce': 'cost-explorer', + 'cloudhsmv2': 'cloudhsm-v2', + 'cloudsearchdomain': 'cloudsearch-domain', + 'cognito-idp': 'cognito-identity-provider', + 'config': 'config-service', + 'cur': 'cost-and-usage-report-service', + 'datapipeline': 'data-pipeline', + 'directconnect': 'direct-connect', + 'devicefarm': 'device-farm', + 'discovery': 'application-discovery-service', + 'dms': 'database-migration-service', + 'ds': 'directory-service', + 'dynamodbstreams': 'dynamodb-streams', + 'elasticbeanstalk': 'elastic-beanstalk', + 'elastictranscoder': 'elastic-transcoder', + 'elb': 'elastic-load-balancing', + 'elbv2': 'elastic-load-balancing-v2', + 'es': 'elasticsearch-service', + 'events': 'cloudwatch-events', + 'iot-data': 'iot-data-plane', + 'iot-jobs-data': 'iot-jobs-data-plane', + 'iot1click-devices': 'iot-1click-devices-service', + 'iot1click-projects': 'iot-1click-projects', + 'kinesisanalytics': 'kinesis-analytics', + 'kinesisvideo': 'kinesis-video', + 'lex-models': 'lex-model-building-service', + 'lex-runtime': 'lex-runtime-service', + 'logs': 'cloudwatch-logs', + 'machinelearning': 'machine-learning', + 'marketplacecommerceanalytics': 'marketplace-commerce-analytics', + 'marketplace-entitlement': 'marketplace-entitlement-service', + 'meteringmarketplace': 'marketplace-metering', + 'mgh': 'migration-hub', + 'resourcegroupstaggingapi': 'resource-groups-tagging-api', + 'route53': 'route-53', + 'route53domains': 'route-53-domains', + 'sdb': 'simpledb', + 'secretsmanager': 'secrets-manager', + 'serverlessrepo': 'serverlessapplicationrepository', + 'servicecatalog': 'service-catalog', + 'stepfunctions': 'sfn', + 'storagegateway': 'storage-gateway', } -BLACKLIST = [ + +ENDPOINT_PREFIX_OVERRIDE = { + # entry in endpoints.json -> actual endpoint prefix. 
+ # The autoscaling-* services actually send requests to the + # autoscaling service, but they're exposed as separate clients + # in botocore. + 'autoscaling-plans': 'autoscaling', + 'application-autoscaling': 'autoscaling', + # For neptune, we send requests to the RDS endpoint. + 'neptune': 'rds', +} +NOT_SUPPORTED_IN_SDK = [ 'mobileanalytics', ] def test_endpoint_matches_service(): - backwards_renames = dict((v, k) for k, v in SERVICE_RENAMES.items()) + # This verifies client names match up with data from the endpoints.json + # file. We want to verify that every entry in the endpoints.json + # file corresponds to a client we can construct via + # session.create_client(...). + # So first we get a list of all the service names in the endpoints + # file. session = get_session() loader = session.get_component('data_loader') - expected_services = set(loader.list_available_services('service-2')) + endpoints = loader.load_data('endpoints') + # A service can be in multiple partitions so we're using + # a set here to remove dupes. + services_in_endpoints_file = set([]) + for partition in endpoints['partitions']: + for service in partition['services']: + # There are some services we don't support in the SDK + # so we don't need to add them to the list of services + # we need to check. + if service not in NOT_SUPPORTED_IN_SDK: + services_in_endpoints_file.add(service) - pdir = os.path.dirname - endpoints_path = os.path.join(pdir(pdir(pdir(__file__))), - 'botocore', 'data', 'endpoints.json') - with open(endpoints_path, 'r') as f: - data = json.loads(f.read()) - for partition in data['partitions']: - for service in partition['services'].keys(): - service = backwards_renames.get(service, service) - if service not in BLACKLIST: - yield _assert_endpoint_is_service, service, expected_services + # Now we need to cross check them against services we know about. + # The entries in endpoints.json are keyed off of the endpoint + # prefix. 
We don't directly have that data, so we have to load + # every service model and look up its endpoint prefix in its + # ``metadata`` section. + known_services = loader.list_available_services('service-2') + known_endpoint_prefixes = [ + session.get_service_model(service_name).endpoint_prefix + for service_name in known_services + ] + + # Now we go through every known endpoint prefix in the endpoints.json + # file and ensure it maps to an endpoint prefix we've seen + # in a service model. + for endpoint_prefix in services_in_endpoints_file: + # Check for an override where we know that an entry + # in the endpoints.json actually maps to a different endpoint + # prefix. + endpoint_prefix = ENDPOINT_PREFIX_OVERRIDE.get(endpoint_prefix, + endpoint_prefix) + yield (_assert_known_endpoint_prefix, + endpoint_prefix, + known_endpoint_prefixes) -def _assert_endpoint_is_service(service, expected_services): - assert service in expected_services +def _assert_known_endpoint_prefix(endpoint_prefix, known_endpoint_prefixes): + assert endpoint_prefix in known_endpoint_prefixes def test_service_name_matches_endpoint_prefix(): - # Generates tests for each service to verify that the endpoint prefix - # matches the service name unless there is an explicit exception. + # Generates tests for each service to verify that the computed service + # named based on the service id matches the service name used to + # create a client (i.e the directory name in botocore/data) + # unless there is an explicit exception. + # If there model has no serviceId then we fall back to the endpoint + # prefix. 
session = get_session() loader = session.get_component('data_loader') @@ -84,19 +146,18 @@ def test_service_name_matches_endpoint_prefix(): services = loader.list_available_services('service-2') for service in services: - yield _assert_service_name_matches_endpoint_prefix, loader, service + yield _assert_service_name_matches_endpoint_prefix, session, service -def _assert_service_name_matches_endpoint_prefix(loader, service_name): - # Load the service model and grab its endpoint prefix - service_model = loader.load_service_model(service_name, 'service-2') - endpoint_prefix = service_model['metadata']['endpointPrefix'] +def _assert_service_name_matches_endpoint_prefix(session, service_name): + service_model = session.get_service_model(service_name) + computed_name = service_model.service_id.replace(' ', '-').lower() # Handle known exceptions where we have renamed the service directory # for one reason or another. - expected_endpoint_prefix = SERVICE_RENAMES.get(service_name, service_name) + actual_service_name = SERVICE_RENAMES.get(service_name, service_name) assert_equal( - endpoint_prefix, expected_endpoint_prefix, - "Service name `%s` does not match expected endpoint " - "prefix `%s`, actual: `%s`" % ( - service_name, expected_endpoint_prefix, endpoint_prefix)) + computed_name, actual_service_name, + "Actual service name `%s` does not match expected service name " + "we computed: `%s`" % ( + actual_service_name, computed_name)) diff --git a/tests/functional/test_s3.py b/tests/functional/test_s3.py index 5ccfc6d8..0ce15572 100644 --- a/tests/functional/test_s3.py +++ b/tests/functional/test_s3.py @@ -544,6 +544,10 @@ def test_correct_url_used_for_s3(): signature_version='s3v4', is_secure=False, expected_url=( 'http://bucket.s3.us-west-1.amazonaws.com/key')) + yield t.case( + region='us-west-1', bucket='bucket-with-num-1', key='key', + signature_version='s3v4', is_secure=False, + expected_url='http://bucket-with-num-1.s3.us-west-1.amazonaws.com/key') # Regions 
outside of the 'aws' partition. # These should still default to virtual hosted addressing @@ -592,6 +596,25 @@ def test_correct_url_used_for_s3(): yield t.case( region='us-east-1', bucket='bucket.dot', key='key', expected_url='https://s3.amazonaws.com/bucket.dot/key') + yield t.case( + region='us-east-1', bucket='BucketName', key='key', + expected_url='https://s3.amazonaws.com/BucketName/key') + yield t.case( + region='us-west-1', bucket='bucket_name', key='key', + expected_url='https://s3.us-west-1.amazonaws.com/bucket_name/key') + yield t.case( + region='us-west-1', bucket='-bucket-name', key='key', + expected_url='https://s3.us-west-1.amazonaws.com/-bucket-name/key') + yield t.case( + region='us-west-1', bucket='bucket-name-', key='key', + expected_url='https://s3.us-west-1.amazonaws.com/bucket-name-/key') + yield t.case( + region='us-west-1', bucket='aa', key='key', + expected_url='https://s3.us-west-1.amazonaws.com/aa/key') + yield t.case( + region='us-west-1', bucket='a'*64, key='key', + expected_url=('https://s3.us-west-1.amazonaws.com/%s/key' % ('a' * 64)) + ) # Custom endpoint url should always be used. 
yield t.case( diff --git a/tests/unit/test_client.py b/tests/unit/test_client.py index dd7ef694..b3bb6e9a 100644 --- a/tests/unit/test_client.py +++ b/tests/unit/test_client.py @@ -758,8 +758,8 @@ class TestAutoGeneratedClient(unittest.TestCase): creator = self.create_client_creator() service_client = creator.create_client('myservice', 'us-west-2') self.assertEqual(sorted(service_client.waiter_names), - sorted(['waiter_1', 'waiter_2'])) - self.assertTrue(hasattr(service_client.get_waiter('waiter_1'), 'wait')) + sorted(['waiter1', 'waiter2'])) + self.assertTrue(hasattr(service_client.get_waiter('waiter1'), 'wait')) def test_service_has_no_waiter_configs(self): self.loader.load_service_model.side_effect = [ diff --git a/tests/unit/test_model.py b/tests/unit/test_model.py index da667828..c48df029 100644 --- a/tests/unit/test_model.py +++ b/tests/unit/test_model.py @@ -35,7 +35,8 @@ class TestServiceModel(unittest.TestCase): def setUp(self): self.model = { 'metadata': {'protocol': 'query', - 'endpointPrefix': 'endpoint-prefix'}, + 'endpointPrefix': 'endpoint-prefix', + 'serviceId': 'MyService'}, 'documentation': 'Documentation value', 'operations': {}, 'shapes': { @@ -57,6 +58,9 @@ class TestServiceModel(unittest.TestCase): def test_service_name_defaults_to_endpoint_prefix(self): self.assertEqual(self.service_model.service_name, 'endpoint-prefix') + def test_service_id(self): + self.assertEqual(self.service_model.service_id, 'MyService') + def test_operation_does_not_exist(self): with self.assertRaises(model.OperationNotFoundError): self.service_model.operation_model('NoExistOperation') diff --git a/tests/unit/test_response.py b/tests/unit/test_response.py index 2c0e1bd7..39daf58d 100644 --- a/tests/unit/test_response.py +++ b/tests/unit/test_response.py @@ -41,6 +41,17 @@ XMLBODY2 = (b'' class TestStreamWrapper(unittest.TestCase): + + def assert_lines(self, line_iterator, expected_lines): + for expected_line in expected_lines: + self.assertEqual( + 
next(line_iterator), + expected_line, + ) + # We should have exhausted the iterator. + with self.assertRaises(StopIteration): + next(line_iterator) + def test_streaming_wrapper_validates_content_length(self): body = six.BytesIO(b'1234567890') stream = response.StreamingBody(body, content_length=10) @@ -75,6 +86,56 @@ class TestStreamWrapper(unittest.TestCase): stream.close() self.assertTrue(body.closed) + def test_default_iter_behavior(self): + body = six.BytesIO(b'a' * 2048) + stream = response.StreamingBody(body, content_length=2048) + chunks = list(stream) + self.assertEqual(len(chunks), 2) + self.assertEqual(chunks, [b'a' * 1024, b'a' * 1024]) + + def test_iter_chunks_single_byte(self): + body = six.BytesIO(b'abcde') + stream = response.StreamingBody(body, content_length=5) + chunks = list(stream.iter_chunks(chunk_size=1)) + self.assertEqual(chunks, [b'a', b'b', b'c', b'd', b'e']) + + def test_iter_chunks_with_leftover(self): + body = six.BytesIO(b'abcde') + stream = response.StreamingBody(body, content_length=5) + chunks = list(stream.iter_chunks(chunk_size=2)) + self.assertEqual(chunks, [b'ab', b'cd', b'e']) + + def test_iter_chunks_single_chunk(self): + body = six.BytesIO(b'abcde') + stream = response.StreamingBody(body, content_length=5) + chunks = list(stream.iter_chunks(chunk_size=1024)) + self.assertEqual(chunks, [b'abcde']) + + def test_streaming_line_iterator(self): + body = six.BytesIO(b'1234567890\n1234567890\n12345') + stream = response.StreamingBody(body, content_length=27) + self.assert_lines( + stream.iter_lines(), + [b'1234567890', b'1234567890', b'12345'], + ) + + def test_streaming_line_iterator_ends_newline(self): + body = six.BytesIO(b'1234567890\n1234567890\n12345\n') + stream = response.StreamingBody(body, content_length=28) + self.assert_lines( + stream.iter_lines(), + [b'1234567890', b'1234567890', b'12345'], + ) + + def test_streaming_line_iter_chunk_sizes(self): + for chunk_size in range(1, 30): + body = 
six.BytesIO(b'1234567890\n1234567890\n12345') + stream = response.StreamingBody(body, content_length=27) + self.assert_lines( + stream.iter_lines(chunk_size), + [b'1234567890', b'1234567890', b'12345'], + ) + class TestGetResponse(BaseResponseTest): maxDiff = None diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index dc3c0890..054aee45 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -176,6 +176,12 @@ class TestTransformName(unittest.TestCase): transformed = xform_name('IPV6', '_') self.assertEqual(transformed, 'ipv6') + def test_s3_partial_rename(self): + transformed = xform_name('s3Resources', '-') + self.assertEqual(transformed, 's3-resources') + transformed = xform_name('s3Resources', '_') + self.assertEqual(transformed, 's3_resources') + class TestValidateJMESPathForSet(unittest.TestCase): def setUp(self): @@ -1558,6 +1564,12 @@ class TestContainerMetadataFetcher(unittest.TestCase): fetcher.retrieve_full_uri(full_uri) self.assertFalse(self.http.get.called) + def test_default_session_disables_proxies(self): + with mock.patch('botocore.utils.requests.Session') as session: + fetcher = ContainerMetadataFetcher() + self.assertFalse(session.return_value.trust_env) + self.assertEqual(session.return_value.proxies, {}) + def test_can_specify_extra_headers_are_merged(self): headers = { # The 'Accept' header will override the @@ -1752,3 +1764,9 @@ class TestInstanceMetadataFetcher(unittest.TestCase): } self.assertEqual(result, expected_result) + def test_includes_user_agent_header(self): + user_agent = 'my-user-agent' + InstanceMetadataFetcher( + user_agent=user_agent).retrieve_iam_role_credentials() + headers = self._requests.get.call_args[1]['headers'] + self.assertEqual(headers['User-Agent'], user_agent)