diff --git a/PKG-INFO b/PKG-INFO index 96016f95..0b5d41cb 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,6 +1,6 @@ -Metadata-Version: 1.2 +Metadata-Version: 2.1 Name: botocore -Version: 1.20.0 +Version: 1.20.35 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services @@ -128,3 +128,4 @@ Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Requires-Python: >= 2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.* +Provides-Extra: crt diff --git a/botocore/__init__.py b/botocore/__init__.py index ca5ea32f..4ecced5b 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re import logging -__version__ = '1.20.0' +__version__ = '1.20.35' class NullHandler(logging.Handler): diff --git a/botocore/credentials.py b/botocore/credentials.py index cc229dad..1b0eb820 100644 --- a/botocore/credentials.py +++ b/botocore/credentials.py @@ -283,8 +283,14 @@ class JSONFileCache(object): CACHE_DIR = os.path.expanduser(os.path.join('~', '.aws', 'boto', 'cache')) - def __init__(self, working_dir=CACHE_DIR): + def __init__(self, working_dir=CACHE_DIR, dumps_func=None): self._working_dir = working_dir + if dumps_func is None: + dumps_func = self._default_dumps + self._dumps = dumps_func + + def _default_dumps(self, obj): + return json.dumps(obj, default=_serialize_if_needed) def __contains__(self, cache_key): actual_key = self._convert_cache_key(cache_key) @@ -302,7 +308,7 @@ class JSONFileCache(object): def __setitem__(self, cache_key, value): full_key = self._convert_cache_key(cache_key) try: - file_content = json.dumps(value, default=_serialize_if_needed) + file_content = self._dumps(value) except (TypeError, ValueError): raise ValueError("Value cannot be cached, must be " "JSON serializable: %s" % value) diff --git a/botocore/data/accessanalyzer/2019-11-01/paginators-1.json b/botocore/data/accessanalyzer/2019-11-01/paginators-1.json index 64553f7b..10d81836 100644 --- a/botocore/data/accessanalyzer/2019-11-01/paginators-1.json +++ b/botocore/data/accessanalyzer/2019-11-01/paginators-1.json @@ -23,6 +23,24 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "findings" + }, + "ListAccessPreviewFindings": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "findings" + }, + "ListAccessPreviews": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "accessPreviews" + }, + "ValidatePolicy": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "findings" } } } diff --git a/botocore/data/accessanalyzer/2019-11-01/service-2.json b/botocore/data/accessanalyzer/2019-11-01/service-2.json index 6677c91b..bcc382af 100644 --- a/botocore/data/accessanalyzer/2019-11-01/service-2.json +++ b/botocore/data/accessanalyzer/2019-11-01/service-2.json @@ -30,6 +30,27 @@ "documentation":"

Retroactively applies the archive rule to existing findings that meet the archive rule criteria.

", "idempotent":true }, + "CreateAccessPreview":{ + "name":"CreateAccessPreview", + "http":{ + "method":"PUT", + "requestUri":"/access-preview", + "responseCode":200 + }, + "input":{"shape":"CreateAccessPreviewRequest"}, + "output":{"shape":"CreateAccessPreviewResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Creates an access preview that allows you to preview Access Analyzer findings for your resource before deploying resource permissions.

", + "idempotent":true + }, "CreateAnalyzer":{ "name":"CreateAnalyzer", "http":{ @@ -67,7 +88,7 @@ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Creates an archive rule for the specified analyzer. Archive rules automatically archive new findings that meet the criteria you define when you create the rule.

", + "documentation":"

Creates an archive rule for the specified analyzer. Archive rules automatically archive new findings that meet the criteria you define when you create the rule.

To learn about filter keys that you can use to create an archive rule, see Access Analyzer filter keys in the IAM User Guide.

", "idempotent":true }, "DeleteAnalyzer":{ @@ -106,6 +127,24 @@ "documentation":"

Deletes the specified archive rule.

", "idempotent":true }, + "GetAccessPreview":{ + "name":"GetAccessPreview", + "http":{ + "method":"GET", + "requestUri":"/access-preview/{accessPreviewId}", + "responseCode":200 + }, + "input":{"shape":"GetAccessPreviewRequest"}, + "output":{"shape":"GetAccessPreviewResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Retrieves information about an access preview for the specified analyzer.

" + }, "GetAnalyzedResource":{ "name":"GetAnalyzedResource", "http":{ @@ -178,6 +217,43 @@ ], "documentation":"

Retrieves information about the specified finding.

" }, + "ListAccessPreviewFindings":{ + "name":"ListAccessPreviewFindings", + "http":{ + "method":"POST", + "requestUri":"/access-preview/{accessPreviewId}", + "responseCode":200 + }, + "input":{"shape":"ListAccessPreviewFindingsRequest"}, + "output":{"shape":"ListAccessPreviewFindingsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Retrieves a list of access preview findings generated by the specified access preview.

" + }, + "ListAccessPreviews":{ + "name":"ListAccessPreviews", + "http":{ + "method":"GET", + "requestUri":"/access-preview", + "responseCode":200 + }, + "input":{"shape":"ListAccessPreviewsRequest"}, + "output":{"shape":"ListAccessPreviewsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Retrieves a list of access previews for the specified analyzer.

" + }, "ListAnalyzedResources":{ "name":"ListAnalyzedResources", "http":{ @@ -246,7 +322,7 @@ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Retrieves a list of findings generated by the specified analyzer.

To learn about filter keys that you can use to create an archive rule, see Access Analyzer filter keys in the IAM User Guide.

" + "documentation":"

Retrieves a list of findings generated by the specified analyzer.

To learn about filter keys that you can use to retrieve a list of findings, see Access Analyzer filter keys in the IAM User Guide.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -356,6 +432,23 @@ ], "documentation":"

Updates the status for the specified findings.

", "idempotent":true + }, + "ValidatePolicy":{ + "name":"ValidatePolicy", + "http":{ + "method":"POST", + "requestUri":"/policy/validation", + "responseCode":200 + }, + "input":{"shape":"ValidatePolicyRequest"}, + "output":{"shape":"ValidatePolicyResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Requests the validation of a policy and returns a list of findings. The findings help you identify issues and provide actionable recommendations to resolve the issue and enable you to author functional policies that meet security best practices.

" } }, "shapes":{ @@ -372,6 +465,217 @@ }, "exception":true }, + "AccessPointArn":{ + "type":"string", + "pattern":"arn:[^:]*:s3:[^:]*:[^:]*:accesspoint/.*$" + }, + "AccessPointPolicy":{"type":"string"}, + "AccessPreview":{ + "type":"structure", + "required":[ + "analyzerArn", + "configurations", + "createdAt", + "id", + "status" + ], + "members":{ + "analyzerArn":{ + "shape":"AnalyzerArn", + "documentation":"

The ARN of the analyzer used to generate the access preview.

" + }, + "configurations":{ + "shape":"ConfigurationsMap", + "documentation":"

A map of resource ARNs for the proposed resource configuration.

" + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The time at which the access preview was created.

" + }, + "id":{ + "shape":"AccessPreviewId", + "documentation":"

The unique ID for the access preview.

" + }, + "status":{ + "shape":"AccessPreviewStatus", + "documentation":"

The status of the access preview.

" + }, + "statusReason":{ + "shape":"AccessPreviewStatusReason", + "documentation":"

Provides more details about the current status of the access preview.

For example, if the creation of the access preview fails, a Failed status is returned. This failure can be due to an internal issue with the analysis or due to an invalid resource configuration.

" + } + }, + "documentation":"

Contains information about an access preview.

" + }, + "AccessPreviewFinding":{ + "type":"structure", + "required":[ + "changeType", + "createdAt", + "id", + "resourceOwnerAccount", + "resourceType", + "status" + ], + "members":{ + "action":{ + "shape":"ActionList", + "documentation":"

The action in the analyzed policy statement that an external principal has permission to perform.

" + }, + "changeType":{ + "shape":"FindingChangeType", + "documentation":"

Provides context on how the access preview finding compares to existing access identified in Access Analyzer.

For example, a Changed finding with preview status Resolved and existing status Active indicates the existing Active finding would become Resolved as a result of the proposed permissions change.

" + }, + "condition":{ + "shape":"ConditionKeyMap", + "documentation":"

The condition in the analyzed policy statement that resulted in a finding.

" + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The time at which the access preview finding was created.

" + }, + "error":{ + "shape":"String", + "documentation":"

An error.

" + }, + "existingFindingId":{ + "shape":"FindingId", + "documentation":"

The existing ID of the finding in Access Analyzer, provided only for existing findings.

" + }, + "existingFindingStatus":{ + "shape":"FindingStatus", + "documentation":"

The existing status of the finding, provided only for existing findings.

" + }, + "id":{ + "shape":"AccessPreviewFindingId", + "documentation":"

The ID of the access preview finding. This ID uniquely identifies the element in the list of access preview findings and is not related to the finding ID in Access Analyzer.

" + }, + "isPublic":{ + "shape":"Boolean", + "documentation":"

Indicates whether the policy that generated the finding allows public access to the resource.

" + }, + "principal":{ + "shape":"PrincipalMap", + "documentation":"

The external principal that has access to a resource within the zone of trust.

" + }, + "resource":{ + "shape":"String", + "documentation":"

The resource that an external principal has access to. This is the resource associated with the access preview.

" + }, + "resourceOwnerAccount":{ + "shape":"String", + "documentation":"

The AWS account ID that owns the resource. For most AWS resources, the owning account is the account in which the resource was created.

" + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

The type of the resource that can be accessed in the finding.

" + }, + "sources":{ + "shape":"FindingSourceList", + "documentation":"

The sources of the finding. This indicates how the access that generated the finding is granted. It is populated for Amazon S3 bucket findings.

" + }, + "status":{ + "shape":"FindingStatus", + "documentation":"

The preview status of the finding. This is what the status of the finding would be after permissions deployment. For example, a Changed finding with preview status Resolved and existing status Active indicates the existing Active finding would become Resolved as a result of the proposed permissions change.

" + } + }, + "documentation":"

An access preview finding generated by the access preview.

" + }, + "AccessPreviewFindingId":{"type":"string"}, + "AccessPreviewFindingsList":{ + "type":"list", + "member":{"shape":"AccessPreviewFinding"} + }, + "AccessPreviewId":{ + "type":"string", + "pattern":"^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" + }, + "AccessPreviewStatus":{ + "type":"string", + "enum":[ + "COMPLETED", + "CREATING", + "FAILED" + ] + }, + "AccessPreviewStatusReason":{ + "type":"structure", + "required":["code"], + "members":{ + "code":{ + "shape":"AccessPreviewStatusReasonCode", + "documentation":"

The reason code for the current status of the access preview.

" + } + }, + "documentation":"

Provides more details about the current status of the access preview. For example, if the creation of the access preview fails, a Failed status is returned. This failure can be due to an internal issue with the analysis or due to an invalid proposed resource configuration.

" + }, + "AccessPreviewStatusReasonCode":{ + "type":"string", + "enum":[ + "INTERNAL_ERROR", + "INVALID_CONFIGURATION" + ] + }, + "AccessPreviewSummary":{ + "type":"structure", + "required":[ + "analyzerArn", + "createdAt", + "id", + "status" + ], + "members":{ + "analyzerArn":{ + "shape":"AnalyzerArn", + "documentation":"

The ARN of the analyzer used to generate the access preview.

" + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The time at which the access preview was created.

" + }, + "id":{ + "shape":"AccessPreviewId", + "documentation":"

The unique ID for the access preview.

" + }, + "status":{ + "shape":"AccessPreviewStatus", + "documentation":"

The status of the access preview.

" + }, + "statusReason":{"shape":"AccessPreviewStatusReason"} + }, + "documentation":"

Contains a summary of information about an access preview.

" + }, + "AccessPreviewsList":{ + "type":"list", + "member":{"shape":"AccessPreviewSummary"} + }, + "AclCanonicalId":{"type":"string"}, + "AclGrantee":{ + "type":"structure", + "members":{ + "id":{ + "shape":"AclCanonicalId", + "documentation":"

The value specified is the canonical user ID of an AWS account.

" + }, + "uri":{ + "shape":"AclUri", + "documentation":"

Used for granting permissions to a predefined group.

" + } + }, + "documentation":"

You specify each grantee as a type-value pair using one of these types. You can specify only one type of grantee. For more information, see PutBucketAcl.

", + "union":true + }, + "AclPermission":{ + "type":"string", + "enum":[ + "READ", + "WRITE", + "READ_ACP", + "WRITE_ACP", + "FULL_CONTROL" + ] + }, + "AclUri":{"type":"string"}, "ActionList":{ "type":"list", "member":{"shape":"String"} @@ -511,7 +815,7 @@ }, "statusReason":{ "shape":"StatusReason", - "documentation":"

The statusReason provides more details about the current status of the analyzer. For example, if the creation for the analyzer fails, a Failed status is displayed. For an analyzer with organization as the type, this failure can be due to an issue with creating the service-linked roles required in the member accounts of the AWS organization.

" + "documentation":"

The statusReason provides more details about the current status of the analyzer. For example, if the creation for the analyzer fails, a Failed status is returned. For an analyzer with organization as the type, this failure can be due to an issue with creating the service-linked roles required in the member accounts of the AWS organization.

" }, "tags":{ "shape":"TagsMap", @@ -592,6 +896,39 @@ "key":{"shape":"String"}, "value":{"shape":"String"} }, + "Configuration":{ + "type":"structure", + "members":{ + "iamRole":{ + "shape":"IamRoleConfiguration", + "documentation":"

The access control configuration is for an IAM role.

" + }, + "kmsKey":{ + "shape":"KmsKeyConfiguration", + "documentation":"

The access control configuration is for a KMS key.

" + }, + "s3Bucket":{ + "shape":"S3BucketConfiguration", + "documentation":"

The access control configuration is for an Amazon S3 Bucket.

" + }, + "secretsManagerSecret":{ + "shape":"SecretsManagerSecretConfiguration", + "documentation":"

The access control configuration is for a Secrets Manager secret.

" + }, + "sqsQueue":{ + "shape":"SqsQueueConfiguration", + "documentation":"

The access control configuration is for an SQS queue.

" + } + }, + "documentation":"

Access control configuration structures for your resource. You specify the configuration as a type-value pair. You can specify only one type of access control configuration.

", + "union":true + }, + "ConfigurationsMap":{ + "type":"map", + "key":{"shape":"ConfigurationsMapKey"}, + "value":{"shape":"Configuration"} + }, + "ConfigurationsMapKey":{"type":"string"}, "ConflictException":{ "type":"structure", "required":[ @@ -617,6 +954,38 @@ }, "exception":true }, + "CreateAccessPreviewRequest":{ + "type":"structure", + "required":[ + "analyzerArn", + "configurations" + ], + "members":{ + "analyzerArn":{ + "shape":"AnalyzerArn", + "documentation":"

The ARN of the account analyzer used to generate the access preview. You can only create an access preview for analyzers with an Account type and Active status.

" + }, + "clientToken":{ + "shape":"String", + "documentation":"

A client token.

", + "idempotencyToken":true + }, + "configurations":{ + "shape":"ConfigurationsMap", + "documentation":"

Access control configuration for your resource that is used to generate the access preview. The access preview includes findings for external access allowed to the resource with the proposed access control configuration. The configuration must contain exactly one element.

" + } + } + }, + "CreateAccessPreviewResponse":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"AccessPreviewId", + "documentation":"

The unique ID for the access preview.

" + } + } + }, "CreateAnalyzerRequest":{ "type":"structure", "required":[ @@ -819,7 +1188,7 @@ }, "resourceType":{ "shape":"ResourceType", - "documentation":"

The type of the resource reported in the finding.

" + "documentation":"

The type of the resource identified in the finding.

" }, "sources":{ "shape":"FindingSourceList", @@ -836,6 +1205,14 @@ }, "documentation":"

Contains information about a finding.

" }, + "FindingChangeType":{ + "type":"string", + "enum":[ + "CHANGED", + "NEW", + "UNCHANGED" + ] + }, "FindingId":{"type":"string"}, "FindingIdList":{ "type":"list", @@ -969,6 +1346,37 @@ "type":"list", "member":{"shape":"FindingSummary"} }, + "GetAccessPreviewRequest":{ + "type":"structure", + "required":[ + "accessPreviewId", + "analyzerArn" + ], + "members":{ + "accessPreviewId":{ + "shape":"AccessPreviewId", + "documentation":"

The unique ID for the access preview.

", + "location":"uri", + "locationName":"accessPreviewId" + }, + "analyzerArn":{ + "shape":"AnalyzerArn", + "documentation":"

The ARN of the analyzer used to generate the access preview.

", + "location":"querystring", + "locationName":"analyzerArn" + } + } + }, + "GetAccessPreviewResponse":{ + "type":"structure", + "required":["accessPreview"], + "members":{ + "accessPreview":{ + "shape":"AccessPreview", + "documentation":"

An object that contains information about the access preview.

" + } + } + }, "GetAnalyzedResourceRequest":{ "type":"structure", "required":[ @@ -978,7 +1386,7 @@ "members":{ "analyzerArn":{ "shape":"AnalyzerArn", - "documentation":"

The ARN of the analyzer to retrieve information from.

", + "documentation":"

The ARN of the analyzer to retrieve information from.

", "location":"querystring", "locationName":"analyzerArn" }, @@ -996,7 +1404,7 @@ "members":{ "resource":{ "shape":"AnalyzedResource", - "documentation":"

An AnalyedResource object that contains information that Access Analyzer found when it analyzed the resource.

" + "documentation":"

An AnalyzedResource object that contains information that Access Analyzer found when it analyzed the resource.

" } }, "documentation":"

The response to the request.

" @@ -1064,7 +1472,7 @@ "members":{ "analyzerArn":{ "shape":"AnalyzerArn", - "documentation":"

The ARN of the analyzer that generated the finding.

", + "documentation":"

The ARN of the analyzer that generated the finding.

", "location":"querystring", "locationName":"analyzerArn" }, @@ -1087,6 +1495,18 @@ }, "documentation":"

The response to the request.

" }, + "GranteePrincipal":{"type":"string"}, + "IamRoleConfiguration":{ + "type":"structure", + "members":{ + "trustPolicy":{ + "shape":"IamTrustPolicy", + "documentation":"

The proposed trust policy for the IAM role.

" + } + }, + "documentation":"

The proposed access control configuration for an IAM role. You can propose a configuration for a new IAM role or an existing IAM role that you own by specifying the trust policy. If the configuration is for a new IAM role, you must specify the trust policy. If the configuration is for an existing IAM role that you own and you do not propose the trust policy, the access preview uses the existing trust policy for the role. The proposed trust policy cannot be an empty string. For more information about role trust policy limits, see IAM and STS quotas.

" + }, + "IamTrustPolicy":{"type":"string"}, "InlineArchiveRule":{ "type":"structure", "required":[ @@ -1131,13 +1551,204 @@ "fault":true, "retryable":{"throttling":false} }, + "InternetConfiguration":{ + "type":"structure", + "members":{ + }, + "documentation":"

This configuration sets the Amazon S3 access point network origin to Internet.

" + }, + "IssueCode":{"type":"string"}, + "IssuingAccount":{"type":"string"}, + "KmsConstraintsKey":{"type":"string"}, + "KmsConstraintsMap":{ + "type":"map", + "key":{"shape":"KmsConstraintsKey"}, + "value":{"shape":"KmsConstraintsValue"} + }, + "KmsConstraintsValue":{"type":"string"}, + "KmsGrantConfiguration":{ + "type":"structure", + "required":[ + "granteePrincipal", + "issuingAccount", + "operations" + ], + "members":{ + "constraints":{ + "shape":"KmsGrantConstraints", + "documentation":"

Use this structure to propose allowing cryptographic operations in the grant only when the operation request includes the specified encryption context.

" + }, + "granteePrincipal":{ + "shape":"GranteePrincipal", + "documentation":"

The principal that is given permission to perform the operations that the grant permits.

" + }, + "issuingAccount":{ + "shape":"IssuingAccount", + "documentation":"

The AWS account under which the grant was issued. The account is used to propose KMS grants issued by accounts other than the owner of the key.

" + }, + "operations":{ + "shape":"KmsGrantOperationsList", + "documentation":"

A list of operations that the grant permits.

" + }, + "retiringPrincipal":{ + "shape":"RetiringPrincipal", + "documentation":"

The principal that is given permission to retire the grant by using the RetireGrant operation.

" + } + }, + "documentation":"

A proposed grant configuration for a KMS key. For more information, see CreateGrant.

" + }, + "KmsGrantConfigurationsList":{ + "type":"list", + "member":{"shape":"KmsGrantConfiguration"} + }, + "KmsGrantConstraints":{ + "type":"structure", + "members":{ + "encryptionContextEquals":{ + "shape":"KmsConstraintsMap", + "documentation":"

A list of key-value pairs that must match the encryption context in the cryptographic operation request. The grant allows the operation only when the encryption context in the request is the same as the encryption context specified in this constraint.

" + }, + "encryptionContextSubset":{ + "shape":"KmsConstraintsMap", + "documentation":"

A list of key-value pairs that must be included in the encryption context of the cryptographic operation request. The grant allows the cryptographic operation only when the encryption context in the request includes the key-value pairs specified in this constraint, although it can include additional key-value pairs.

" + } + }, + "documentation":"

Use this structure to propose allowing cryptographic operations in the grant only when the operation request includes the specified encryption context. You can specify only one type of encryption context. An empty map is treated as not specified. For more information, see GrantConstraints.

" + }, + "KmsGrantOperation":{ + "type":"string", + "enum":[ + "CreateGrant", + "Decrypt", + "DescribeKey", + "Encrypt", + "GenerateDataKey", + "GenerateDataKeyPair", + "GenerateDataKeyPairWithoutPlaintext", + "GenerateDataKeyWithoutPlaintext", + "GetPublicKey", + "ReEncryptFrom", + "ReEncryptTo", + "RetireGrant", + "Sign", + "Verify" + ] + }, + "KmsGrantOperationsList":{ + "type":"list", + "member":{"shape":"KmsGrantOperation"} + }, + "KmsKeyConfiguration":{ + "type":"structure", + "members":{ + "grants":{ + "shape":"KmsGrantConfigurationsList", + "documentation":"

A list of proposed grant configurations for the KMS key. If the proposed grant configuration is for an existing key, the access preview uses the proposed list of grant configurations in place of the existing grants. Otherwise, the access preview uses the existing grants for the key.

" + }, + "keyPolicies":{ + "shape":"KmsKeyPoliciesMap", + "documentation":"

Resource policy configuration for the KMS key. The only valid value for the name of the key policy is default. For more information, see Default key policy.

" + } + }, + "documentation":"

Proposed access control configuration for a KMS key. You can propose a configuration for a new KMS key or an existing KMS key that you own by specifying the key policy and KMS grant configuration. If the configuration is for an existing key and you do not specify the key policy, the access preview uses the existing policy for the key. If the access preview is for a new resource and you do not specify the key policy, then the access preview uses the default key policy. The proposed key policy cannot be an empty string. For more information, see Default key policy. For more information about key policy limits, see Resource quotas.

" + }, + "KmsKeyPoliciesMap":{ + "type":"map", + "key":{"shape":"PolicyName"}, + "value":{"shape":"KmsKeyPolicy"} + }, + "KmsKeyPolicy":{"type":"string"}, + "LearnMoreLink":{"type":"string"}, + "ListAccessPreviewFindingsRequest":{ + "type":"structure", + "required":[ + "accessPreviewId", + "analyzerArn" + ], + "members":{ + "accessPreviewId":{ + "shape":"AccessPreviewId", + "documentation":"

The unique ID for the access preview.

", + "location":"uri", + "locationName":"accessPreviewId" + }, + "analyzerArn":{ + "shape":"AnalyzerArn", + "documentation":"

The ARN of the analyzer used to generate the access preview.

" + }, + "filter":{ + "shape":"FilterCriteriaMap", + "documentation":"

Criteria to filter the returned findings.

" + }, + "maxResults":{ + "shape":"Integer", + "documentation":"

The maximum number of results to return in the response.

" + }, + "nextToken":{ + "shape":"Token", + "documentation":"

A token used for pagination of results returned.

" + } + } + }, + "ListAccessPreviewFindingsResponse":{ + "type":"structure", + "required":["findings"], + "members":{ + "findings":{ + "shape":"AccessPreviewFindingsList", + "documentation":"

A list of access preview findings that match the specified filter criteria.

" + }, + "nextToken":{ + "shape":"Token", + "documentation":"

A token used for pagination of results returned.

" + } + } + }, + "ListAccessPreviewsRequest":{ + "type":"structure", + "required":["analyzerArn"], + "members":{ + "analyzerArn":{ + "shape":"AnalyzerArn", + "documentation":"

The ARN of the analyzer used to generate the access preview.

", + "location":"querystring", + "locationName":"analyzerArn" + }, + "maxResults":{ + "shape":"Integer", + "documentation":"

The maximum number of results to return in the response.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"Token", + "documentation":"

A token used for pagination of results returned.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListAccessPreviewsResponse":{ + "type":"structure", + "required":["accessPreviews"], + "members":{ + "accessPreviews":{ + "shape":"AccessPreviewsList", + "documentation":"

A list of access previews retrieved for the analyzer.

" + }, + "nextToken":{ + "shape":"Token", + "documentation":"

A token used for pagination of results returned.

" + } + } + }, "ListAnalyzedResourcesRequest":{ "type":"structure", "required":["analyzerArn"], "members":{ "analyzerArn":{ "shape":"AnalyzerArn", - "documentation":"

The ARN of the analyzer to retrieve a list of analyzed resources from.

" + "documentation":"

The ARN of the analyzer to retrieve a list of analyzed resources from.

" }, "maxResults":{ "shape":"Integer", @@ -1254,7 +1865,7 @@ "members":{ "analyzerArn":{ "shape":"AnalyzerArn", - "documentation":"

The ARN of the analyzer to retrieve findings from.

" + "documentation":"

The ARN of the analyzer to retrieve findings from.

" }, "filter":{ "shape":"FilterCriteriaMap", @@ -1313,12 +1924,61 @@ }, "documentation":"

The response to the request.

" }, + "Locale":{ + "type":"string", + "enum":[ + "DE", + "EN", + "ES", + "FR", + "IT", + "JA", + "KO", + "PT_BR", + "ZH_CN", + "ZH_TW" + ] + }, + "Location":{ + "type":"structure", + "required":[ + "path", + "span" + ], + "members":{ + "path":{ + "shape":"PathElementList", + "documentation":"

A path in a policy, represented as a sequence of path elements.

" + }, + "span":{ + "shape":"Span", + "documentation":"

A span in a policy.

" + } + }, + "documentation":"

A location in a policy that is represented as a path through the JSON representation and a corresponding span.

" + }, + "LocationList":{ + "type":"list", + "member":{"shape":"Location"} + }, "Name":{ "type":"string", "max":255, "min":1, "pattern":"^[A-Za-z][A-Za-z0-9_.-]*$" }, + "NetworkOriginConfiguration":{ + "type":"structure", + "members":{ + "internetConfiguration":{ + "shape":"InternetConfiguration", + "documentation":"

The configuration for the Amazon S3 access point with an Internet origin.

" + }, + "vpcConfiguration":{"shape":"VpcConfiguration"} + }, + "documentation":"

The proposed InternetConfiguration or VpcConfiguration to apply to the Amazon S3 Access point. You can make the access point accessible from the internet, or you can specify that all requests made through that access point must originate from a specific virtual private cloud (VPC). You can specify only one type of network configuration. For more information, see Creating access points.

", + "union":true + }, "OrderBy":{ "type":"string", "enum":[ @@ -1326,6 +1986,66 @@ "DESC" ] }, + "PathElement":{ + "type":"structure", + "members":{ + "index":{ + "shape":"Integer", + "documentation":"

Refers to an index in a JSON array.

" + }, + "key":{ + "shape":"String", + "documentation":"

Refers to a key in a JSON object.

" + }, + "substring":{ + "shape":"Substring", + "documentation":"

Refers to a substring of a literal string in a JSON object.

" + }, + "value":{ + "shape":"String", + "documentation":"

Refers to the value associated with a given key in a JSON object.

" + } + }, + "documentation":"

A single element in a path through the JSON representation of a policy.

", + "union":true + }, + "PathElementList":{ + "type":"list", + "member":{"shape":"PathElement"} + }, + "PolicyDocument":{"type":"string"}, + "PolicyName":{"type":"string"}, + "PolicyType":{ + "type":"string", + "enum":[ + "IDENTITY_POLICY", + "RESOURCE_POLICY", + "SERVICE_CONTROL_POLICY" + ] + }, + "Position":{ + "type":"structure", + "required":[ + "column", + "line", + "offset" + ], + "members":{ + "column":{ + "shape":"Integer", + "documentation":"

The column of the position, starting from 0.

" + }, + "line":{ + "shape":"Integer", + "documentation":"

The line of the position, starting from 1.

" + }, + "offset":{ + "shape":"Integer", + "documentation":"

The offset within the policy that corresponds to the position, starting from 0.

" + } + }, + "documentation":"

A position in a policy.

" + }, "PrincipalMap":{ "type":"map", "key":{"shape":"String"}, @@ -1381,6 +2101,109 @@ "AWS::SecretsManager::Secret" ] }, + "RetiringPrincipal":{"type":"string"}, + "S3AccessPointConfiguration":{ + "type":"structure", + "members":{ + "accessPointPolicy":{ + "shape":"AccessPointPolicy", + "documentation":"

The access point policy.

" + }, + "networkOrigin":{ + "shape":"NetworkOriginConfiguration", + "documentation":"

The proposed Internet and VpcConfiguration to apply to this Amazon S3 access point. If the access preview is for a new resource and neither is specified, the access preview uses Internet for the network origin. If the access preview is for an existing resource and neither is specified, the access preview uses the existing network origin.

" + }, + "publicAccessBlock":{ + "shape":"S3PublicAccessBlockConfiguration", + "documentation":"

The proposed S3PublicAccessBlock configuration to apply to this Amazon S3 Access Point.

" + } + }, + "documentation":"

The configuration for an Amazon S3 access point for the bucket. You can propose up to 10 access points per bucket. If the proposed Amazon S3 access point configuration is for an existing bucket, the access preview uses the proposed access point configuration in place of the existing access points. To propose an access point without a policy, you can provide an empty string as the access point policy. For more information, see Creating access points. For more information about access point policy limits, see Access points restrictions and limitations.

" + }, + "S3AccessPointConfigurationsMap":{ + "type":"map", + "key":{"shape":"AccessPointArn"}, + "value":{"shape":"S3AccessPointConfiguration"} + }, + "S3BucketAclGrantConfiguration":{ + "type":"structure", + "required":[ + "grantee", + "permission" + ], + "members":{ + "grantee":{ + "shape":"AclGrantee", + "documentation":"

The grantee to whom you’re assigning access rights.

" + }, + "permission":{ + "shape":"AclPermission", + "documentation":"

The permissions being granted.

" + } + }, + "documentation":"

A proposed access control list grant configuration for an Amazon S3 bucket. For more information, see How to Specify an ACL.

" + }, + "S3BucketAclGrantConfigurationsList":{ + "type":"list", + "member":{"shape":"S3BucketAclGrantConfiguration"} + }, + "S3BucketConfiguration":{ + "type":"structure", + "members":{ + "accessPoints":{ + "shape":"S3AccessPointConfigurationsMap", + "documentation":"

The configuration of Amazon S3 access points for the bucket.

" + }, + "bucketAclGrants":{ + "shape":"S3BucketAclGrantConfigurationsList", + "documentation":"

The proposed list of ACL grants for the Amazon S3 bucket. You can propose up to 100 ACL grants per bucket. If the proposed grant configuration is for an existing bucket, the access preview uses the proposed list of grant configurations in place of the existing grants. Otherwise, the access preview uses the existing grants for the bucket.

" + }, + "bucketPolicy":{ + "shape":"S3BucketPolicy", + "documentation":"

The proposed bucket policy for the Amazon S3 bucket.

" + }, + "bucketPublicAccessBlock":{ + "shape":"S3PublicAccessBlockConfiguration", + "documentation":"

The proposed block public access configuration for the Amazon S3 bucket.

" + } + }, + "documentation":"

Proposed access control configuration for an Amazon S3 bucket. You can propose a configuration for a new Amazon S3 bucket or an existing Amazon S3 bucket that you own by specifying the Amazon S3 bucket policy, bucket ACLs, bucket BPA settings, and Amazon S3 access points attached to the bucket. If the configuration is for an existing Amazon S3 bucket and you do not specify the Amazon S3 bucket policy, the access preview uses the existing policy attached to the bucket. If the access preview is for a new resource and you do not specify the Amazon S3 bucket policy, the access preview assumes a bucket without a policy. To propose deletion of an existing bucket policy, you can specify an empty string. For more information about bucket policy limits, see Bucket Policy Examples.

" + }, + "S3BucketPolicy":{"type":"string"}, + "S3PublicAccessBlockConfiguration":{ + "type":"structure", + "required":[ + "ignorePublicAcls", + "restrictPublicBuckets" + ], + "members":{ + "ignorePublicAcls":{ + "shape":"Boolean", + "documentation":"

Specifies whether Amazon S3 should ignore public ACLs for this bucket and objects in this bucket.

" + }, + "restrictPublicBuckets":{ + "shape":"Boolean", + "documentation":"

Specifies whether Amazon S3 should restrict public bucket policies for this bucket.

" + } + }, + "documentation":"

The PublicAccessBlock configuration to apply to this Amazon S3 bucket. If the proposed configuration is for an existing Amazon S3 bucket and the configuration is not specified, the access preview uses the existing setting. If the proposed configuration is for a new bucket and the configuration is not specified, the access preview uses false. If the proposed configuration is for a new access point and the access point BPA configuration is not specified, the access preview uses true. For more information, see PublicAccessBlockConfiguration.

" + }, + "SecretsManagerSecretConfiguration":{ + "type":"structure", + "members":{ + "kmsKeyId":{ + "shape":"SecretsManagerSecretKmsId", + "documentation":"

The proposed ARN, key ID, or alias of the AWS KMS customer master key (CMK).

" + }, + "secretPolicy":{ + "shape":"SecretsManagerSecretPolicy", + "documentation":"

The proposed resource policy defining who can access or manage the secret.

" + } + }, + "documentation":"

The configuration for a Secrets Manager secret. For more information, see CreateSecret.

You can propose a configuration for a new secret or an existing secret that you own by specifying the secret policy and optional KMS encryption key. If the configuration is for an existing secret and you do not specify the secret policy, the access preview uses the existing policy for the secret. If the access preview is for a new resource and you do not specify the policy, the access preview assumes a secret without a policy. To propose deletion of an existing policy, you can specify an empty string. If the proposed configuration is for a new secret and you do not specify the KMS key ID, the access preview uses the default CMK of the AWS account. If you specify an empty string for the KMS key ID, the access preview uses the default CMK of the AWS account. For more information about secret policy limits, see Quotas for AWS Secrets Manager.

" + }, + "SecretsManagerSecretKmsId":{"type":"string"}, + "SecretsManagerSecretPolicy":{"type":"string"}, "ServiceQuotaExceededException":{ "type":"structure", "required":[ @@ -1424,6 +2247,35 @@ }, "documentation":"

The criteria used to sort.

" }, + "Span":{ + "type":"structure", + "required":[ + "end", + "start" + ], + "members":{ + "end":{ + "shape":"Position", + "documentation":"

The end position of the span (exclusive).

" + }, + "start":{ + "shape":"Position", + "documentation":"

The start position of the span (inclusive).

" + } + }, + "documentation":"

A span in a policy. The span consists of a start position (inclusive) and end position (exclusive).

" + }, + "SqsQueueConfiguration":{ + "type":"structure", + "members":{ + "queuePolicy":{ + "shape":"SqsQueuePolicy", + "documentation":"

The proposed resource policy for the SQS queue.

" + } + }, + "documentation":"

The proposed access control configuration for an SQS queue. You can propose a configuration for a new SQS queue or an existing SQS queue that you own by specifying the SQS policy. If the configuration is for an existing SQS queue and you do not specify the SQS policy, the access preview uses the existing SQS policy for the queue. If the access preview is for a new resource and you do not specify the policy, the access preview assumes an SQS queue without a policy. To propose deletion of an existing SQS queue policy, you can specify an empty string for the SQS policy. For more information about SQS policy limits, see Quotas related to policies.

" + }, + "SqsQueuePolicy":{"type":"string"}, "StartResourceScanRequest":{ "type":"structure", "required":[ @@ -1433,7 +2285,7 @@ "members":{ "analyzerArn":{ "shape":"AnalyzerArn", - "documentation":"

The ARN of the analyzer to use to scan the policies applied to the specified resource.

" + "documentation":"

The ARN of the analyzer to use to scan the policies applied to the specified resource.

" }, "resourceArn":{ "shape":"ResourceArn", @@ -1451,9 +2303,27 @@ "documentation":"

The reason code for the current status of the analyzer.

" } }, - "documentation":"

Provides more details about the current status of the analyzer. For example, if the creation for the analyzer fails, a Failed status is displayed. For an analyzer with organization as the type, this failure can be due to an issue with creating the service-linked roles required in the member accounts of the AWS organization.

" + "documentation":"

Provides more details about the current status of the analyzer. For example, if the creation for the analyzer fails, a Failed status is returned. For an analyzer with organization as the type, this failure can be due to an issue with creating the service-linked roles required in the member accounts of the AWS organization.

" }, "String":{"type":"string"}, + "Substring":{ + "type":"structure", + "required":[ + "length", + "start" + ], + "members":{ + "length":{ + "shape":"Integer", + "documentation":"

The length of the substring.

" + }, + "start":{ + "shape":"Integer", + "documentation":"

The start index of the substring, starting from 0.

" + } + }, + "documentation":"

A reference to a substring of a literal string in a JSON document.

" + }, "TagKeys":{ "type":"list", "member":{"shape":"String"} @@ -1590,7 +2460,7 @@ "members":{ "analyzerArn":{ "shape":"AnalyzerArn", - "documentation":"

The ARN of the analyzer that generated the findings to update.

" + "documentation":"

The ARN of the analyzer that generated the findings to update.

" }, "clientToken":{ "shape":"String", @@ -1612,6 +2482,99 @@ }, "documentation":"

Updates findings with the new values provided in the request.

" }, + "ValidatePolicyFinding":{ + "type":"structure", + "required":[ + "findingDetails", + "findingType", + "issueCode", + "learnMoreLink", + "locations" + ], + "members":{ + "findingDetails":{ + "shape":"String", + "documentation":"

A localized message that explains the finding and provides guidance on how to address it.

" + }, + "findingType":{ + "shape":"ValidatePolicyFindingType", + "documentation":"

The impact of the finding.

Security warnings report when the policy allows access that we consider overly permissive.

Errors report when a part of the policy is not functional.

Warnings report non-security issues when a policy does not conform to policy writing best practices.

Suggestions recommend stylistic improvements in the policy that do not impact access.

" + }, + "issueCode":{ + "shape":"IssueCode", + "documentation":"

The issue code provides an identifier of the issue associated with this finding.

" + }, + "learnMoreLink":{ + "shape":"LearnMoreLink", + "documentation":"

A link to additional documentation about the type of finding.

" + }, + "locations":{ + "shape":"LocationList", + "documentation":"

The list of locations in the policy document that are related to the finding. The issue code provides a summary of an issue identified by the finding.

" + } + }, + "documentation":"

A finding in a policy. Each finding is an actionable recommendation that can be used to improve the policy.

" + }, + "ValidatePolicyFindingList":{ + "type":"list", + "member":{"shape":"ValidatePolicyFinding"} + }, + "ValidatePolicyFindingType":{ + "type":"string", + "enum":[ + "ERROR", + "SECURITY_WARNING", + "SUGGESTION", + "WARNING" + ] + }, + "ValidatePolicyRequest":{ + "type":"structure", + "required":[ + "policyDocument", + "policyType" + ], + "members":{ + "locale":{ + "shape":"Locale", + "documentation":"

The locale to use for localizing the findings.

" + }, + "maxResults":{ + "shape":"Integer", + "documentation":"

The maximum number of results to return in the response.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"Token", + "documentation":"

A token used for pagination of results returned.

", + "location":"querystring", + "locationName":"nextToken" + }, + "policyDocument":{ + "shape":"PolicyDocument", + "documentation":"

The JSON policy document to use as the content for the policy.

" + }, + "policyType":{ + "shape":"PolicyType", + "documentation":"

The type of policy to validate. Identity policies grant permissions to IAM principals. Identity policies include managed and inline policies for IAM roles, users, and groups. They also include service-control policies (SCPs) that are attached to an AWS organization, organizational unit (OU), or an account.

Resource policies grant permissions on AWS resources. Resource policies include trust policies for IAM roles and bucket policies for S3 buckets. You can provide a generic input such as identity policy or resource policy or a specific input such as managed policy or S3 bucket policy.

" + } + } + }, + "ValidatePolicyResponse":{ + "type":"structure", + "required":["findings"], + "members":{ + "findings":{ + "shape":"ValidatePolicyFindingList", + "documentation":"

The list of findings in a policy returned by Access Analyzer based on its suite of policy checks.

" + }, + "nextToken":{ + "shape":"Token", + "documentation":"

A token used for pagination of results returned.

" + } + } + }, "ValidationException":{ "type":"structure", "required":[ @@ -1672,7 +2635,22 @@ "member":{"shape":"String"}, "max":20, "min":1 + }, + "VpcConfiguration":{ + "type":"structure", + "required":["vpcId"], + "members":{ + "vpcId":{ + "shape":"VpcId", + "documentation":"

If this field is specified, this access point will only allow connections from the specified VPC ID.

" + } + }, + "documentation":"

The proposed virtual private cloud (VPC) configuration for the Amazon S3 access point. For more information, see VpcConfiguration.

" + }, + "VpcId":{ + "type":"string", + "pattern":"^vpc-([0-9a-f]){8}(([0-9a-f]){9})?$" } }, - "documentation":"

AWS IAM Access Analyzer helps identify potential resource-access risks by enabling you to identify any policies that grant access to an external principal. It does this by using logic-based reasoning to analyze resource-based policies in your AWS environment. An external principal can be another AWS account, a root user, an IAM user or role, a federated user, an AWS service, or an anonymous user. This guide describes the AWS IAM Access Analyzer operations that you can call programmatically. For general information about Access Analyzer, see AWS IAM Access Analyzer in the IAM User Guide.

To start using Access Analyzer, you first need to create an analyzer.

" + "documentation":"

AWS IAM Access Analyzer helps identify potential resource-access risks by enabling you to identify any policies that grant access to an external principal. It does this by using logic-based reasoning to analyze resource-based policies in your AWS environment. An external principal can be another AWS account, a root user, an IAM user or role, a federated user, an AWS service, or an anonymous user. You can also use Access Analyzer to preview and validate public and cross-account access to your resources before deploying permissions changes. This guide describes the AWS IAM Access Analyzer operations that you can call programmatically. For general information about Access Analyzer, see AWS IAM Access Analyzer in the IAM User Guide.

To start using Access Analyzer, you first need to create an analyzer.

" } diff --git a/botocore/data/acm/2015-12-08/service-2.json b/botocore/data/acm/2015-12-08/service-2.json index 7c055bde..93adef55 100644 --- a/botocore/data/acm/2015-12-08/service-2.json +++ b/botocore/data/acm/2015-12-08/service-2.json @@ -26,7 +26,8 @@ {"shape":"InvalidTagException"}, {"shape":"TooManyTagsException"}, {"shape":"TagPolicyException"}, - {"shape":"InvalidParameterException"} + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"} ], "documentation":"

Adds one or more tags to an ACM certificate. Tags are labels that you can use to identify and organize your AWS resources. Each tag consists of a key and an optional value. You specify the certificate on input by its Amazon Resource Name (ARN). You specify the tag by using a key-value pair.

You can apply a tag to just one certificate if you want to identify a specific characteristic of that certificate, or you can apply the same tag to multiple certificates if you want to filter for a common relationship among those certificates. Similarly, you can apply the same tag to multiple resources if you want to specify a relationship among those resources. For example, you can add the same tag to an ACM certificate and an Elastic Load Balancing load balancer to indicate that they are both used by the same website. For more information, see Tagging ACM certificates.

To remove one or more tags, use the RemoveTagsFromCertificate action. To view all of the tags that have been applied to the certificate, use the ListTagsForCertificate action.

" }, @@ -73,6 +74,19 @@ ], "documentation":"

Exports a private certificate issued by a private certificate authority (CA) for use anywhere. The exported file contains the certificate, the certificate chain, and the encrypted private 2048-bit RSA key associated with the public key that is embedded in the certificate. For security, you must assign a passphrase for the private key when exporting it.

For information about exporting and formatting a certificate using the ACM console or CLI, see Export a Private Certificate.

" }, + "GetAccountConfiguration":{ + "name":"GetAccountConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{"shape":"GetAccountConfigurationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns the account configuration options associated with an AWS account.

" + }, "GetCertificate":{ "name":"GetCertificate", "http":{ @@ -102,9 +116,10 @@ {"shape":"InvalidTagException"}, {"shape":"TooManyTagsException"}, {"shape":"TagPolicyException"}, - {"shape":"InvalidParameterException"} + {"shape":"InvalidParameterException"}, + {"shape":"InvalidArnException"} ], - "documentation":"

Imports a certificate into AWS Certificate Manager (ACM) to use with services that are integrated with ACM. Note that integrated services allow only certificate types and keys they support to be associated with their resources. Further, their support differs depending on whether the certificate is imported into IAM or into ACM. For more information, see the documentation for each service. For more information about importing certificates into ACM, see Importing Certificates in the AWS Certificate Manager User Guide.

ACM does not provide managed renewal for certificates that you import.

Note the following guidelines when importing third party certificates:

This operation returns the Amazon Resource Name (ARN) of the imported certificate.

" + "documentation":"

Imports a certificate into AWS Certificate Manager (ACM) to use with services that are integrated with ACM. Note that integrated services allow only certificate types and keys they support to be associated with their resources. Further, their support differs depending on whether the certificate is imported into IAM or into ACM. For more information, see the documentation for each service. For more information about importing certificates into ACM, see Importing Certificates in the AWS Certificate Manager User Guide.

ACM does not provide managed renewal for certificates that you import.

Note the following guidelines when importing third party certificates:

This operation returns the Amazon Resource Name (ARN) of the imported certificate.

" }, "ListCertificates":{ "name":"ListCertificates", @@ -133,6 +148,21 @@ ], "documentation":"

Lists the tags that have been applied to the ACM certificate. Use the certificate's Amazon Resource Name (ARN) to specify the certificate. To add a tag to an ACM certificate, use the AddTagsToCertificate action. To delete a tag, use the RemoveTagsFromCertificate action.

" }, + "PutAccountConfiguration":{ + "name":"PutAccountConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutAccountConfigurationRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Adds or modifies account-level configurations in ACM.

The supported configuration option is DaysBeforeExpiry. This option specifies the number of days prior to certificate expiration when ACM starts generating EventBridge events. ACM sends one event per day per certificate until the certificate expires. By default, accounts receive events starting 45 days before certificate expiration.

" + }, "RemoveTagsFromCertificate":{ "name":"RemoveTagsFromCertificate", "http":{ @@ -145,7 +175,8 @@ {"shape":"InvalidArnException"}, {"shape":"InvalidTagException"}, {"shape":"TagPolicyException"}, - {"shape":"InvalidParameterException"} + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"} ], "documentation":"

Remove one or more tags from an ACM certificate. A tag consists of a key-value pair. If you do not specify the value portion of the tag when calling this function, the tag will be removed regardless of value. If you specify a value, the tag is removed only if it is associated with the specified value.

To add tags to a certificate, use the AddTagsToCertificate action. To view all of the tags that have been applied to a specific ACM certificate, use the ListTagsForCertificate action.

" }, @@ -160,7 +191,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidArnException"} ], - "documentation":"

Renews an eligable ACM certificate. At this time, only exported private certificates can be renewed with this operation. In order to renew your ACM PCA certificates with ACM, you must first grant the ACM service principal permission to do so. For more information, see Testing Managed Renewal in the ACM User Guide.

" + "documentation":"

Renews an eligible ACM certificate. At this time, only exported private certificates can be renewed with this operation. In order to renew your ACM PCA certificates with ACM, you must first grant the ACM service principal permission to do so. For more information, see Testing Managed Renewal in the ACM User Guide.

" }, "RequestCertificate":{ "name":"RequestCertificate", @@ -213,6 +244,15 @@ } }, "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ServiceErrorMessage"} + }, + "documentation":"

You do not have access required to perform this action.

", + "exception":true, + "synthetic":true + }, "AddTagsToCertificateRequest":{ "type":"structure", "required":[ @@ -222,7 +262,7 @@ "members":{ "CertificateArn":{ "shape":"Arn", - "documentation":"

String that contains the ARN of the ACM certificate to which the tag is to be applied. This must be of the form:

arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

String that contains the ARN of the ACM certificate to which the tag is to be applied. This must be of the form:

arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

For more information about ARNs, see Amazon Resource Names (ARNs).

" }, "Tags":{ "shape":"TagList", @@ -236,6 +276,7 @@ "min":20, "pattern":"arn:[\\w+=/,.@-]+:[\\w+=/,.@-]+:[\\w+=/,.@-]*:[0-9]+:[\\w+=,.@-]+(/[\\w+=,.@-]+)*" }, + "AvailabilityErrorMessage":{"type":"string"}, "CertificateBody":{ "type":"string", "max":32768, @@ -263,7 +304,7 @@ "members":{ "CertificateArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the certificate. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The Amazon Resource Name (ARN) of the certificate. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.

" }, "DomainName":{ "shape":"DomainNameString", @@ -291,7 +332,7 @@ }, "CreatedAt":{ "shape":"TStamp", - "documentation":"

The time at which the certificate was requested. This value exists only when the certificate type is AMAZON_ISSUED.

" + "documentation":"

The time at which the certificate was requested.

" }, "IssuedAt":{ "shape":"TStamp", @@ -399,7 +440,7 @@ "members":{ "CertificateArn":{ "shape":"Arn", - "documentation":"

Amazon Resource Name (ARN) of the certificate. This is of the form:

arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

Amazon Resource Name (ARN) of the certificate. This is of the form:

arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

For more information about ARNs, see Amazon Resource Names (ARNs).

" }, "DomainName":{ "shape":"DomainNameString", @@ -427,13 +468,21 @@ "PRIVATE" ] }, + "ConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

You are trying to update a resource or configuration that is already being created or updated. Wait for the previous operation to finish and try again.

", + "exception":true + }, "DeleteCertificateRequest":{ "type":"structure", "required":["CertificateArn"], "members":{ "CertificateArn":{ "shape":"Arn", - "documentation":"

String that contains the ARN of the ACM certificate to be deleted. This must be of the form:

arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

String that contains the ARN of the ACM certificate to be deleted. This must be of the form:

arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

For more information about ARNs, see Amazon Resource Names (ARNs).

" } } }, @@ -443,7 +492,7 @@ "members":{ "CertificateArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the ACM certificate. The ARN must have the following form:

arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

The Amazon Resource Name (ARN) of the ACM certificate. The ARN must have the following form:

arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

For more information about ARNs, see Amazon Resource Names (ARNs).

" } } }, @@ -537,6 +586,16 @@ "max":100, "min":1 }, + "ExpiryEventsConfiguration":{ + "type":"structure", + "members":{ + "DaysBeforeExpiry":{ + "shape":"PositiveInteger", + "documentation":"

Specifies the number of days prior to certificate expiration when ACM starts generating EventBridge events. ACM sends one event per day per certificate until the certificate expires. By default, accounts receive events starting 45 days before certificate expiration.

" + } + }, + "documentation":"

Object containing expiration events options associated with an AWS account.

" + }, "ExportCertificateRequest":{ "type":"structure", "required":[ @@ -645,18 +704,27 @@ }, "keyTypes":{ "shape":"KeyAlgorithmList", - "documentation":"

Specify one or more algorithms that can be used to generate key pairs.

Default filtering returns only RSA_2048 certificates. To return other certificate types, provide the desired type signatures in a comma-separated list. For example, \"keyTypes\": [\"RSA_2048,RSA_4096\"] returns both RSA_2048 and RSA_4096 certificates.

" + "documentation":"

Specify one or more algorithms that can be used to generate key pairs.

Default filtering returns only RSA_1024 and RSA_2048 certificates that have at least one domain. To return other certificate types, provide the desired type signatures in a comma-separated list. For example, \"keyTypes\": [\"RSA_2048,RSA_4096\"] returns both RSA_2048 and RSA_4096 certificates.

" } }, "documentation":"

This structure can be used in the ListCertificates action to filter the output of the certificate list.
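As a sketch, the keyTypes filter might be passed to ListCertificates like this; the request member name Includes is an assumption based on the public ACM API rather than this hunk, and the response keys follow the CertificateSummary shape shown above:

    import boto3

    acm = boto3.client("acm")

    # Return RSA_2048 and RSA_4096 certificates instead of the default key-type filtering.
    response = acm.list_certificates(
        Includes={"keyTypes": ["RSA_2048", "RSA_4096"]},
    )
    for cert in response["CertificateSummaryList"]:
        print(cert["CertificateArn"], cert["DomainName"])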

" }, + "GetAccountConfigurationResponse":{ + "type":"structure", + "members":{ + "ExpiryEvents":{ + "shape":"ExpiryEventsConfiguration", + "documentation":"

Expiration events configuration options associated with the AWS account.

" + } + } + }, "GetCertificateRequest":{ "type":"structure", "required":["CertificateArn"], "members":{ "CertificateArn":{ "shape":"Arn", - "documentation":"

String that contains a certificate ARN in the following format:

arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

String that contains a certificate ARN in the following format:

arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

For more information about ARNs, see Amazon Resource Names (ARNs).

" } } }, @@ -866,7 +934,7 @@ "members":{ "CertificateArn":{ "shape":"Arn", - "documentation":"

String that contains the ARN of the ACM certificate for which you want to list the tags. This must have the following form:

arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

String that contains the ARN of the ACM certificate for which you want to list the tags. This must have the following form:

arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

For more information about ARNs, see Amazon Resource Names (ARNs).

" } } }, @@ -896,6 +964,10 @@ "min":4, "sensitive":true }, + "PositiveInteger":{ + "type":"integer", + "min":1 + }, "PrivateKey":{ "type":"string", "max":524288, @@ -909,6 +981,20 @@ "min":1, "sensitive":true }, + "PutAccountConfigurationRequest":{ + "type":"structure", + "required":["IdempotencyToken"], + "members":{ + "ExpiryEvents":{ + "shape":"ExpiryEventsConfiguration", + "documentation":"

Specifies expiration events associated with an account.

" + }, + "IdempotencyToken":{ + "shape":"IdempotencyToken", + "documentation":"

Customer-chosen string used to distinguish between calls to PutAccountConfiguration. Idempotency tokens time out after one hour. If you call PutAccountConfiguration multiple times with the same unexpired idempotency token, ACM treats it as the same request and returns the original result. If you change the idempotency token for each call, ACM treats each call as a new request.

" + } + } + }, "RecordType":{ "type":"string", "enum":["CNAME"] @@ -922,7 +1008,7 @@ "members":{ "CertificateArn":{ "shape":"Arn", - "documentation":"

String that contains the ARN of the ACM Certificate with one or more tags that you want to remove. This must be of the form:

arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

String that contains the ARN of the ACM Certificate with one or more tags that you want to remove. This must be of the form:

arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

For more information about ARNs, see Amazon Resource Names (ARNs).

" }, "Tags":{ "shape":"TagList", @@ -936,7 +1022,7 @@ "members":{ "CertificateArn":{ "shape":"Arn", - "documentation":"

String that contains the ARN of the ACM certificate to be renewed. This must be of the form:

arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" + "documentation":"

String that contains the ARN of the ACM certificate to be renewed. This must be of the form:

arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012

For more information about ARNs, see Amazon Resource Names (ARNs).

" } } }, @@ -1114,6 +1200,7 @@ "A_A_COMPROMISE" ] }, + "ServiceErrorMessage":{"type":"string"}, "String":{"type":"string"}, "TStamp":{"type":"timestamp"}, "Tag":{ @@ -1157,6 +1244,15 @@ "min":0, "pattern":"[\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@]*" }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "message":{"shape":"AvailabilityErrorMessage"} + }, + "documentation":"

The request was denied because it exceeded a quota.

", + "exception":true, + "synthetic":true + }, "TooManyTagsException":{ "type":"structure", "members":{ @@ -1186,6 +1282,16 @@ "type":"list", "member":{"shape":"String"} }, + "ValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"ValidationExceptionMessage"} + }, + "documentation":"

The supplied input failed to satisfy constraints of an AWS service.

", + "exception":true, + "synthetic":true + }, + "ValidationExceptionMessage":{"type":"string"}, "ValidationMethod":{ "type":"string", "enum":[ @@ -1194,5 +1300,5 @@ ] } }, - "documentation":"AWS Certificate Manager

Welcome to the AWS Certificate Manager (ACM) API documentation.

You can use ACM to manage SSL/TLS certificates for your AWS-based websites and applications. For general information about using ACM, see the AWS Certificate Manager User Guide .

" + "documentation":"AWS Certificate Manager

You can use AWS Certificate Manager (ACM) to manage SSL/TLS certificates for your AWS-based websites and applications. For more information about using ACM, see the AWS Certificate Manager User Guide.

" } diff --git a/botocore/data/alexaforbusiness/2017-11-09/service-2.json b/botocore/data/alexaforbusiness/2017-11-09/service-2.json index 700df59a..a2159788 100644 --- a/botocore/data/alexaforbusiness/2017-11-09/service-2.json +++ b/botocore/data/alexaforbusiness/2017-11-09/service-2.json @@ -904,7 +904,7 @@ "errors":[ {"shape":"NotFoundException"} ], - "documentation":"

Determines the details for the room from which a skill request was invoked. This operation is used by skill developers.

" + "documentation":"

Determines the details for the room from which a skill request was invoked. This operation is used by skill developers.

To query ResolveRoom from an Alexa skill, the skill ID needs to be authorized. When the skill is using an AWS Lambda function, the skill is automatically authorized when you publish your skill as a private skill to your AWS account. Skills that are hosted using a custom web service must be manually authorized. To get your skill authorized, contact AWS Support with the AWS account ID that queries the ResolveRoom API and the skill ID.

" }, "RevokeInvitation":{ "name":"RevokeInvitation", @@ -1872,6 +1872,10 @@ "shape":"ClientRequestToken", "documentation":"

A unique, user-specified identifier for the request that ensures idempotency.

", "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to be added to the specified resource. Do not provide system tags.

" } } }, @@ -1967,6 +1971,10 @@ "shape":"ClientRequestToken", "documentation":"

The request token of the client.

", "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to be added to the specified resource. Do not provide system tags.

" } } }, @@ -2011,6 +2019,10 @@ "shape":"ClientRequestToken", "documentation":"

A unique, user-specified identifier for this request that ensures idempotency.

", "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to be added to the specified resource. Do not provide system tags.

" } } }, @@ -2065,6 +2077,10 @@ "shape":"ClientRequestToken", "documentation":"

A unique, user-specified identifier for the request that ensures idempotency.

", "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to be added to the specified resource. Do not provide system tags.

" } } }, @@ -2162,6 +2178,10 @@ "ClientRequestToken":{ "shape":"ClientRequestToken", "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to be added to the specified resource. Do not provide system tags.

" } } }, @@ -4448,7 +4468,11 @@ }, "RoomArn":{ "shape":"Arn", - "documentation":"

The ARN of the room with which to associate your AVS device.

" + "documentation":"

The Amazon Resource Name (ARN) of the room with which to associate your AVS device.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to be added to the specified resource. Do not provide system tags.

" } } }, diff --git a/botocore/data/appflow/2020-08-23/service-2.json b/botocore/data/appflow/2020-08-23/service-2.json index b1c0d1dd..138e7322 100644 --- a/botocore/data/appflow/2020-08-23/service-2.json +++ b/botocore/data/appflow/2020-08-23/service-2.json @@ -643,6 +643,14 @@ "Upsolver":{ "shape":"UpsolverMetadata", "documentation":"

The connector metadata specific to Upsolver.

" + }, + "CustomerProfiles":{ + "shape":"CustomerProfilesMetadata", + "documentation":"

The connector metadata specific to Amazon Connect Customer Profiles.

" + }, + "Honeycode":{ + "shape":"HoneycodeMetadata", + "documentation":"

The connector metadata specific to Amazon Honeycode.

" } }, "documentation":"

A structure to specify connector-specific metadata such as oAuthScopes, supportedRegions, privateLinkServiceUrl, and so on.

" @@ -803,6 +811,10 @@ "shape":"GoogleAnalyticsConnectorProfileCredentials", "documentation":"

The connector-specific credentials required when using Google Analytics.

" }, + "Honeycode":{ + "shape":"HoneycodeConnectorProfileCredentials", + "documentation":"

The connector-specific credentials required when using Amazon Honeycode.

" + }, "InforNexus":{ "shape":"InforNexusConnectorProfileCredentials", "documentation":"

The connector-specific credentials required when using Infor Nexus.

" @@ -884,6 +896,10 @@ "shape":"GoogleAnalyticsConnectorProfileProperties", "documentation":"

The connector-specific properties required Google Analytics.

" }, + "Honeycode":{ + "shape":"HoneycodeConnectorProfileProperties", + "documentation":"

The connector-specific properties required by Amazon Honeycode.

" + }, "InforNexus":{ "shape":"InforNexusConnectorProfileProperties", "documentation":"

The connector-specific properties required by Infor Nexus.

" @@ -960,7 +976,10 @@ "Amplitude", "Veeva", "EventBridge", - "Upsolver" + "LookoutMetrics", + "Upsolver", + "Honeycode", + "CustomerProfiles" ] }, "ConnectorTypeList":{ @@ -1071,6 +1090,27 @@ "max":256, "pattern":"\\S+" }, + "CustomerProfilesDestinationProperties":{ + "type":"structure", + "required":["domainName"], + "members":{ + "domainName":{ + "shape":"DomainName", + "documentation":"

The unique name of the Amazon Connect Customer Profiles domain.

" + }, + "objectTypeName":{ + "shape":"ObjectTypeName", + "documentation":"

The object specified in the Amazon Connect Customer Profiles flow destination.

" + } + }, + "documentation":"

The properties that are applied when Amazon Connect Customer Profiles is used as a destination.

" + }, + "CustomerProfilesMetadata":{ + "type":"structure", + "members":{ + }, + "documentation":"

The connector metadata specific to Amazon Connect Customer Profiles.

" + }, "DataPullMode":{ "type":"string", "enum":[ @@ -1419,9 +1459,21 @@ "shape":"EventBridgeDestinationProperties", "documentation":"

The properties required to query Amazon EventBridge.

" }, + "LookoutMetrics":{ + "shape":"LookoutMetricsDestinationProperties", + "documentation":"

The properties required to query Amazon Lookout for Metrics.

" + }, "Upsolver":{ "shape":"UpsolverDestinationProperties", "documentation":"

The properties required to query Upsolver.

" + }, + "Honeycode":{ + "shape":"HoneycodeDestinationProperties", + "documentation":"

The properties required to query Amazon Honeycode.

" + }, + "CustomerProfiles":{ + "shape":"CustomerProfilesDestinationProperties", + "documentation":"

The properties required to query Amazon Connect Customer Profiles.

" } }, "documentation":"

This stores the information that is required to query a particular connector.

" @@ -1483,6 +1535,11 @@ "type":"list", "member":{"shape":"DestinationFlowConfig"} }, + "DomainName":{ + "type":"string", + "max":64, + "pattern":"\\S+" + }, "DynatraceConnectorOperator":{ "type":"string", "enum":[ @@ -1652,6 +1709,14 @@ "lastUpdatedAt":{ "shape":"Date", "documentation":"

Specifies the time of the most recent update.

" + }, + "dataPullStartTime":{ + "shape":"Date", + "documentation":"

The timestamp that determines the first new or updated record to be transferred in the flow run.

" + }, + "dataPullEndTime":{ + "shape":"Date", + "documentation":"

The timestamp that indicates the last new or updated record to be transferred in the flow run.

" } }, "documentation":"

Specifies information about the past flow run instances for a given flow.

" @@ -1887,6 +1952,49 @@ "max":128, "pattern":"\\S+" }, + "HoneycodeConnectorProfileCredentials":{ + "type":"structure", + "members":{ + "accessToken":{ + "shape":"AccessToken", + "documentation":"

The credentials used to access protected Amazon Honeycode resources.

" + }, + "refreshToken":{ + "shape":"RefreshToken", + "documentation":"

The credentials used to acquire new access tokens.

" + }, + "oAuthRequest":{"shape":"ConnectorOAuthRequest"} + }, + "documentation":"

The connector-specific credentials required when using Amazon Honeycode.

" + }, + "HoneycodeConnectorProfileProperties":{ + "type":"structure", + "members":{ + }, + "documentation":"

The connector-specific properties required when using Amazon Honeycode.

" + }, + "HoneycodeDestinationProperties":{ + "type":"structure", + "required":["object"], + "members":{ + "object":{ + "shape":"Object", + "documentation":"

The object specified in the Amazon Honeycode flow destination.

" + }, + "errorHandlingConfig":{"shape":"ErrorHandlingConfig"} + }, + "documentation":"

The properties that are applied when Amazon Honeycode is used as a destination.

" + }, + "HoneycodeMetadata":{ + "type":"structure", + "members":{ + "oAuthScopes":{ + "shape":"OAuthScopeList", + "documentation":"

The desired authorization scope for the Amazon Honeycode account.

" + } + }, + "documentation":"

The connector metadata specific to Amazon Honeycode.

" + }, "IdFieldNameList":{ "type":"list", "member":{"shape":"Name"}, @@ -2090,6 +2198,12 @@ } }, "Long":{"type":"long"}, + "LookoutMetricsDestinationProperties":{ + "type":"structure", + "members":{ + }, + "documentation":"

The properties that are applied when Amazon Lookout for Metrics is used as a destination.

" + }, "MarketoConnectorOperator":{ "type":"string", "enum":[ @@ -2199,6 +2313,11 @@ "max":512, "pattern":"\\S+" }, + "ObjectTypeName":{ + "type":"string", + "max":255, + "pattern":"\\S+" + }, "Operator":{ "type":"string", "enum":[ @@ -2255,11 +2374,11 @@ "members":{ "prefixType":{ "shape":"PrefixType", - "documentation":"

Determines the level of granularity that's included in the prefix.

" + "documentation":"

Determines the format of the prefix, and whether it applies to the file name, file path, or both.

" }, "prefixFormat":{ "shape":"PrefixFormat", - "documentation":"

Determines the format of the prefix, and whether it applies to the file name, file path, or both.

" + "documentation":"

Determines the level of granularity that's included in the prefix.

" } }, "documentation":"

Determines the prefix that Amazon AppFlow applies to the destination folder name. You can name your destination folders according to the flow frequency and date.

" @@ -2609,6 +2728,11 @@ "ONCE" ] }, + "ScheduleOffset":{ + "type":"long", + "max":36000, + "min":0 + }, "ScheduledTriggerProperties":{ "type":"structure", "required":["scheduleExpression"], @@ -2631,7 +2755,16 @@ }, "timezone":{ "shape":"Timezone", - "documentation":"

Specifies the time zone used when referring to the date and time of a scheduled-triggered flow.

" + "documentation":"

Specifies the time zone used when referring to the date and time of a scheduled-triggered flow, such as America/New_York.

" + }, + "scheduleOffset":{ + "shape":"ScheduleOffset", + "documentation":"

Specifies the optional offset that is added to the time interval for a schedule-triggered flow.

", + "box":true + }, + "firstExecutionFrom":{ + "shape":"Date", + "documentation":"

Specifies the date range for the records to import from the connector in the first flow run.

" } }, "documentation":"

Specifies the configuration details of a schedule-triggered flow as defined by the user. Currently, these settings only apply to the Scheduled trigger type.
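As a rough sketch, the scheduled-trigger properties above might be assembled like this in Python; the expression, offset, and timezone values are illustrative, and the triggerConfig/triggerProperties nesting noted in the comment is an assumption based on the public AppFlow create_flow API rather than this diff:

    from datetime import datetime

    # ScheduledTriggerProperties for a schedule-triggered flow.
    scheduled_properties = {
        "scheduleExpression": "rate(1hours)",       # required; illustrative value
        "timezone": "America/New_York",             # time zone used for the schedule
        "scheduleOffset": 600,                      # optional offset in seconds (0-36000)
        "firstExecutionFrom": datetime(2021, 1, 1), # date range start for the first flow run
    }

    # Assumed nesting when calling create_flow / update_flow:
    # triggerConfig={"triggerType": "Scheduled",
    #                "triggerProperties": {"Scheduled": scheduled_properties}}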

" diff --git a/botocore/data/appsync/2017-07-25/service-2.json b/botocore/data/appsync/2017-07-25/service-2.json index d0fb1269..1584e66d 100644 --- a/botocore/data/appsync/2017-07-25/service-2.json +++ b/botocore/data/appsync/2017-07-25/service-2.json @@ -1148,7 +1148,8 @@ "functionVersion":{ "shape":"String", "documentation":"

The version of the request mapping template. Currently the supported value is 2018-05-29.

" - } + }, + "syncConfig":{"shape":"SyncConfig"} } }, "CreateFunctionResponse":{ @@ -1675,7 +1676,8 @@ "functionVersion":{ "shape":"String", "documentation":"

The version of the request mapping template. Currently only the 2018-05-29 version of the template is supported.

" - } + }, + "syncConfig":{"shape":"SyncConfig"} }, "documentation":"

A function is a reusable entity. Multiple functions can be used to compose the resolver logic.

" }, @@ -2920,7 +2922,8 @@ "functionVersion":{ "shape":"String", "documentation":"

The version of the request mapping template. Currently the supported value is 2018-05-29.

" - } + }, + "syncConfig":{"shape":"SyncConfig"} } }, "UpdateFunctionResponse":{ diff --git a/botocore/data/athena/2017-05-18/service-2.json b/botocore/data/athena/2017-05-18/service-2.json index 9a2fdf81..a8725926 100644 --- a/botocore/data/athena/2017-05-18/service-2.json +++ b/botocore/data/athena/2017-05-18/service-2.json @@ -69,6 +69,20 @@ "documentation":"

Creates a named query in the specified workgroup. Requires that you have access to the workgroup.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

", "idempotent":true }, + "CreatePreparedStatement":{ + "name":"CreatePreparedStatement", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePreparedStatementInput"}, + "output":{"shape":"CreatePreparedStatementOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Creates a prepared statement for use with SQL queries in Athena.

" + }, "CreateWorkGroup":{ "name":"CreateWorkGroup", "http":{ @@ -112,6 +126,21 @@ "documentation":"

Deletes the named query if you have access to the workgroup in which the query was saved.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

", "idempotent":true }, + "DeletePreparedStatement":{ + "name":"DeletePreparedStatement", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePreparedStatementInput"}, + "output":{"shape":"DeletePreparedStatementOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Deletes the prepared statement with the specified name from the specified workgroup.

" + }, "DeleteWorkGroup":{ "name":"DeleteWorkGroup", "http":{ @@ -154,7 +183,7 @@ {"shape":"InvalidRequestException"}, {"shape":"MetadataException"} ], - "documentation":"

Returns a database object for the specfied database and data catalog.

" + "documentation":"

Returns a database object for the specified database and data catalog.

" }, "GetNamedQuery":{ "name":"GetNamedQuery", @@ -170,6 +199,21 @@ ], "documentation":"

Returns information about a single query. Requires that you have access to the workgroup in which the query was saved.

" }, + "GetPreparedStatement":{ + "name":"GetPreparedStatement", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPreparedStatementInput"}, + "output":{"shape":"GetPreparedStatementOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves the prepared statement with the specified name from the specified workgroup.

" + }, "GetQueryExecution":{ "name":"GetQueryExecution", "http":{ @@ -256,6 +300,20 @@ ], "documentation":"

Lists the databases in the specified data catalog.

" }, + "ListEngineVersions":{ + "name":"ListEngineVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListEngineVersionsInput"}, + "output":{"shape":"ListEngineVersionsOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Returns a list of engine versions that are available to choose from, including the Auto option.
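A minimal boto3 sketch of listing the available engine versions; SelectedEngineVersion and EffectiveEngineVersion come from the EngineVersion structure defined later in this file:

    import boto3

    athena = boto3.client("athena")

    response = athena.list_engine_versions(MaxResults=10)
    for version in response["EngineVersions"]:
        # EffectiveEngineVersion is read only; SelectedEngineVersion may be "Auto".
        print(version.get("SelectedEngineVersion"), version.get("EffectiveEngineVersion"))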

" + }, "ListNamedQueries":{ "name":"ListNamedQueries", "http":{ @@ -270,6 +328,20 @@ ], "documentation":"

Provides a list of available query IDs only for queries saved in the specified workgroup. Requires that you have access to the specified workgroup. If a workgroup is not specified, lists the saved queries for the primary workgroup.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

" }, + "ListPreparedStatements":{ + "name":"ListPreparedStatements", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPreparedStatementsInput"}, + "output":{"shape":"ListPreparedStatementsOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Lists the prepared statements in the specified workgroup.

" + }, "ListQueryExecutions":{ "name":"ListQueryExecutions", "http":{ @@ -403,6 +475,21 @@ ], "documentation":"

Updates the data catalog that has the specified name.

" }, + "UpdatePreparedStatement":{ + "name":"UpdatePreparedStatement", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdatePreparedStatementInput"}, + "output":{"shape":"UpdatePreparedStatementOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Updates a prepared statement.

" + }, "UpdateWorkGroup":{ "name":"UpdateWorkGroup", "http":{ @@ -586,7 +673,7 @@ }, "Type":{ "shape":"DataCatalogType", - "documentation":"

The type of data catalog to create: LAMBDA for a federated catalog, GLUE for AWS Glue Catalog, or HIVE for an external hive metastore.

" + "documentation":"

The type of data catalog to create: LAMBDA for a federated catalog or HIVE for an external hive metastore.

Do not use the GLUE type. This refers to the AwsDataCatalog that already exists in your account, of which you can have only one. Specifying the GLUE type will result in an INVALID_INPUT error.

" }, "Description":{ "shape":"DescriptionString", @@ -594,7 +681,7 @@ }, "Parameters":{ "shape":"ParametersMap", - "documentation":"

Specifies the Lambda function or functions to use for creating the data catalog. This is a mapping whose values depend on the catalog type.

" + "documentation":"

Specifies the Lambda function or functions to use for creating the data catalog. This is a mapping whose values depend on the catalog type.

" }, "Tags":{ "shape":"TagList", @@ -651,6 +738,37 @@ } } }, + "CreatePreparedStatementInput":{ + "type":"structure", + "required":[ + "StatementName", + "WorkGroup", + "QueryStatement" + ], + "members":{ + "StatementName":{ + "shape":"StatementName", + "documentation":"

The name of the prepared statement.

" + }, + "WorkGroup":{ + "shape":"WorkGroupName", + "documentation":"

The name of the workgroup to which the prepared statement belongs.

" + }, + "QueryStatement":{ + "shape":"QueryString", + "documentation":"

The query string for the prepared statement.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

The description of the prepared statement.
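Putting the request members above together, a minimal boto3 sketch of the prepared-statement lifecycle; the statement name, workgroup, and query text are placeholders:

    import boto3

    athena = boto3.client("athena")

    athena.create_prepared_statement(
        StatementName="top_products",
        WorkGroup="primary",
        QueryStatement="SELECT product_id, count(*) FROM sales WHERE region = ? GROUP BY product_id",
        Description="Sales rollup parameterized by region",
    )

    # Retrieve and list statements in the workgroup.
    stmt = athena.get_prepared_statement(StatementName="top_products", WorkGroup="primary")
    print(stmt["PreparedStatement"]["QueryStatement"])

    listed = athena.list_prepared_statements(WorkGroup="primary", MaxResults=50)
    for summary in listed["PreparedStatements"]:
        print(summary["StatementName"], summary["LastModifiedTime"])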

" + } + } + }, + "CreatePreparedStatementOutput":{ + "type":"structure", + "members":{ + } + }, "CreateWorkGroupInput":{ "type":"structure", "required":["Name"], @@ -695,11 +813,11 @@ }, "Type":{ "shape":"DataCatalogType", - "documentation":"

The type of data catalog: LAMBDA for a federated catalog, GLUE for AWS Glue Catalog, or HIVE for an external hive metastore.

" + "documentation":"

The type of data catalog: LAMBDA for a federated catalog or HIVE for an external hive metastore. GLUE refers to the AwsDataCatalog that already exists in your account, of which you can have only one.

" }, "Parameters":{ "shape":"ParametersMap", - "documentation":"

Specifies the Lambda function or functions to use for the data catalog. This is a mapping whose values depend on the catalog type.

" + "documentation":"

Specifies the Lambda function or functions to use for the data catalog. This is a mapping whose values depend on the catalog type.

" } }, "documentation":"

Contains information about a data catalog in an AWS account.

" @@ -800,6 +918,28 @@ "members":{ } }, + "DeletePreparedStatementInput":{ + "type":"structure", + "required":[ + "StatementName", + "WorkGroup" + ], + "members":{ + "StatementName":{ + "shape":"StatementName", + "documentation":"

The name of the prepared statement to delete.

" + }, + "WorkGroup":{ + "shape":"WorkGroupName", + "documentation":"

The workgroup to which the statement to be deleted belongs.

" + } + } + }, + "DeletePreparedStatementOutput":{ + "type":"structure", + "members":{ + } + }, "DeleteWorkGroupInput":{ "type":"structure", "required":["WorkGroup"], @@ -810,7 +950,7 @@ }, "RecursiveDeleteOption":{ "shape":"BoxedBoolean", - "documentation":"

The option to delete the workgroup and its contents even if the workgroup contains any named queries.

" + "documentation":"

The option to delete the workgroup and its contents even if the workgroup contains any named queries or query executions.

" } } }, @@ -847,6 +987,26 @@ "CSE_KMS" ] }, + "EngineVersion":{ + "type":"structure", + "members":{ + "SelectedEngineVersion":{ + "shape":"NameString", + "documentation":"

The engine version requested by the user. Possible values are determined by the output of ListEngineVersions, including Auto. The default is Auto.

" + }, + "EffectiveEngineVersion":{ + "shape":"NameString", + "documentation":"

Read only. The engine version on which the query runs. If the user requests a valid engine version other than Auto, the effective engine version is the same as the engine version that the user requested. If the user requests Auto, the effective engine version is chosen by Athena. When a request to update the engine version is made by a CreateWorkGroup or UpdateWorkGroup operation, the EffectiveEngineVersion field is ignored.

" + } + }, + "documentation":"

The Athena engine version for running queries.

" + }, + "EngineVersionsList":{ + "type":"list", + "member":{"shape":"EngineVersion"}, + "max":10, + "min":0 + }, "ErrorCode":{ "type":"string", "documentation":"

The error code returned when the query execution failed to process, or when the processing request for the named query failed.

", @@ -923,6 +1083,32 @@ } } }, + "GetPreparedStatementInput":{ + "type":"structure", + "required":[ + "StatementName", + "WorkGroup" + ], + "members":{ + "StatementName":{ + "shape":"StatementName", + "documentation":"

The name of the prepared statement to retrieve.

" + }, + "WorkGroup":{ + "shape":"WorkGroupName", + "documentation":"

The workgroup to which the statement to be retrieved belongs.

" + } + } + }, + "GetPreparedStatementOutput":{ + "type":"structure", + "members":{ + "PreparedStatement":{ + "shape":"PreparedStatement", + "documentation":"

The name of the prepared statement that was retrieved.

" + } + } + }, "GetQueryExecutionInput":{ "type":"structure", "required":["QueryExecutionId"], @@ -1114,6 +1300,32 @@ } } }, + "ListEngineVersionsInput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" + }, + "MaxResults":{ + "shape":"MaxEngineVersionsCount", + "documentation":"

The maximum number of engine versions to return in this request.

" + } + } + }, + "ListEngineVersionsOutput":{ + "type":"structure", + "members":{ + "EngineVersions":{ + "shape":"EngineVersionsList", + "documentation":"

A list of engine versions that are available to choose from.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" + } + } + }, "ListNamedQueriesInput":{ "type":"structure", "members":{ @@ -1144,6 +1356,37 @@ } } }, + "ListPreparedStatementsInput":{ + "type":"structure", + "required":["WorkGroup"], + "members":{ + "WorkGroup":{ + "shape":"WorkGroupName", + "documentation":"

The workgroup to list the prepared statements for.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" + }, + "MaxResults":{ + "shape":"MaxPreparedStatementsCount", + "documentation":"

The maximum number of results to return in this request.

" + } + } + }, + "ListPreparedStatementsOutput":{ + "type":"structure", + "members":{ + "PreparedStatements":{ + "shape":"PreparedStatementsList", + "documentation":"

The list of prepared statements for the workgroup.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" + } + } + }, "ListQueryExecutionsInput":{ "type":"structure", "members":{ @@ -1265,7 +1508,7 @@ "members":{ "WorkGroups":{ "shape":"WorkGroupsList", - "documentation":"

The list of workgroups, including their names, descriptions, creation times, and states.

" + "documentation":"

A list of WorkGroupSummary objects that include the names, descriptions, creation times, and states for each workgroup.

" }, "NextToken":{ "shape":"Token", @@ -1286,12 +1529,24 @@ "max":50, "min":1 }, + "MaxEngineVersionsCount":{ + "type":"integer", + "box":true, + "max":10, + "min":1 + }, "MaxNamedQueriesCount":{ "type":"integer", "box":true, "max":50, "min":0 }, + "MaxPreparedStatementsCount":{ + "type":"integer", + "box":true, + "max":50, + "min":1 + }, "MaxQueryExecutionsCount":{ "type":"integer", "box":true, @@ -1389,6 +1644,52 @@ "type":"string", "max":51200 }, + "PreparedStatement":{ + "type":"structure", + "members":{ + "StatementName":{ + "shape":"StatementName", + "documentation":"

The name of the prepared statement.

" + }, + "QueryStatement":{ + "shape":"QueryString", + "documentation":"

The query string for the prepared statement.

" + }, + "WorkGroupName":{ + "shape":"WorkGroupName", + "documentation":"

The name of the workgroup to which the prepared statement belongs.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

The description of the prepared statement.

" + }, + "LastModifiedTime":{ + "shape":"Date", + "documentation":"

The last modified time of the prepared statement.

" + } + }, + "documentation":"

A prepared SQL statement for use with Athena.

" + }, + "PreparedStatementSummary":{ + "type":"structure", + "members":{ + "StatementName":{ + "shape":"StatementName", + "documentation":"

The name of the prepared statement.

" + }, + "LastModifiedTime":{ + "shape":"Date", + "documentation":"

The last modified time of the prepared statement.

" + } + }, + "documentation":"

The name and last modified time of the prepared statement.

" + }, + "PreparedStatementsList":{ + "type":"list", + "member":{"shape":"PreparedStatementSummary"}, + "max":50, + "min":0 + }, "QueryExecution":{ "type":"structure", "members":{ @@ -1423,6 +1724,10 @@ "WorkGroup":{ "shape":"WorkGroupName", "documentation":"

The name of the workgroup in which the query ran.

" + }, + "EngineVersion":{ + "shape":"EngineVersion", + "documentation":"

The engine version that executed the query.

" } }, "documentation":"

Information about a single instance of a query execution.

" @@ -1642,6 +1947,12 @@ } } }, + "StatementName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[a-zA-Z_][a-zA-Z0-9_@:]{1,256}" + }, "StatementType":{ "type":"string", "enum":[ @@ -1869,7 +2180,7 @@ }, "Type":{ "shape":"DataCatalogType", - "documentation":"

Specifies the type of data catalog to update. Specify LAMBDA for a federated catalog, GLUE for AWS Glue Catalog, or HIVE for an external hive metastore.

" + "documentation":"

Specifies the type of data catalog to update. Specify LAMBDA for a federated catalog or HIVE for an external hive metastore.

Do not use the GLUE type. This refers to the AwsDataCatalog that already exists in your account, of which you can have only one. Specifying the GLUE type will result in an INVALID_INPUT error.

" }, "Description":{ "shape":"DescriptionString", @@ -1877,7 +2188,7 @@ }, "Parameters":{ "shape":"ParametersMap", - "documentation":"

Specifies the Lambda function or functions to use for updating the data catalog. This is a mapping whose values depend on the catalog type.

" + "documentation":"

Specifies the Lambda function or functions to use for updating the data catalog. This is a mapping whose values depend on the catalog type.

" } } }, @@ -1886,6 +2197,37 @@ "members":{ } }, + "UpdatePreparedStatementInput":{ + "type":"structure", + "required":[ + "StatementName", + "WorkGroup", + "QueryStatement" + ], + "members":{ + "StatementName":{ + "shape":"StatementName", + "documentation":"

The name of the prepared statement.

" + }, + "WorkGroup":{ + "shape":"WorkGroupName", + "documentation":"

The workgroup for the prepared statement.

" + }, + "QueryStatement":{ + "shape":"QueryString", + "documentation":"

The query string for the prepared statement.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

The description of the prepared statement.

" + } + } + }, + "UpdatePreparedStatementOutput":{ + "type":"structure", + "members":{ + } + }, "UpdateWorkGroupInput":{ "type":"structure", "required":["WorkGroup"], @@ -1962,6 +2304,10 @@ "RequesterPaysEnabled":{ "shape":"BoxedBoolean", "documentation":"

If set to true, allows members assigned to a workgroup to reference Amazon S3 Requester Pays buckets in queries. If set to false, workgroup members cannot query data from Requester Pays buckets, and queries that retrieve data from Requester Pays buckets cause an error. The default is false. For more information about Requester Pays buckets, see Requester Pays Buckets in the Amazon Simple Storage Service Developer Guide.

" + }, + "EngineVersion":{ + "shape":"EngineVersion", + "documentation":"

The engine version that all queries running on the workgroup use. Queries on the AmazonAthenaPreviewFunctionality workgroup run on the preview engine regardless of this setting.

" } }, "documentation":"

The configuration of the workgroup, which includes the location in Amazon S3 where query results are stored, the encryption option, if any, used for query results, whether the Amazon CloudWatch Metrics are enabled for the workgroup and whether workgroup settings override query settings, and the data usage limits for the amount of data scanned per query or per workgroup. The workgroup settings override is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" @@ -1992,6 +2338,10 @@ "RequesterPaysEnabled":{ "shape":"BoxedBoolean", "documentation":"

If set to true, allows members assigned to a workgroup to specify Amazon S3 Requester Pays buckets in queries. If set to false, workgroup members cannot query data from Requester Pays buckets, and queries that retrieve data from Requester Pays buckets cause an error. The default is false. For more information about Requester Pays buckets, see Requester Pays Buckets in the Amazon Simple Storage Service Developer Guide.

" + }, + "EngineVersion":{ + "shape":"EngineVersion", + "documentation":"

The engine version requested when a workgroup is updated. After the update, all queries on the workgroup run on the requested engine version. If no value was previously set, the default is Auto. Queries on the AmazonAthenaPreviewFunctionality workgroup run on the preview engine regardless of this setting.

" } }, "documentation":"

The configuration information that will be updated for this workgroup, which includes the location in Amazon S3 where query results are stored, the encryption option, if any, used for query results, whether the Amazon CloudWatch Metrics are enabled for the workgroup, whether the workgroup settings override the client-side settings, and the data usage limit for the amount of bytes scanned per query, if it is specified.

" @@ -2030,6 +2380,10 @@ "CreationTime":{ "shape":"Date", "documentation":"

The workgroup creation date and time.

" + }, + "EngineVersion":{ + "shape":"EngineVersion", + "documentation":"

The engine version setting for all queries on the workgroup. Queries on the AmazonAthenaPreviewFunctionality workgroup run on the preview engine regardless of this setting.

" } }, "documentation":"

The summary information for the workgroup, which includes its name, state, description, and the date and time it was created.

" diff --git a/botocore/data/auditmanager/2017-07-25/service-2.json b/botocore/data/auditmanager/2017-07-25/service-2.json index 13de0927..3578fa18 100644 --- a/botocore/data/auditmanager/2017-07-25/service-2.json +++ b/botocore/data/auditmanager/2017-07-25/service-2.json @@ -497,6 +497,7 @@ "input":{"shape":"GetServicesInScopeRequest"}, "output":{"shape":"GetServicesInScopeResponse"}, "errors":[ + {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], diff --git a/botocore/data/autoscaling/2011-01-01/service-2.json b/botocore/data/autoscaling/2011-01-01/service-2.json index 7c18ccdd..4b65e552 100644 --- a/botocore/data/autoscaling/2011-01-01/service-2.json +++ b/botocore/data/autoscaling/2011-01-01/service-2.json @@ -90,7 +90,7 @@ {"shape":"LimitExceededFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Creates or updates one or more scheduled scaling actions for an Auto Scaling group. If you leave a parameter unspecified when updating a scheduled scaling action, the corresponding value remains unchanged.

" + "documentation":"

Creates or updates one or more scheduled scaling actions for an Auto Scaling group.

" }, "CancelInstanceRefresh":{ "name":"CancelInstanceRefresh", @@ -108,7 +108,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"ActiveInstanceRefreshNotFoundFault"} ], - "documentation":"

Cancels an instance refresh operation in progress. Cancellation does not roll back any replacements that have already been completed, but it prevents new replacements from being started.

For more information, see Replacing Auto Scaling Instances Based on an Instance Refresh.

" + "documentation":"

Cancels an instance refresh operation in progress. Cancellation does not roll back any replacements that have already been completed, but it prevents new replacements from being started.

For more information, see Replacing Auto Scaling instances based on an instance refresh in the Amazon EC2 Auto Scaling User Guide.

" }, "CompleteLifecycleAction":{ "name":"CompleteLifecycleAction", @@ -357,7 +357,7 @@ {"shape":"InvalidNextToken"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Describes one or more instance refreshes.

You can determine the status of a request by looking at the Status parameter. The following are the possible statuses:

For more information, see Replacing Auto Scaling Instances Based on an Instance Refresh.

" + "documentation":"

Describes one or more instance refreshes.

You can determine the status of a request by looking at the Status parameter. The following are the possible statuses:

For more information, see Replacing Auto Scaling instances based on an instance refresh in the Amazon EC2 Auto Scaling User Guide.

" }, "DescribeLaunchConfigurations":{ "name":"DescribeLaunchConfigurations", @@ -504,7 +504,7 @@ {"shape":"InvalidNextToken"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Describes one or more scaling activities for the specified Auto Scaling group.

" + "documentation":"

Describes one or more scaling activities for the specified Auto Scaling group.

To view the scaling activities from the Amazon EC2 Auto Scaling console, choose the Activity tab of the Auto Scaling group. When scaling events occur, you see scaling activity messages in the Activity history. For more information, see Verifying a scaling activity for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.

" }, "DescribeScalingProcessTypes":{ "name":"DescribeScalingProcessTypes", @@ -748,7 +748,7 @@ {"shape":"LimitExceededFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Creates or updates a scheduled scaling action for an Auto Scaling group. If you leave a parameter unspecified when updating a scheduled scaling action, the corresponding value remains unchanged.

For more information, see Scheduled scaling in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Creates or updates a scheduled scaling action for an Auto Scaling group.

For more information, see Scheduled scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "RecordLifecycleActionHeartbeat":{ "name":"RecordLifecycleActionHeartbeat", @@ -837,7 +837,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"InstanceRefreshInProgressFault"} ], - "documentation":"

Starts a new instance refresh operation, which triggers a rolling replacement of all previously launched instances in the Auto Scaling group with a new group of instances.

If successful, this call creates a new instance refresh request with a unique ID that you can use to track its progress. To query its status, call the DescribeInstanceRefreshes API. To describe the instance refreshes that have already run, call the DescribeInstanceRefreshes API. To cancel an instance refresh operation in progress, use the CancelInstanceRefresh API.

For more information, see Replacing Auto Scaling Instances Based on an Instance Refresh.

" + "documentation":"

Starts a new instance refresh operation, which triggers a rolling replacement of all previously launched instances in the Auto Scaling group with a new group of instances.

If successful, this call creates a new instance refresh request with a unique ID that you can use to track its progress. To query its status, call the DescribeInstanceRefreshes API. To describe the instance refreshes that have already run, call the DescribeInstanceRefreshes API. To cancel an instance refresh operation in progress, use the CancelInstanceRefresh API.

For more information, see Replacing Auto Scaling instances based on an instance refresh in the Amazon EC2 Auto Scaling User Guide.

" }, "SuspendProcesses":{ "name":"SuspendProcesses", @@ -965,6 +965,14 @@ "Details":{ "shape":"XmlString", "documentation":"

The details about the activity.

" + }, + "AutoScalingGroupState":{ + "shape":"AutoScalingGroupState", + "documentation":"

The state of the Auto Scaling group, which is either InService or Deleted.

" + }, + "AutoScalingGroupARN":{ + "shape":"ResourceName", + "documentation":"

The Amazon Resource Name (ARN) of the Auto Scaling group.

" } }, "documentation":"

Describes scaling activity, which is a long-running process that represents a change to your Auto Scaling group, such as changing its size or replacing an instance.

" @@ -1243,6 +1251,11 @@ } } }, + "AutoScalingGroupState":{ + "type":"string", + "max":32, + "min":1 + }, "AutoScalingGroups":{ "type":"list", "member":{"shape":"AutoScalingGroup"} @@ -1456,6 +1469,15 @@ } }, "CapacityRebalanceEnabled":{"type":"boolean"}, + "CheckpointDelay":{ + "type":"integer", + "max":172800, + "min":0 + }, + "CheckpointPercentages":{ + "type":"list", + "member":{"shape":"NonZeroIntPercent"} + }, "ClassicLinkVPCSecurityGroups":{ "type":"list", "member":{"shape":"XmlStringMaxLen255"} @@ -1518,7 +1540,7 @@ }, "MixedInstancesPolicy":{ "shape":"MixedInstancesPolicy", - "documentation":"

An embedded object that specifies a mixed instances policy. The required parameters must be specified. If optional parameters are unspecified, their default values are used.

The policy includes parameters that not only define the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacities, but also the parameters that specify the instance configuration information—the launch template and instance types. The policy can also include a weight for each instance type and different launch templates for individual instance types. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

An embedded object that specifies a mixed instances policy. The required properties must be specified. If optional properties are unspecified, their default values are used.

The policy includes properties that not only define the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacities, but also the properties that specify the instance configuration information—the launch template and instance types. The policy can also include a weight for each instance type and different launch templates for individual instance types. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.

" }, "InstanceId":{ "shape":"XmlStringMaxLen19", @@ -2080,6 +2102,10 @@ "shape":"XmlStringMaxLen255", "documentation":"

The name of the Auto Scaling group.

" }, + "IncludeDeletedGroups":{ + "shape":"IncludeDeletedGroups", + "documentation":"

Indicates whether to include scaling activity from deleted Auto Scaling groups.
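For example, a hedged boto3 sketch that includes activity from deleted groups; the group name is a placeholder, and the Activities/Description response keys follow the standard DescribeScalingActivities output:

    import boto3

    autoscaling = boto3.client("autoscaling")

    response = autoscaling.describe_scaling_activities(
        AutoScalingGroupName="my-asg",
        IncludeDeletedGroups=True,  # also return activity from deleted groups
        MaxRecords=100,
    )
    for activity in response["Activities"]:
        print(activity["AutoScalingGroupName"],
              activity.get("AutoScalingGroupState"),
              activity["Description"])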

" + }, "MaxRecords":{ "shape":"MaxRecords", "documentation":"

The maximum number of items to return with this call. The default value is 100 and the maximum value is 100.

" @@ -2430,6 +2456,7 @@ "HealthCheckGracePeriod":{"type":"integer"}, "HeartbeatTimeout":{"type":"integer"}, "HonorCooldown":{"type":"boolean"}, + "IncludeDeletedGroups":{"type":"boolean"}, "Instance":{ "type":"structure", "required":[ @@ -2610,7 +2637,7 @@ "members":{ "OnDemandAllocationStrategy":{ "shape":"XmlString", - "documentation":"

Indicates how to allocate instance types to fulfill On-Demand capacity. The only valid value is prioritized, which is also the default value. This strategy uses the order of instance types in the overrides to define the launch priority of each instance type. The first instance type in the array is prioritized higher than the last. If all your On-Demand capacity cannot be fulfilled using your highest priority instance, then the Auto Scaling groups launches the remaining capacity using the second priority instance type, and so on.

" + "documentation":"

Indicates how to allocate instance types to fulfill On-Demand capacity. The only valid value is prioritized, which is also the default value. This strategy uses the order of instance types in the LaunchTemplateOverrides to define the launch priority of each instance type. The first instance type in the array is prioritized higher than the last. If all your On-Demand capacity cannot be fulfilled using your highest priority instance, then the Auto Scaling groups launches the remaining capacity using the second priority instance type, and so on.

" }, "OnDemandBaseCapacity":{ "shape":"OnDemandBaseCapacity", @@ -2622,7 +2649,7 @@ }, "SpotAllocationStrategy":{ "shape":"XmlString", - "documentation":"

Indicates how to allocate instances across Spot Instance pools. If the allocation strategy is capacity-optimized (recommended), the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity. If the allocation strategy is lowest-price, the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools that you specify. Defaults to lowest-price if not specified.

" + "documentation":"

Indicates how to allocate instances across Spot Instance pools.

If the allocation strategy is lowest-price, the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools that you specify. Defaults to lowest-price if not specified.

If the allocation strategy is capacity-optimized (recommended), the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity. Alternatively, you can use capacity-optimized-prioritized and set the order of instance types in the list of launch template overrides from highest to lowest priority (from first to last in the list). Amazon EC2 Auto Scaling honors the instance type priorities on a best-effort basis but optimizes for capacity first.
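As a sketch only, an InstancesDistribution using the prioritized strategy might look like the following; the LaunchTemplate/Overrides nesting and the instance type names are illustrative assumptions based on the properties named in this model:

    # Prioritize instance types by their order in the overrides list while
    # optimizing for available Spot capacity.
    mixed_instances_policy = {
        "LaunchTemplate": {
            "LaunchTemplateSpecification": {
                "LaunchTemplateName": "my-launch-template",  # placeholder
                "Version": "$Latest",
            },
            # Order defines priority: the first entry is preferred when capacity allows.
            "Overrides": [
                {"InstanceType": "c5.large"},
                {"InstanceType": "c5a.large"},
                {"InstanceType": "m5.large"},
            ],
        },
        "InstancesDistribution": {
            "OnDemandBaseCapacity": 1,
            "SpotAllocationStrategy": "capacity-optimized-prioritized",
        },
    }
    # Passed as MixedInstancesPolicy=mixed_instances_policy on
    # create_auto_scaling_group or update_auto_scaling_group.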

" }, "SpotInstancePools":{ "shape":"SpotInstancePools", @@ -2810,10 +2837,10 @@ }, "Overrides":{ "shape":"Overrides", - "documentation":"

Any parameters that you specify override the same parameters in the launch template. If not provided, Amazon EC2 Auto Scaling uses the instance type specified in the launch template when it launches an instance.

" + "documentation":"

Any properties that you specify override the same properties in the launch template. If not provided, Amazon EC2 Auto Scaling uses the instance type specified in the launch template when it launches an instance.

" } }, - "documentation":"

Describes a launch template and overrides.

You specify these parameters as part of a mixed instances policy.

When you update the launch template or overrides, existing Amazon EC2 instances continue to run. When scale out occurs, Amazon EC2 Auto Scaling launches instances to match the new settings. When scale in occurs, Amazon EC2 Auto Scaling terminates instances according to the group's termination policies.

" + "documentation":"

Describes a launch template and overrides.

You specify these properties as part of a mixed instances policy.

When you update the launch template or overrides, existing Amazon EC2 instances continue to run. When scale out occurs, Amazon EC2 Auto Scaling launches instances to match the new settings. When scale in occurs, Amazon EC2 Auto Scaling terminates instances according to the group's termination policies.

" }, "LaunchTemplateName":{ "type":"string", @@ -3133,13 +3160,18 @@ }, "InstancesDistribution":{ "shape":"InstancesDistribution", - "documentation":"

Specifies the instances distribution. If not provided, the value for each parameter in InstancesDistribution uses a default value.

" + "documentation":"

Specifies the instances distribution. If not provided, the value for each property in InstancesDistribution uses a default value.

" } }, - "documentation":"

Describes a mixed instances policy for an Auto Scaling group. With mixed instances, your Auto Scaling group can provision a combination of On-Demand Instances and Spot Instances across multiple instance types. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.

You can create a mixed instances policy for a new Auto Scaling group, or you can create it for an existing group by updating the group to specify MixedInstancesPolicy as the top-level parameter instead of a launch configuration or launch template.

" + "documentation":"

Describes a mixed instances policy for an Auto Scaling group. With mixed instances, your Auto Scaling group can provision a combination of On-Demand Instances and Spot Instances across multiple instance types. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.

You can create a mixed instances policy for a new Auto Scaling group, or you can create it for an existing group by updating the group to specify MixedInstancesPolicy as the top-level property instead of a launch configuration or launch template.

" }, "MonitoringEnabled":{"type":"boolean"}, "NoDevice":{"type":"boolean"}, + "NonZeroIntPercent":{ + "type":"integer", + "max":100, + "min":1 + }, "NotificationConfiguration":{ "type":"structure", "members":{ @@ -3412,11 +3444,11 @@ }, "EndTime":{ "shape":"TimestampType", - "documentation":"

The date and time for the recurring schedule to end. Amazon EC2 Auto Scaling does not perform the action after this time.

" + "documentation":"

The date and time for the recurring schedule to end, in UTC.

" }, "Recurrence":{ "shape":"XmlStringMaxLen255", - "documentation":"

The recurring schedule for this action, in Unix cron syntax format. This format consists of five fields separated by white spaces: [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week]. The value must be in quotes (for example, \"30 0 1 1,6,12 *\"). For more information about this format, see Crontab.

When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action starts and stops.

" + "documentation":"

The recurring schedule for this action. This format consists of five fields separated by white spaces: [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week]. The value must be in quotes (for example, \"30 0 1 1,6,12 *\"). For more information about this format, see Crontab.

When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action starts and stops.

Cron expressions use Universal Coordinated Time (UTC) by default.

" }, "MinSize":{ "shape":"AutoScalingGroupMinSize", @@ -3429,6 +3461,10 @@ "DesiredCapacity":{ "shape":"AutoScalingGroupDesiredCapacity", "documentation":"

The desired capacity is the initial capacity of the Auto Scaling group after the scheduled action runs and the capacity it attempts to maintain. It can scale beyond this capacity if you add more scaling conditions.

" + }, + "TimeZone":{ + "shape":"XmlStringMaxLen255", + "documentation":"

Specifies the time zone for a cron expression. If a time zone is not provided, UTC is used by default.

Valid values are the canonical names of the IANA time zones, derived from the IANA Time Zone Database (such as Etc/GMT+9 or Pacific/Tahiti). For more information, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones.
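A minimal sketch of a recurring scheduled action that uses the new TimeZone property, assuming boto3 against this service model; the group and action names are placeholders.

```python
import boto3

autoscaling = boto3.client("autoscaling")

# Run at 00:30 on the 1st of January, June, and December, evaluated in
# Pacific/Tahiti instead of the UTC default.
autoscaling.put_scheduled_update_group_action(
    AutoScalingGroupName="example-mixed-asg",
    ScheduledActionName="example-recurring-action",
    Recurrence="30 0 1 1,6,12 *",
    TimeZone="Pacific/Tahiti",
    MinSize=1,
    MaxSize=10,
    DesiredCapacity=5,
)
```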

" } } }, @@ -3476,9 +3512,17 @@ "InstanceWarmup":{ "shape":"RefreshInstanceWarmup", "documentation":"

The number of seconds until a newly launched instance is configured and ready to use. During this time, Amazon EC2 Auto Scaling does not immediately move on to the next replacement. The default is to use the value for the health check grace period defined for the group.

" + }, + "CheckpointPercentages":{ + "shape":"CheckpointPercentages", + "documentation":"

Threshold values for each checkpoint in ascending order. Each number must be unique. To replace all instances in the Auto Scaling group, the last number in the array must be 100.

For usage examples, see Adding checkpoints to an instance refresh in the Amazon EC2 Auto Scaling User Guide.

" + }, + "CheckpointDelay":{ + "shape":"CheckpointDelay", + "documentation":"

The amount of time, in seconds, to wait after a checkpoint before continuing. This property is optional, but if you specify a value for it, you must also specify a value for CheckpointPercentages. If you specify a value for CheckpointPercentages and not for CheckpointDelay, the CheckpointDelay defaults to 3600 (1 hour).
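The following sketch shows how the two checkpoint properties could be combined in a StartInstanceRefresh call via boto3; the group name and the specific percentages and delay are illustrative assumptions.

```python
import boto3

autoscaling = boto3.client("autoscaling")

# Pause after 25% and 50% of instances are replaced, waiting 10 minutes at
# each checkpoint; the final value of 100 replaces all instances.
response = autoscaling.start_instance_refresh(
    AutoScalingGroupName="example-mixed-asg",
    Strategy="Rolling",
    Preferences={
        "MinHealthyPercentage": 90,
        "InstanceWarmup": 300,
        "CheckpointPercentages": [25, 50, 100],
        "CheckpointDelay": 600,
    },
)
print(response["InstanceRefreshId"])
```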

" } }, - "documentation":"

Describes information used to start an instance refresh.

" + "documentation":"

Describes information used to start an instance refresh.

All properties are optional. However, if you specify a value for CheckpointDelay, you must also provide a value for CheckpointPercentages.

" }, "RefreshStrategy":{ "type":"string", @@ -3699,6 +3743,10 @@ "DesiredCapacity":{ "shape":"AutoScalingGroupDesiredCapacity", "documentation":"

The desired capacity is the initial capacity of the Auto Scaling group after the scheduled action runs and the capacity it attempts to maintain.

" + }, + "TimeZone":{ + "shape":"XmlStringMaxLen255", + "documentation":"

The time zone for the cron expression.

" } }, "documentation":"

Describes a scheduled scaling action.

" @@ -3717,11 +3765,11 @@ }, "EndTime":{ "shape":"TimestampType", - "documentation":"

The date and time for the recurring schedule to end. Amazon EC2 Auto Scaling does not perform the action after this time.

" + "documentation":"

The date and time for the recurring schedule to end, in UTC.

" }, "Recurrence":{ "shape":"XmlStringMaxLen255", - "documentation":"

The recurring schedule for the action, in Unix cron syntax format. This format consists of five fields separated by white spaces: [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week]. The value must be in quotes (for example, \"30 0 1 1,6,12 *\"). For more information about this format, see Crontab.

When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action starts and stops.

" + "documentation":"

The recurring schedule for the action, in Unix cron syntax format. This format consists of five fields separated by white spaces: [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week]. The value must be in quotes (for example, \"30 0 1 1,6,12 *\"). For more information about this format, see Crontab.

When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action starts and stops.

Cron expressions use Universal Coordinated Time (UTC) by default.

" }, "MinSize":{ "shape":"AutoScalingGroupMinSize", @@ -3734,9 +3782,13 @@ "DesiredCapacity":{ "shape":"AutoScalingGroupDesiredCapacity", "documentation":"

The desired capacity is the initial capacity of the Auto Scaling group after the scheduled action runs and the capacity it attempts to maintain.

" + }, + "TimeZone":{ + "shape":"XmlStringMaxLen255", + "documentation":"

Specifies the time zone for a cron expression. If a time zone is not provided, UTC is used by default.

Valid values are the canonical names of the IANA time zones, derived from the IANA Time Zone Database (such as Etc/GMT+9 or Pacific/Tahiti). For more information, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones.

" } }, - "documentation":"

Describes information used for one or more scheduled scaling action updates in a BatchPutScheduledUpdateGroupAction operation.

When updating a scheduled scaling action, all optional parameters are left unchanged if not specified.

" + "documentation":"

Describes information used for one or more scheduled scaling action updates in a BatchPutScheduledUpdateGroupAction operation.

" }, "ScheduledUpdateGroupActionRequests":{ "type":"list", @@ -4061,7 +4113,7 @@ }, "MixedInstancesPolicy":{ "shape":"MixedInstancesPolicy", - "documentation":"

An embedded object that specifies a mixed instances policy. When you make changes to an existing policy, all optional parameters are left unchanged if not specified. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

An embedded object that specifies a mixed instances policy. When you make changes to an existing policy, all optional properties are left unchanged if not specified. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.

" }, "MinSize":{ "shape":"AutoScalingGroupMinSize", diff --git a/botocore/data/backup/2018-11-15/service-2.json b/botocore/data/backup/2018-11-15/service-2.json index e26bcf83..7f36a395 100644 --- a/botocore/data/backup/2018-11-15/service-2.json +++ b/botocore/data/backup/2018-11-15/service-2.json @@ -156,10 +156,11 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, + {"shape":"InvalidResourceStateException"}, {"shape":"ServiceUnavailableException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Deletes the recovery point specified by a recovery point ID.

", + "documentation":"

Deletes the recovery point specified by a recovery point ID.

If the recovery point ID belongs to a continuous backup, calling this endpoint deletes the existing continuous backup and stops future continuous backup.

", "idempotent":true }, "DescribeBackupJob":{ @@ -223,6 +224,7 @@ "input":{"shape":"DescribeGlobalSettingsInput"}, "output":{"shape":"DescribeGlobalSettingsOutput"}, "errors":[ + {"shape":"InvalidRequestException"}, {"shape":"ServiceUnavailableException"} ], "documentation":"

Describes the global settings of the AWS account, including whether it is opted in to cross-account backup.

" @@ -292,6 +294,23 @@ "documentation":"

Returns metadata associated with a restore job that is specified by a job ID.

", "idempotent":true }, + "DisassociateRecoveryPoint":{ + "name":"DisassociateRecoveryPoint", + "http":{ + "method":"POST", + "requestUri":"/backup-vaults/{backupVaultName}/recovery-points/{recoveryPointArn}/disassociate" + }, + "input":{"shape":"DisassociateRecoveryPointInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"InvalidResourceStateException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Deletes the specified continuous backup recovery point from AWS Backup and releases control of that continuous backup to the source service, such as Amazon RDS. The source service will continue to create and retain continuous backups using the lifecycle that you specified in your original backup plan.

Does not support snapshot backup recovery points.
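A minimal sketch of calling the new operation through boto3; the vault name and recovery point ARN are placeholders.

```python
import boto3

backup = boto3.client("backup")

# Release a continuous backup recovery point back to the source service.
backup.disassociate_recovery_point(
    BackupVaultName="example-vault",
    RecoveryPointArn="arn:aws:backup:us-east-1:123456789012:recovery-point:example-id",
)
```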

" + }, "ExportBackupPlanTemplate":{ "name":"ExportBackupPlanTemplate", "http":{ @@ -322,7 +341,7 @@ {"shape":"MissingParameterValueException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Returns BackupPlan details for the specified BackupPlanId. Returns the body of a backup plan in JSON format, in addition to plan metadata.

", + "documentation":"

Returns BackupPlan details for the specified BackupPlanId. The details are the body of a backup plan in JSON format, in addition to plan metadata.

", "idempotent":true }, "GetBackupPlanFromJSON":{ @@ -450,7 +469,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Returns a list of existing backup jobs for an authenticated account.

", + "documentation":"

Returns a list of existing backup jobs for an authenticated account for the last 30 days. For a longer period of time, consider using these monitoring tools.

", "idempotent":true }, "ListBackupPlanTemplates":{ @@ -698,9 +717,10 @@ {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, {"shape":"ServiceUnavailableException"}, - {"shape":"LimitExceededException"} + {"shape":"LimitExceededException"}, + {"shape":"InvalidRequestException"} ], - "documentation":"

Starts a job to create a one-time copy of the specified resource.

", + "documentation":"

Starts a job to create a one-time copy of the specified resource.

Does not support continuous backups.

", "idempotent":true }, "StartRestoreJob":{ @@ -717,7 +737,7 @@ {"shape":"MissingParameterValueException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Recovers the saved resource identified by an Amazon Resource Name (ARN).

", + "documentation":"

Recovers the saved resource identified by an Amazon Resource Name (ARN).

", "idempotent":true }, "StopBackupJob":{ @@ -815,7 +835,7 @@ {"shape":"MissingParameterValueException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Sets the transition lifecycle of a recovery point.

The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. AWS Backup transitions and expires backups automatically according to the lifecycle that you define.

Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “expire after days” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.

Only Amazon EFS file system backups can be transitioned to cold storage.

", + "documentation":"

Sets the transition lifecycle of a recovery point.

The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. AWS Backup transitions and expires backups automatically according to the lifecycle that you define.

Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “expire after days” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.

Only Amazon EFS file system backups can be transitioned to cold storage.

Does not support continuous backups.
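A sketch of a lifecycle update that satisfies the 90-day rule described above, assuming boto3; the vault name, recovery point ARN, and day values are placeholders.

```python
import boto3

backup = boto3.client("backup")

# DeleteAfterDays must be at least 90 days greater than
# MoveToColdStorageAfterDays (here 120 >= 30 + 90).
backup.update_recovery_point_lifecycle(
    BackupVaultName="example-vault",
    RecoveryPointArn="arn:aws:backup:us-east-1:123456789012:recovery-point:example-id",
    Lifecycle={
        "MoveToColdStorageAfterDays": 30,
        "DeleteAfterDays": 120,
    },
)
```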

", "idempotent":true }, "UpdateRegionSettings":{ @@ -1153,6 +1173,10 @@ "CopyActions":{ "shape":"CopyActions", "documentation":"

An array of CopyAction objects, which contains the details of the copy operation.

" + }, + "EnableContinuousBackup":{ + "shape":"Boolean", + "documentation":"

Specifies whether AWS Backup creates continuous backups. True causes AWS Backup to create continuous backups capable of point-in-time restore (PITR). False (or not specified) causes AWS Backup to create snapshot backups.

" } }, "documentation":"

Specifies a scheduled task used to back up a selection of resources.

" @@ -1195,6 +1219,10 @@ "CopyActions":{ "shape":"CopyActions", "documentation":"

An array of CopyAction objects, which contains the details of the copy operation.

" + }, + "EnableContinuousBackup":{ + "shape":"Boolean", + "documentation":"

Specifies whether AWS Backup creates continuous backups. True causes AWS Backup to create continuous backups capable of point-in-time restore (PITR). False (or not specified) causes AWS Backup to create snapshot backups.
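A hedged sketch of a backup plan rule that opts in to continuous backups through this new flag, using boto3; the plan name, rule name, vault name, schedule, and retention are assumed placeholder values.

```python
import boto3

backup = boto3.client("backup")

backup.create_backup_plan(
    BackupPlan={
        "BackupPlanName": "example-pitr-plan",
        "Rules": [
            {
                "RuleName": "continuous-rule",
                "TargetBackupVaultName": "example-vault",
                "ScheduleExpression": "cron(0 5 ? * * *)",
                # True requests continuous backups with point-in-time restore;
                # omitting it (or False) keeps snapshot backups.
                "EnableContinuousBackup": True,
                "Lifecycle": {"DeleteAfterDays": 35},
            }
        ],
    }
)
```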

" } }, "documentation":"

Specifies a scheduled task used to back up a selection of resources.

" @@ -1455,7 +1483,7 @@ "CreatedBy":{"shape":"RecoveryPointCreator"}, "ResourceType":{ "shape":"ResourceType", - "documentation":"

The type of AWS resource to be copied; for example, an Amazon Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database Service (Amazon RDS) database.

" + "documentation":"

The type of AWS resource to be copied; for example, an Amazon Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database Service (Amazon RDS) database.

" } }, "documentation":"

Contains detailed information about a copy job.

" @@ -2114,6 +2142,27 @@ } } }, + "DisassociateRecoveryPointInput":{ + "type":"structure", + "required":[ + "BackupVaultName", + "RecoveryPointArn" + ], + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The unique name of an AWS Backup vault. Required.

", + "location":"uri", + "locationName":"backupVaultName" + }, + "RecoveryPointArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies an AWS Backup recovery point. Required.

", + "location":"uri", + "locationName":"recoveryPointArn" + } + } + }, "ExportBackupPlanTemplateInput":{ "type":"structure", "required":["BackupPlanId"], @@ -2432,6 +2481,23 @@ "documentation":"

Indicates that something is wrong with the input to the request. For example, a parameter is of the wrong type.

", "exception":true }, + "InvalidResourceStateException":{ + "type":"structure", + "members":{ + "Code":{"shape":"string"}, + "Message":{"shape":"string"}, + "Type":{ + "shape":"string", + "documentation":"

" + }, + "Context":{ + "shape":"string", + "documentation":"

" + } + }, + "documentation":"

AWS Backup is already performing an action on this recovery point. It can't perform the action you requested until the first action finishes. Try again later.

", + "exception":true + }, "IsEnabled":{"type":"boolean"}, "Lifecycle":{ "type":"structure", @@ -3556,7 +3622,7 @@ }, "Tags":{ "shape":"Tags", - "documentation":"

Key-value pairs that are used to help organize your resources. You can assign your own metadata to the resources you create.

" + "documentation":"

Key-value pairs that are used to help organize your resources. You can assign your own metadata to the resources you create.

" } } }, diff --git a/botocore/data/batch/2016-08-10/service-2.json b/botocore/data/batch/2016-08-10/service-2.json index 8994532c..2344cba5 100644 --- a/botocore/data/batch/2016-08-10/service-2.json +++ b/botocore/data/batch/2016-08-10/service-2.json @@ -24,7 +24,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Cancels a job in an AWS Batch job queue. Jobs that are in the SUBMITTED, PENDING, or RUNNABLE state are canceled. Jobs that have progressed to STARTING or RUNNING are not canceled (but the API operation still succeeds, even if no job is canceled); these jobs must be terminated with the TerminateJob operation.

" + "documentation":"

Cancels a job in an AWS Batch job queue. Jobs that are in the SUBMITTED, PENDING, or RUNNABLE state are canceled. Jobs that have progressed to STARTING or RUNNING aren't canceled, but the API operation still succeeds, even if no job is canceled. These jobs must be terminated with the TerminateJob operation.

" }, "CreateComputeEnvironment":{ "name":"CreateComputeEnvironment", @@ -38,7 +38,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Creates an AWS Batch compute environment. You can create MANAGED or UNMANAGED compute environments. MANAGED compute environments can use Amazon EC2 or AWS Fargate resources. UNMANAGED compute environments can only use EC2 resources.

In a managed compute environment, AWS Batch manages the capacity and instance types of the compute resources within the environment. This is based on the compute resource specification that you define or the launch template that you specify when you create the compute environment. You can choose either to use EC2 On-Demand Instances and EC2 Spot Instances, or to use Fargate and Fargate Spot capacity in your managed compute environment. You can optionally set a maximum price so that Spot Instances only launch when the Spot Instance price is less than a specified percentage of the On-Demand price.

Multi-node parallel jobs are not supported on Spot Instances.

In an unmanaged compute environment, you can manage your own EC2 compute resources and have a lot of flexibility with how you configure your compute resources. For example, you can use custom AMI. However, you need to verify that your AMI meets the Amazon ECS container instance AMI specification. For more information, see container instance AMIs in the Amazon Elastic Container Service Developer Guide. After you have created your unmanaged compute environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that's associated with it. Then, manually launch your container instances into that Amazon ECS cluster. For more information, see Launching an Amazon ECS container instance in the Amazon Elastic Container Service Developer Guide.

AWS Batch doesn't upgrade the AMIs in a compute environment after it's created. For example, it doesn't update the AMIs when a newer version of the Amazon ECS-optimized AMI is available. Therefore, you're responsible for the management of the guest operating system (including updates and security patches) and any additional application software or utilities that you install on the compute resources. To use a new AMI for your AWS Batch jobs, complete these steps:

  1. Create a new compute environment with the new AMI.

  2. Add the compute environment to an existing job queue.

  3. Remove the earlier compute environment from your job queue.

  4. Delete the earlier compute environment.

" + "documentation":"

Creates an AWS Batch compute environment. You can create MANAGED or UNMANAGED compute environments. MANAGED compute environments can use Amazon EC2 or AWS Fargate resources. UNMANAGED compute environments can only use EC2 resources.

In a managed compute environment, AWS Batch manages the capacity and instance types of the compute resources within the environment. This is based on the compute resource specification that you define or the launch template that you specify when you create the compute environment. You can choose to use EC2 On-Demand Instances and EC2 Spot Instances, or you can use Fargate and Fargate Spot capacity in your managed compute environment. You can optionally set a maximum price so that Spot Instances only launch when the Spot Instance price is less than a specified percentage of the On-Demand price.

Multi-node parallel jobs aren't supported on Spot Instances.

In an unmanaged compute environment, you can manage your own EC2 compute resources and have a lot of flexibility with how you configure your compute resources. For example, you can use custom AMIs. However, you must verify that each of your AMIs meets the Amazon ECS container instance AMI specification. For more information, see container instance AMIs in the Amazon Elastic Container Service Developer Guide. After you create your unmanaged compute environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that's associated with it. Then, launch your container instances into that Amazon ECS cluster. For more information, see Launching an Amazon ECS container instance in the Amazon Elastic Container Service Developer Guide.

AWS Batch doesn't upgrade the AMIs in a compute environment after the environment is created. For example, it doesn't update the AMIs when a newer version of the Amazon ECS optimized AMI is available. Therefore, you're responsible for managing the guest operating system (including its updates and security patches) and any additional application software or utilities that you install on the compute resources. To use a new AMI for your AWS Batch jobs, complete these steps:

  1. Create a new compute environment with the new AMI.

  2. Add the compute environment to an existing job queue.

  3. Remove the earlier compute environment from your job queue.

  4. Delete the earlier compute environment.

" }, "CreateJobQueue":{ "name":"CreateJobQueue", @@ -52,7 +52,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Creates an AWS Batch job queue. When you create a job queue, you associate one or more compute environments to the queue and assign an order of preference for the compute environments.

You also set a priority to the job queue that determines the order in which the AWS Batch scheduler places jobs onto its associated compute environments. For example, if a compute environment is associated with more than one job queue, the job queue with a higher priority is given preference for scheduling jobs to that compute environment.

" + "documentation":"

Creates an AWS Batch job queue. When you create a job queue, you associate one or more compute environments to the queue and assign an order of preference for the compute environments.

You also set a priority to the job queue that determines the order in which the AWS Batch scheduler places jobs onto its associated compute environments. For example, if a compute environment is associated with more than one job queue, the job queue with a higher priority is given preference for scheduling jobs to that compute environment.

" }, "DeleteComputeEnvironment":{ "name":"DeleteComputeEnvironment", @@ -66,7 +66,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Deletes an AWS Batch compute environment.

Before you can delete a compute environment, you must set its state to DISABLED with the UpdateComputeEnvironment API operation and disassociate it from any job queues with the UpdateJobQueue API operation. Compute environments that use AWS Fargate resources must terminate all active jobs on that compute environment before deleting the compute environment. If this isn't done, the compute environment will end up in an invalid state.

" + "documentation":"

Deletes an AWS Batch compute environment.

Before you can delete a compute environment, you must set its state to DISABLED with the UpdateComputeEnvironment API operation and disassociate it from any job queues with the UpdateJobQueue API operation. Compute environments that use AWS Fargate resources must terminate all active jobs on that compute environment before deleting the compute environment. If this isn't done, the compute environment enters an invalid state.

" }, "DeleteJobQueue":{ "name":"DeleteJobQueue", @@ -164,7 +164,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Returns a list of AWS Batch jobs.

You must specify only one of the following items:

You can filter the results by job status with the jobStatus parameter. If you don't specify a status, only RUNNING jobs are returned.

" + "documentation":"

Returns a list of AWS Batch jobs.

You must specify only one of the following items:

You can filter the results by job status with the jobStatus parameter. If you don't specify a status, only RUNNING jobs are returned.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -206,7 +206,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Submits an AWS Batch job from a job definition. Parameters specified during SubmitJob override parameters defined in the job definition.

Jobs run on Fargate resources don't run for more than 14 days. After 14 days, the Fargate resources might no longer be available and the job is terminated.

" + "documentation":"

Submits an AWS Batch job from a job definition. Parameters that are specified during SubmitJob override parameters defined in the job definition. vCPU and memory requirements that are specified in the ResourceRequirements objects in the job definition are the exception. They can't be overridden this way using the memory and vcpus parameters. Rather, you must specify updates to job definition parameters in a ResourceRequirements object that's included in the containerOverrides parameter.

Jobs that run on Fargate resources can't be guaranteed to run for more than 14 days. This is because, after 14 days, Fargate resources might become unavailable and the job might be terminated.
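The sketch below illustrates the override pattern described above, assuming boto3; the job, queue, and job definition names and the resource values are placeholders.

```python
import boto3

batch = boto3.client("batch")

# vCPU and memory overrides go in resourceRequirements, not in the
# deprecated vcpus/memory fields of containerOverrides.
batch.submit_job(
    jobName="example-job",
    jobQueue="example-queue",
    jobDefinition="example-job-definition",
    containerOverrides={
        "resourceRequirements": [
            {"type": "VCPU", "value": "2"},
            {"type": "MEMORY", "value": "4096"},
        ],
    },
)
```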

" }, "TagResource":{ "name":"TagResource", @@ -563,7 +563,7 @@ }, "allocationStrategy":{ "shape":"CRAllocationStrategy", - "documentation":"

The allocation strategy to use for the compute resource if not enough instances of the best fitting instance type can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation Strategies in the AWS Batch User Guide.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

BEST_FIT (default)

AWS Batch selects an instance type that best fits the needs of the jobs with a preference for the lowest-cost instance type. If additional instances of the selected instance type aren't available, AWS Batch will wait for the additional instances to be available. If there are not enough instances available, or if the user is hitting Amazon EC2 service limits then additional jobs aren't run until currently running jobs have completed. This allocation strategy keeps costs lower but can limit scaling. If you are using Spot Fleets with BEST_FIT then the Spot Fleet IAM Role must be specified.

BEST_FIT_PROGRESSIVE

AWS Batch will select additional instance types that are large enough to meet the requirements of the jobs in the queue, with a preference for instance types with a lower cost per unit vCPU. If additional instances of the previously selected instance types aren't available, AWS Batch will select new instance types.

SPOT_CAPACITY_OPTIMIZED

AWS Batch will select one or more instance types that are large enough to meet the requirements of the jobs in the queue, with a preference for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED strategies, AWS Batch might need to go above maxvCpus to meet your capacity requirements. In this event, AWS Batch never exceeds maxvCpus by more than a single instance.

" + "documentation":"

The allocation strategy to use for the compute resource if not enough instances of the best fitting instance type can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation Strategies in the AWS Batch User Guide.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

BEST_FIT (default)

AWS Batch selects an instance type that best fits the needs of the jobs with a preference for the lowest-cost instance type. If additional instances of the selected instance type aren't available, AWS Batch waits for the additional instances to be available. If there aren't enough instances available, or if the user is hitting Amazon EC2 service limits, then additional jobs aren't run until the currently running jobs have completed. This allocation strategy keeps costs lower but can limit scaling. If you are using Spot Fleets with BEST_FIT, then the Spot Fleet IAM Role must be specified.

BEST_FIT_PROGRESSIVE

AWS Batch will select additional instance types that are large enough to meet the requirements of the jobs in the queue, with a preference for instance types with a lower cost per unit vCPU. If additional instances of the previously selected instance types aren't available, AWS Batch will select new instance types.

SPOT_CAPACITY_OPTIMIZED

AWS Batch will select one or more instance types that are large enough to meet the requirements of the jobs in the queue, with a preference for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED strategies, AWS Batch might need to go above maxvCpus to meet your capacity requirements. In this event, AWS Batch never exceeds maxvCpus by more than a single instance.
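A hedged sketch of a computeResources block (as it might be passed to CreateComputeEnvironment via boto3) that selects one of these strategies; the subnet and security group IDs, role name, and vCPU limits are placeholders.

```python
# Illustrative computeResources block for a Spot compute environment using
# the SPOT_CAPACITY_OPTIMIZED allocation strategy.
compute_resources = {
    "type": "SPOT",
    "allocationStrategy": "SPOT_CAPACITY_OPTIMIZED",
    "minvCpus": 0,
    "maxvCpus": 256,  # may be exceeded by at most one instance
    "instanceTypes": ["optimal"],
    "subnets": ["subnet-0123456789abcdef0"],
    "securityGroupIds": ["sg-0123456789abcdef0"],
    "instanceRole": "ecsInstanceRole",
}
```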

" }, "minvCpus":{ "shape":"Integer", @@ -571,7 +571,7 @@ }, "maxvCpus":{ "shape":"Integer", - "documentation":"

The maximum number of Amazon EC2 vCPUs that a compute environment can reach.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies, AWS Batch might need to go above maxvCpus to meet your capacity requirements. In this event, AWS Batch will never go above maxvCpus by more than a single instance (e.g., no more than a single instance from among those specified in your compute environment).

" + "documentation":"

The maximum number of Amazon EC2 vCPUs that a compute environment can reach.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies, AWS Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, AWS Batch never exceeds maxvCpus by more than a single instance. For example, no more than a single instance from among those specified in your compute environment is allocated.

" }, "desiredvCpus":{ "shape":"Integer", @@ -579,7 +579,7 @@ }, "instanceTypes":{ "shape":"StringList", - "documentation":"

The instances types that can be launched. You can specify instance families to launch any instance type within those families (for example, c5 or p3), or you can specify specific sizes within a family (such as c5.8xlarge). You can also choose optimal to select instance types (from the C4, M4, and R4 instance families) on the fly that match the demand of your job queues.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

When you create a compute environment, the instance types that you select for the compute environment must share the same architecture. For example, you can't mix x86 and ARM instances in the same compute environment.

Currently, optimal uses instance types from the C4, M4, and R4 instance families. In Regions that don't have instance types from those instance families, instance types from the C5, M5. and R5 instance families are used.

" + "documentation":"

The instance types that can be launched. You can specify instance families to launch any instance type within those families (for example, c5 or p3), or you can specify specific sizes within a family (such as c5.8xlarge). You can also choose optimal to select instance types (from the C4, M4, and R4 instance families) that match the demand of your job queues.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

When you create a compute environment, the instance types that you select for the compute environment must share the same architecture. For example, you can't mix x86 and ARM instances in the same compute environment.

Currently, optimal uses instance types from the C4, M4, and R4 instance families. In Regions that don't have instance types from those instance families, instance types from the C5, M5, and R5 instance families are used.

" }, "imageId":{ "shape":"String", @@ -589,11 +589,11 @@ }, "subnets":{ "shape":"StringList", - "documentation":"

The VPC subnets into which the compute resources are launched. These subnets must be within the same VPC. This parameter is required for jobs running on Fargate resources, where it can contain up to 16 subnets. For more information, see VPCs and Subnets in the Amazon VPC User Guide.

" + "documentation":"

The VPC subnets into which the compute resources are launched. These subnets must be within the same VPC. Fargate compute resources can contain up to 16 subnets. For more information, see VPCs and Subnets in the Amazon VPC User Guide.

" }, "securityGroupIds":{ "shape":"StringList", - "documentation":"

The Amazon EC2 security groups associated with instances launched in the compute environment. One or more security groups must be specified, either in securityGroupIds or using a launch template referenced in launchTemplate. This parameter is required for jobs running on Fargate resources and must contain at least one security group. (Fargate does not support launch templates.) If security groups are specified using both securityGroupIds and launchTemplate, the values in securityGroupIds will be used.

" + "documentation":"

The Amazon EC2 security groups associated with instances launched in the compute environment. One or more security groups must be specified, either in securityGroupIds or using a launch template referenced in launchTemplate. This parameter is required for jobs running on Fargate resources and must contain at least one security group. Fargate doesn't support launch templates. If security groups are specified using both securityGroupIds and launchTemplate, the values in securityGroupIds are used.

" }, "ec2KeyPair":{ "shape":"String", @@ -605,7 +605,7 @@ }, "tags":{ "shape":"TagsMap", - "documentation":"

Key-value pair tags to be applied to EC2 resources that are launched in the compute environment. For AWS Batch, these take the form of \"String1\": \"String2\", where String1 is the tag key and String2 is the tag value−for example, { \"Name\": \"AWS Batch Instance - C4OnDemand\" }. This is helpful for recognizing your AWS Batch instances in the Amazon EC2 console. These tags can't be updated or removed after the compute environment has been created; any changes require creating a new compute environment and removing the old compute environment. These tags are not seen when using the AWS Batch ListTagsForResource API operation.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

" + "documentation":"

Key-value pair tags to be applied to EC2 resources that are launched in the compute environment. For AWS Batch, these take the form of \"String1\": \"String2\", where String1 is the tag key and String2 is the tag value−for example, { \"Name\": \"AWS Batch Instance - C4OnDemand\" }. This is helpful for recognizing your AWS Batch instances in the Amazon EC2 console. These tags can't be updated or removed after the compute environment has been created; any changes require creating a new compute environment and removing the old compute environment. These tags aren't seen when using the AWS Batch ListTagsForResource API operation.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

" }, "placementGroup":{ "shape":"String", @@ -639,7 +639,7 @@ }, "maxvCpus":{ "shape":"Integer", - "documentation":"

The maximum number of Amazon EC2 vCPUs that an environment can reach.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies, AWS Batch might need to go above maxvCpus to meet your capacity requirements. In this event, AWS Batch will never go above maxvCpus by more than a single instance (e.g., no more than a single instance from among those specified in your compute environment).

" + "documentation":"

The maximum number of Amazon EC2 vCPUs that an environment can reach.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies, AWS Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, AWS Batch never exceeds maxvCpus by more than a single instance. That is, it allocates no more than a single instance from among those specified in your compute environment.

" }, "desiredvCpus":{ "shape":"Integer", @@ -647,7 +647,7 @@ }, "subnets":{ "shape":"StringList", - "documentation":"

The VPC subnets that the compute resources are launched into. This parameter is required for jobs running on Fargate compute resources, where it can contain up to 16 subnets. For more information, see VPCs and Subnets in the Amazon VPC User Guide. This can't be specified for EC2 compute resources. Providing an empty list will be handled as if this parameter wasn't specified and no change is made.

" + "documentation":"

The VPC subnets that the compute resources are launched into. Fargate compute resources can contain up to 16 subnets. Providing an empty list will be handled as if this parameter wasn't specified and no change is made. This can't be specified for EC2 compute resources. For more information, see VPCs and Subnets in the Amazon VPC User Guide.

" }, "securityGroupIds":{ "shape":"StringList", @@ -665,7 +665,7 @@ }, "vcpus":{ "shape":"Integer", - "documentation":"

The number of vCPUs reserved for the container. Jobs running on EC2 resources can specify the vCPU requirement for the job using resourceRequirements but the vCPU requirements can't be specified both here and in the resourceRequirement object. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one vCPU. This is required but can be specified in several places. It must be specified for each node at least once.

This parameter isn't applicable to jobs running on Fargate resources. Jobs running on Fargate resources must specify the vCPU requirement for the job using resourceRequirements.

" + "documentation":"

The number of vCPUs reserved for the container. For jobs that run on EC2 resources, you can specify the vCPU requirement for the job using resourceRequirements, but you can't specify the vCPU requirements in both the vcpus and resourceRequirement object. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one vCPU. This is required but can be specified in several places. It must be specified for each node at least once.

This parameter isn't applicable to jobs that run on Fargate resources. For jobs that run on Fargate resources, you must specify the vCPU requirement for the job using resourceRequirements.

" }, "memory":{ "shape":"Integer", @@ -749,7 +749,7 @@ }, "logConfiguration":{ "shape":"LogConfiguration", - "documentation":"

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However the container might use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance. Or, alternatively, it must be configured on a different log server for remote logging options. For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

AWS Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers might be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance. Or, alternatively, it must be configured on a different log server for remote logging options. For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

AWS Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers might be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" }, "secrets":{ "shape":"SecretList", @@ -771,13 +771,13 @@ "members":{ "vcpus":{ "shape":"Integer", - "documentation":"

This parameter is deprecated and not supported for jobs run on Fargate resources, see resourceRequirement. For jobs run on EC2 resources, the number of vCPUs to reserve for the container. This value overrides the value set in the job definition. Jobs run on EC2 resources can specify the vCPU requirement using resourceRequirement but the vCPU requirements can't be specified both here and in resourceRequirement. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one vCPU.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided. Jobs running on Fargate resources must specify the vCPU requirement for the job using resourceRequirements.

", + "documentation":"

This parameter indicates the number of vCPUs reserved for the container. It overrides the vcpus parameter that's set in the job definition, but doesn't override any vCPU requirement specified in the resourceRequirement structure in the job definition.

This parameter is supported for jobs that run on EC2 resources, but isn't supported for jobs that run on Fargate resources. For Fargate resources, you can only use resourceRequirement. For EC2 resources, you can use either this parameter or resourceRequirement but not both.

This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one vCPU.

This parameter isn't applicable to jobs that run on Fargate resources and shouldn't be provided. For jobs that run on Fargate resources, you must specify the vCPU requirement for the job using resourceRequirements.

", "deprecated":true, "deprecatedMessage":"This field is deprecated, use resourceRequirements instead." }, "memory":{ "shape":"Integer", - "documentation":"

This parameter is deprecated and not supported for jobs run on Fargate resources, use ResourceRequirement. For jobs run on EC2 resource, the number of MiB of memory reserved for the job. This value overrides the value set in the job definition.

", + "documentation":"

This parameter indicates the amount of memory (in MiB) that's reserved for the job. It overrides the memory parameter set in the job definition, but doesn't override any memory requirement specified in the ResourceRequirement structure in the job definition.

This parameter is supported for jobs that run on EC2 resources, but isn't supported for jobs that run on Fargate resources. For these resources, use resourceRequirement instead.

", "deprecated":true, "deprecatedMessage":"This field is deprecated, use resourceRequirements instead." }, @@ -809,13 +809,13 @@ }, "vcpus":{ "shape":"Integer", - "documentation":"

This parameter is deprecated and not supported for jobs run on Fargate resources, see resourceRequirement. The number of vCPUs reserved for the container. Jobs running on EC2 resources can specify the vCPU requirement for the job using resourceRequirements but the vCPU requirements can't be specified both here and in the resourceRequirement structure. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one vCPU. This is required but can be specified in several places. It must be specified for each node at least once.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided. Jobs running on Fargate resources must specify the vCPU requirement for the job using resourceRequirements.

", + "documentation":"

The number of vCPUs reserved for the job. Each vCPU is equivalent to 1,024 CPU shares. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. The number of vCPUs must be specified but can be specified in several places. You must specify it at least once for each node.

This parameter is supported on EC2 resources but isn't supported for jobs that run on Fargate resources. For these resources, use resourceRequirement instead. You can use this parameter or resourceRequirements structure but not both.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided. For jobs that run on Fargate resources, you must specify the vCPU requirement for the job using resourceRequirements.

", "deprecated":true, "deprecatedMessage":"This field is deprecated, use resourceRequirements instead." }, "memory":{ "shape":"Integer", - "documentation":"

This parameter is deprecated and not supported for jobs run on Fargate resources, use ResourceRequirement. For jobs run on EC2 resources can specify the memory requirement using the ResourceRequirement structure. The hard limit (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run. You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places; it must be specified for each node at least once.

If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see Memory Management in the AWS Batch User Guide.

", + "documentation":"

This parameter indicates the memory hard limit (in MiB) for a container. If your container attempts to exceed the specified number, it is terminated. You must specify at least 4 MiB of memory for a job using this parameter. The memory hard limit can be specified in several places. It must be specified for each node at least once.

This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

This parameter is supported on EC2 resources but isn't supported on Fargate resources. For Fargate resources, you should specify the memory requirement using resourceRequirements; you can also do this for EC2 resources.

If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see Memory Management in the AWS Batch User Guide.

", "deprecated":true, "deprecatedMessage":"This field is deprecated, use resourceRequirements instead." }, @@ -829,7 +829,7 @@ }, "executionRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. Jobs running on Fargate resources must provide an execution role. For more information, see AWS Batch execution IAM role in the AWS Batch User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. For jobs that run on Fargate resources, you must provide an execution role. For more information, see AWS Batch execution IAM role in the AWS Batch User Guide.

" }, "volumes":{ "shape":"Volumes", @@ -861,7 +861,7 @@ }, "instanceType":{ "shape":"String", - "documentation":"

The instance type to use for a multi-node parallel job. All node groups in a multi-node parallel job must use the same instance type.

This parameter isn't applicable to single-node container jobs or for jobs running on Fargate resources and shouldn't be provided.

" + "documentation":"

The instance type to use for a multi-node parallel job. All node groups in a multi-node parallel job must use the same instance type.

This parameter isn't applicable to single-node container jobs or for jobs that run on Fargate resources and shouldn't be provided.

" }, "resourceRequirements":{ "shape":"ResourceRequirements", @@ -908,8 +908,7 @@ "type":"structure", "required":[ "computeEnvironmentName", - "type", - "serviceRole" + "type" ], "members":{ "computeEnvironmentName":{ @@ -930,7 +929,7 @@ }, "serviceRole":{ "shape":"String", - "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS services on your behalf. For more information, see AWS Batch service IAM role in the AWS Batch User Guide.

If your specified role has a path other than /, then you must either specify the full role ARN (this is recommended) or prefix the role name with the path.

Depending on how you created your AWS Batch service role, its ARN might contain the service-role path prefix. When you only specify the name of the service role, AWS Batch assumes that your ARN doesn't use the service-role path prefix. Because of this, we recommend that you specify the full ARN of your service role when you create compute environments.

" + "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS services on your behalf. For more information, see AWS Batch service IAM role in the AWS Batch User Guide.

If your account has already created the AWS Batch service-linked role, that role is used by default for your compute environment unless you specify a role here. If the AWS Batch service-linked role does not exist in your account, and no role is specified here, the service will try to create the AWS Batch service-linked role in your account.

If your specified role has a path other than /, then you must specify either the full role ARN (recommended) or prefix the role name with the path. For example, if a role with the name bar has a path of /foo/ then you would specify /foo/bar as the role name. For more information, see Friendly names and paths in the IAM User Guide.

Depending on how you created your AWS Batch service role, its ARN might contain the service-role path prefix. When you only specify the name of the service role, AWS Batch assumes that your ARN doesn't use the service-role path prefix. Because of this, we recommend that you specify the full ARN of your service role when you create compute environments.
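Since serviceRole is no longer required by this model, the sketch below creates a managed Fargate compute environment via boto3 without specifying it, so the service-linked role behavior described above applies; the environment name, subnet, and security group IDs are placeholders.

```python
import boto3

batch = boto3.client("batch")

# serviceRole is omitted; the AWS Batch service-linked role is used (or
# created) instead, per the behavior described in this model.
batch.create_compute_environment(
    computeEnvironmentName="example-fargate-ce",
    type="MANAGED",
    computeResources={
        "type": "FARGATE",
        "maxvCpus": 64,
        "subnets": ["subnet-0123456789abcdef0"],
        "securityGroupIds": ["sg-0123456789abcdef0"],
    },
)
```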

" }, "tags":{ "shape":"TagrisTagsMap", @@ -1087,7 +1086,7 @@ }, "maxResults":{ "shape":"Integer", - "documentation":"

The maximum number of results returned by DescribeJobDefinitions in paginated output. When this parameter is used, DescribeJobDefinitions only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeJobDefinitions request with the returned nextToken value. This value can be between 1 and 100. If this parameter isn't used, then DescribeJobDefinitions returns up to 100 results and a nextToken value if applicable.

" + "documentation":"

The maximum number of results returned by DescribeJobDefinitions in paginated output. When this parameter is used, DescribeJobDefinitions only returns maxResults results in a single page and a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeJobDefinitions request with the returned nextToken value. This value can be between 1 and 100. If this parameter isn't used, then DescribeJobDefinitions returns up to 100 results and a nextToken value if applicable.

" }, "jobDefinitionName":{ "shape":"String", @@ -1126,7 +1125,7 @@ }, "maxResults":{ "shape":"Integer", - "documentation":"

The maximum number of results returned by DescribeJobQueues in paginated output. When this parameter is used, DescribeJobQueues only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeJobQueues request with the returned nextToken value. This value can be between 1 and 100. If this parameter isn't used, then DescribeJobQueues returns up to 100 results and a nextToken value if applicable.

" + "documentation":"

The maximum number of results returned by DescribeJobQueues in paginated output. When this parameter is used, DescribeJobQueues only returns maxResults results in a single page and a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeJobQueues request with the returned nextToken value. This value can be between 1 and 100. If this parameter isn't used, then DescribeJobQueues returns up to 100 results and a nextToken value if applicable.

" }, "nextToken":{ "shape":"String", @@ -1178,7 +1177,7 @@ }, "containerPath":{ "shape":"String", - "documentation":"

The path inside the container used to expose the host device. By default the hostPath value is used.

" + "documentation":"

The path inside the container used to expose the host device. By default, the hostPath value is used.

" }, "permissions":{ "shape":"DeviceCgroupPermissions", @@ -1232,19 +1231,19 @@ "members":{ "onStatusReason":{ "shape":"String", - "documentation":"

Contains a glob pattern to match against the StatusReason returned for a job. The patten can be up to 512 characters long, can contain letters, numbers, periods (.), colons (:), and white space (spaces, tabs). and can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.

" + "documentation":"

Contains a glob pattern to match against the StatusReason returned for a job. The pattern can be up to 512 characters long, and can contain letters, numbers, periods (.), colons (:), and white space (including spaces or tabs). It can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.

" }, "onReason":{ "shape":"String", - "documentation":"

Contains a glob pattern to match against the Reason returned for a job. The patten can be up to 512 characters long, can contain letters, numbers, periods (.), colons (:), and white space (spaces, tabs), and can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.

" + "documentation":"

Contains a glob pattern to match against the Reason returned for a job. The pattern can be up to 512 characters long, and can contain letters, numbers, periods (.), colons (:), and white space (including spaces and tabs). It can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.

" }, "onExitCode":{ "shape":"String", - "documentation":"

Contains a glob pattern to match against the decimal representation of the ExitCode returned for a job. The patten can be up to 512 characters long, can contain only numbers, and can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.

" + "documentation":"

Contains a glob pattern to match against the decimal representation of the ExitCode returned for a job. The pattern can be up to 512 characters long, can contain only numbers, and can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.

" }, "action":{ "shape":"RetryAction", - "documentation":"

Specifies the action to take if all of the specified conditions (onStatusReason, onReason, and onExitCode) are met. The values are not case sensitive.

" + "documentation":"

Specifies the action to take if all of the specified conditions (onStatusReason, onReason, and onExitCode) are met. The values aren't case sensitive.

" } }, "documentation":"

Specifies a set of conditions to be met, and an action to take (RETRY or EXIT) if all conditions are met.

" @@ -1258,17 +1257,17 @@ "members":{ "platformVersion":{ "shape":"String", - "documentation":"

The AWS Fargate platform version on which the jobs are running. A platform version is specified only for jobs running on Fargate resources. If one isn't specified, the LATEST platform version is used by default. This will use a recent, approved version of the AWS Fargate platform for compute resources. For more information, see AWS Fargate platform versions in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The AWS Fargate platform version where the jobs are running. A platform version is specified only for jobs running on Fargate resources. If one isn't specified, the LATEST platform version is used by default. This uses a recent, approved version of the AWS Fargate platform for compute resources. For more information, see AWS Fargate platform versions in the Amazon Elastic Container Service Developer Guide.

" } }, - "documentation":"

The platform configuration for jobs running on Fargate resources. Jobs running on EC2 resources must not specify this parameter.

" + "documentation":"

The platform configuration for jobs running on Fargate resources. For jobs that run on EC2 resources, you shouldn't specify this parameter.

" }, "Host":{ "type":"structure", "members":{ "sourcePath":{ "shape":"String", - "documentation":"

The path on the host container instance that's presented to the container. If this parameter is empty, then the Docker daemon has assigned a host path for you. If this parameter contains a file location, then the data volume persists at the specified location on the host container instance until you delete it manually. If the source path location does not exist on the host container instance, the Docker daemon creates it. If the location does exist, the contents of the source path folder are exported.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.

" + "documentation":"

The path on the host container instance that's presented to the container. If this parameter is empty, then the Docker daemon has assigned a host path for you. If this parameter contains a file location, then the data volume persists at the specified location on the host container instance until you delete it manually. If the source path location doesn't exist on the host container instance, the Docker daemon creates it. If the location does exist, the contents of the source path folder are exported.

This parameter isn't applicable to jobs that run on Fargate resources and shouldn't be provided.

" } }, "documentation":"

Determine whether your data volume persists on the host container instance and where it is stored. If this parameter is empty, then the Docker daemon assigns a host path for your data volume, but the data isn't guaranteed to persist after the containers associated with it stop running.

" @@ -1488,7 +1487,7 @@ }, "propagateTags":{ "shape":"Boolean", - "documentation":"

Specifies whether to propagate the tags from the job or job definition to the corresponding Amazon ECS task. If no value is specified, the tags are not propagated. Tags can only be propagated to the tasks during task creation. For tags with the same name, job tags are given priority over job definitions tags. If the total number of combined tags from the job and job definition is over 50, the job is moved to the FAILED state.

" + "documentation":"

Specifies whether to propagate the tags from the job or job definition to the corresponding Amazon ECS task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the tasks during task creation. For tags with the same name, job tags are given priority over job definitions tags. If the total number of combined tags from the job and job definition is over 50, the job is moved to the FAILED state.

" }, "platformCapabilities":{ "shape":"PlatformCapabilityList", @@ -1533,7 +1532,7 @@ }, "priority":{ "shape":"Integer", - "documentation":"

The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority parameter) are evaluated first when associated with the same compute environment. Priority is determined in descending order, for example, a job queue with a priority value of 10 is given scheduling preference over a job queue with a priority value of 1. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT); EC2 and Fargate compute environments cannot be mixed.

" + "documentation":"

The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority parameter) are evaluated first when associated with the same compute environment. Priority is determined in descending order, for example, a job queue with a priority value of 10 is given scheduling preference over a job queue with a priority value of 1. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT); EC2 and Fargate compute environments can't be mixed.

" }, "computeEnvironmentOrder":{ "shape":"ComputeEnvironmentOrders", @@ -1687,7 +1686,7 @@ }, "swappiness":{ "shape":"Integer", - "documentation":"

This allows you to tune a container's memory swappiness behavior. A swappiness value of 0 causes swapping not to happen unless absolutely necessary. A swappiness value of 100 causes pages to be swapped very aggressively. Accepted values are whole numbers between 0 and 100. If the swappiness parameter isn't specified, a default value of 60 is used. If a value isn't specified for maxSwap then this parameter is ignored. If maxSwap is set to 0, the container doesn't use swap. This parameter maps to the --memory-swappiness option to docker run.

Consider the following when you use a per-container swap configuration.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.

" + "documentation":"

This allows you to tune a container's memory swappiness behavior. A swappiness value of 0 causes swapping not to happen unless absolutely necessary. A swappiness value of 100 causes pages to be swapped very aggressively. Accepted values are whole numbers between 0 and 100. If the swappiness parameter isn't specified, a default value of 60 is used. If a value isn't specified for maxSwap then this parameter is ignored. If maxSwap is set to 0, the container doesn't use swap. This parameter maps to the --memory-swappiness option to docker run.

Consider the following when you use a per-container swap configuration.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.

" } }, "documentation":"

Linux-specific modifications that are applied to the container, such as details for device mappings.

" @@ -1713,7 +1712,7 @@ }, "maxResults":{ "shape":"Integer", - "documentation":"

The maximum number of results returned by ListJobs in paginated output. When this parameter is used, ListJobs only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListJobs request with the returned nextToken value. This value can be between 1 and 100. If this parameter isn't used, then ListJobs returns up to 100 results and a nextToken value if applicable.

" + "documentation":"

The maximum number of results returned by ListJobs in paginated output. When this parameter is used, ListJobs only returns maxResults results in a single page and a nextToken response element. The remaining results of the initial request can be seen by sending another ListJobs request with the returned nextToken value. This value can be between 1 and 100. If this parameter isn't used, then ListJobs returns up to 100 results and a nextToken value if applicable.

" }, "nextToken":{ "shape":"String", @@ -1763,7 +1762,7 @@ "members":{ "logDriver":{ "shape":"LogDriver", - "documentation":"

The log driver to use for the container. The valid values listed for this parameter are log drivers that the Amazon ECS container agent can communicate with by default.

The supported log drivers are awslogs, fluentd, gelf, json-file, journald, logentries, syslog, and splunk.

Jobs running on Fargate resources are restricted to the awslogs and splunk log drivers.

awslogs

Specifies the Amazon CloudWatch Logs logging driver. For more information, see Using the awslogs Log Driver in the AWS Batch User Guide and Amazon CloudWatch Logs logging driver in the Docker documentation.

fluentd

Specifies the Fluentd logging driver. For more information, including usage and options, see Fluentd logging driver in the Docker documentation.

gelf

Specifies the Graylog Extended Format (GELF) logging driver. For more information, including usage and options, see Graylog Extended Format logging driver in the Docker documentation.

journald

Specifies the journald logging driver. For more information, including usage and options, see Journald logging driver in the Docker documentation.

json-file

Specifies the JSON file logging driver. For more information, including usage and options, see JSON File logging driver in the Docker documentation.

splunk

Specifies the Splunk logging driver. For more information, including usage and options, see Splunk logging driver in the Docker documentation.

syslog

Specifies the syslog logging driver. For more information, including usage and options, see Syslog logging driver in the Docker documentation.

If you have a custom driver that'sn't listed earlier that you want to work with the Amazon ECS container agent, you can fork the Amazon ECS container agent project that's available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you want to have included. However, Amazon Web Services doesn't currently support running modified copies of this software.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

" + "documentation":"

The log driver to use for the container. The valid values listed for this parameter are log drivers that the Amazon ECS container agent can communicate with by default.

The supported log drivers are awslogs, fluentd, gelf, json-file, journald, logentries, syslog, and splunk.

Jobs running on Fargate resources are restricted to the awslogs and splunk log drivers.

awslogs

Specifies the Amazon CloudWatch Logs logging driver. For more information, see Using the awslogs Log Driver in the AWS Batch User Guide and Amazon CloudWatch Logs logging driver in the Docker documentation.

fluentd

Specifies the Fluentd logging driver. For more information, including usage and options, see Fluentd logging driver in the Docker documentation.

gelf

Specifies the Graylog Extended Format (GELF) logging driver. For more information, including usage and options, see Graylog Extended Format logging driver in the Docker documentation.

journald

Specifies the journald logging driver. For more information, including usage and options, see Journald logging driver in the Docker documentation.

json-file

Specifies the JSON file logging driver. For more information, including usage and options, see JSON File logging driver in the Docker documentation.

splunk

Specifies the Splunk logging driver. For more information, including usage and options, see Splunk logging driver in the Docker documentation.

syslog

Specifies the syslog logging driver. For more information, including usage and options, see Syslog logging driver in the Docker documentation.

If you have a custom driver that's not listed earlier and that you want to work with the Amazon ECS container agent, you can fork the Amazon ECS container agent project that's available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you want to have included. However, Amazon Web Services doesn't currently support running modified copies of this software.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

" }, "options":{ "shape":"LogConfigurationOptionsMap", @@ -2052,7 +2051,7 @@ "members":{ "value":{ "shape":"String", - "documentation":"

The quantity of the specified resource to reserve for the container. The values vary based on the type specified.

type=\"GPU\"

The number of physical GPUs to reserve for the container. The number of GPUs reserved for all containers in a job shouldn't exceed the number of available GPUs on the compute resource that the job is launched on.

GPUs are not available for jobs running on Fargate resources.

type=\"MEMORY\"

For jobs running on EC2 resources, the hard limit (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run. You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for multi-node parallel (MNP) jobs. It must be specified for each node at least once. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see Memory Management in the AWS Batch User Guide.

For jobs running on Fargate resources, then value is the hard limit (in MiB), and must match one of the supported values and the VCPU values must be one of the values supported for that memory value.

value = 512

VCPU = 0.25

value = 1024

VCPU = 0.25 or 0.5

value = 2048

VCPU = 0.25, 0.5, or 1

value = 3072

VCPU = 0.5, or 1

value = 4096

VCPU = 0.5, 1, or 2

value = 5120, 6144, or 7168

VCPU = 1 or 2

value = 8192

VCPU = 1, 2, or 4

value = 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384

VCPU = 2 or 4

value = 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720

VCPU = 4

type=\"VCPU\"

The number of vCPUs reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. For EC2 resources, you must specify at least one vCPU. This is required but can be specified in several places; it must be specified for each node at least once.

For jobs running on Fargate resources, then value must match one of the supported values and the MEMORY values must be one of the values supported for that VCPU value. The supported values are 0.25, 0.5, 1, 2, and 4

value = 0.25

MEMORY = 512, 1024, or 2048

value = 0.5

MEMORY = 1024, 2048, 3072, or 4096

value = 1

MEMORY = 2048, 3072, 4096, 5120, 6144, 7168, or 8192

value = 2

MEMORY = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384

value = 4

MEMORY = 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720

" + "documentation":"

The quantity of the specified resource to reserve for the container. The values vary based on the type specified.

type=\"GPU\"

The number of physical GPUs to reserve for the container. The number of GPUs reserved for all containers in a job shouldn't exceed the number of available GPUs on the compute resource that the job is launched on.

GPUs are not available for jobs running on Fargate resources.

type=\"MEMORY\"

The memory hard limit (in MiB) presented to the container. This parameter is supported for jobs running on EC2 resources. If your container attempts to exceed the memory specified, the container is terminated. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run. You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for multi-node parallel (MNP) jobs. It must be specified for each node at least once.

If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see Memory Management in the AWS Batch User Guide.

For jobs running on Fargate resources, the value is the hard limit (in MiB) and must match one of the supported values. The VCPU values must be one of the values supported for that memory value.

value = 512

VCPU = 0.25

value = 1024

VCPU = 0.25 or 0.5

value = 2048

VCPU = 0.25, 0.5, or 1

value = 3072

VCPU = 0.5, or 1

value = 4096

VCPU = 0.5, 1, or 2

value = 5120, 6144, or 7168

VCPU = 1 or 2

value = 8192

VCPU = 1, 2, or 4

value = 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384

VCPU = 2 or 4

value = 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720

VCPU = 4

type=\"VCPU\"

The number of vCPUs reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. For EC2 resources, you must specify at least one vCPU. This is required but can be specified in several places; it must be specified for each node at least once.

For jobs running on Fargate resources, the value must match one of the supported values, and the MEMORY values must be one of the values supported for that VCPU value. The supported values are 0.25, 0.5, 1, 2, and 4.

value = 0.25

MEMORY = 512, 1024, or 2048

value = 0.5

MEMORY = 1024, 2048, 3072, or 4096

value = 1

MEMORY = 2048, 3072, 4096, 5120, 6144, 7168, or 8192

value = 2

MEMORY = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384

value = 4

MEMORY = 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720

" }, "type":{ "shape":"ResourceType", @@ -2107,7 +2106,7 @@ }, "valueFrom":{ "shape":"String", - "documentation":"

The secret to expose to the container. The supported values are either the full ARN of the AWS Secrets Manager secret or the full ARN of the parameter in the AWS Systems Manager Parameter Store.

If the AWS Systems Manager Parameter Store parameter exists in the same Region as the job you are launching, then you can use either the full ARN or name of the parameter. If the parameter exists in a different Region, then the full ARN must be specified.

" + "documentation":"

The secret to expose to the container. The supported values are either the full ARN of the AWS Secrets Manager secret or the full ARN of the parameter in the AWS Systems Manager Parameter Store.

If the AWS Systems Manager Parameter Store parameter exists in the same Region as the job you're launching, then you can use either the full ARN or name of the parameter. If the parameter exists in a different Region, then the full ARN must be specified.

" } }, "documentation":"

An object representing the secret to expose to your container. Secrets can be exposed to a container in the following ways:

For more information, see Specifying sensitive data in the AWS Batch User Guide.

" @@ -2145,7 +2144,7 @@ }, "jobQueue":{ "shape":"String", - "documentation":"

The job queue into which the job is submitted. You can specify either the name or the Amazon Resource Name (ARN) of the queue.

" + "documentation":"

The job queue where the job is submitted. You can specify either the name or the Amazon Resource Name (ARN) of the queue.

" }, "arrayProperties":{ "shape":"ArrayProperties", @@ -2165,7 +2164,7 @@ }, "containerOverrides":{ "shape":"ContainerOverrides", - "documentation":"

A list of container overrides in JSON format that specify the name of a container in the specified job definition and the overrides it should receive. You can override the default command for a container (that's specified in the job definition or the Docker image) with a command override. You can also override existing environment variables (that are specified in the job definition or Docker image) on a container or add new environment variables to it with an environment override.

" + "documentation":"

A list of container overrides in the JSON format that specify the name of a container in the specified job definition and the overrides it should receive. You can override the default command for a container, which is specified in the job definition or the Docker image, with a command override. You can also override existing environment variables on a container or add new environment variables to it with an environment override.

" }, "nodeOverrides":{ "shape":"NodeOverrides", @@ -2382,7 +2381,7 @@ }, "serviceRole":{ "shape":"String", - "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS services on your behalf. For more information, see AWS Batch service IAM role in the AWS Batch User Guide.

If your specified role has a path other than /, then you must either specify the full role ARN (this is recommended) or prefix the role name with the path.

Depending on how you created your AWS Batch service role, its ARN might contain the service-role path prefix. When you only specify the name of the service role, AWS Batch assumes that your ARN does not use the service-role path prefix. Because of this, we recommend that you specify the full ARN of your service role when you create compute environments.

" + "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS services on your behalf. For more information, see AWS Batch service IAM role in the AWS Batch User Guide.

If your specified role has a path other than /, then you must either specify the full role ARN (this is recommended) or prefix the role name with the path.

Depending on how you created your AWS Batch service role, its ARN might contain the service-role path prefix. When you only specify the name of the service role, AWS Batch assumes that your ARN doesn't use the service-role path prefix. Because of this, we recommend that you specify the full ARN of your service role when you create compute environments.

" } }, "documentation":"

Contains the parameters for UpdateComputeEnvironment.

" @@ -2410,15 +2409,15 @@ }, "state":{ "shape":"JQState", - "documentation":"

Describes the queue's ability to accept new jobs. If the job queue state is ENABLED, it is able to accept jobs. If the job queue state is DISABLED, new jobs cannot be added to the queue, but jobs already in the queue can finish.

" + "documentation":"

Describes the queue's ability to accept new jobs. If the job queue state is ENABLED, it can accept jobs. If the job queue state is DISABLED, new jobs can't be added to the queue, but jobs already in the queue can finish.

" }, "priority":{ "shape":"Integer", - "documentation":"

The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority parameter) are evaluated first when associated with the same compute environment. Priority is determined in descending order, for example, a job queue with a priority value of 10 is given scheduling preference over a job queue with a priority value of 1. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT); EC2 and Fargate compute environments cannot be mixed.

" + "documentation":"

The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority parameter) are evaluated first when associated with the same compute environment. Priority is determined in descending order, for example, a job queue with a priority value of 10 is given scheduling preference over a job queue with a priority value of 1. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT). EC2 and Fargate compute environments can't be mixed.

" }, "computeEnvironmentOrder":{ "shape":"ComputeEnvironmentOrders", - "documentation":"

Details the set of compute environments mapped to a job queue and their order relative to each other. This is one of the parameters used by the job scheduler to determine which compute environment should run a given job. Compute environments must be in the VALID state before you can associate them with a job queue. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT); EC2 and Fargate compute environments can't be mixed.

All compute environments that are associated with a job queue must share the same architecture. AWS Batch doesn't support mixing compute environment architecture types in a single job queue.

" + "documentation":"

Details the set of compute environments mapped to a job queue and their order relative to each other. This is one of the parameters used by the job scheduler to determine which compute environment should run a given job. Compute environments must be in the VALID state before you can associate them with a job queue. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT). EC2 and Fargate compute environments can't be mixed.

All compute environments that are associated with a job queue must share the same architecture. AWS Batch doesn't support mixing compute environment architecture types in a single job queue.

" } }, "documentation":"

Contains the parameters for UpdateJobQueue.

" @@ -2455,5 +2454,5 @@ "member":{"shape":"Volume"} } }, - "documentation":"

Using AWS Batch, you can run batch computing workloads on the AWS Cloud. Batch computing is a common means for developers, scientists, and engineers to access large amounts of compute resources. AWS Batch utilizes the advantages of this computing workload to remove the undifferentiated heavy lifting of configuring and managing required infrastructure, while also adopting a familiar batch computing software approach. Given these advantages, AWS Batch can help you to efficiently provision resources in response to jobs submitted, thus effectively helping to eliminate capacity constraints, reduce compute costs, and deliver your results more quickly.

As a fully managed service, AWS Batch can run batch computing workloads of any scale. AWS Batch automatically provisions compute resources and optimizes workload distribution based on the quantity and scale of your specific workloads. With AWS Batch, there's no need to install or manage batch computing software. This means that you can focus your time and energy on analyzing results and solving your specific problems.

" + "documentation":"

Using AWS Batch, you can run batch computing workloads on the AWS Cloud. Batch computing is a common means for developers, scientists, and engineers to access large amounts of compute resources. AWS Batch uses the advantages of this computing workload to remove the undifferentiated heavy lifting of configuring and managing required infrastructure. At the same time, it also adopts a familiar batch computing software approach. Given these advantages, AWS Batch can help you to efficiently provision resources in response to jobs submitted, thus effectively helping you to eliminate capacity constraints, reduce compute costs, and deliver your results more quickly.

As a fully managed service, AWS Batch can run batch computing workloads of any scale. AWS Batch automatically provisions compute resources and optimizes workload distribution based on the quantity and scale of your specific workloads. With AWS Batch, there's no need to install or manage batch computing software. This means that you can focus your time and energy on analyzing results and solving your specific problems.

" } diff --git a/botocore/data/ce/2017-10-25/service-2.json b/botocore/data/ce/2017-10-25/service-2.json index e660add9..d45d5d4b 100644 --- a/botocore/data/ce/2017-10-25/service-2.json +++ b/botocore/data/ce/2017-10-25/service-2.json @@ -720,10 +720,32 @@ "ProcessingStatus":{ "shape":"CostCategoryProcessingStatusList", "documentation":"

The list of processing statuses for Cost Management products for a specific cost category.

" - } + }, + "DefaultValue":{"shape":"CostCategoryValue"} }, "documentation":"

The structure of Cost Categories. This includes detailed metadata and the set of rules for the CostCategory object.

" }, + "CostCategoryInheritedValueDimension":{ + "type":"structure", + "members":{ + "DimensionName":{ + "shape":"CostCategoryInheritedValueDimensionName", + "documentation":"

The name of the dimension for which to group costs.

If you specify LINKED_ACCOUNT_NAME, the cost category value will be based on account name. If you specify TAG, the cost category value will be based on the value of the specified tag key.

" + }, + "DimensionKey":{ + "shape":"GenericString", + "documentation":"

The key used to extract cost category values.

" + } + }, + "documentation":"

When creating or updating a cost category, you can define the CostCategoryRule rule type as INHERITED_VALUE. This rule type adds the flexibility of defining a rule that dynamically inherits the cost category value from the dimension value defined by CostCategoryInheritedValueDimension. For example, if you wanted to dynamically group costs based on the value of a specific tag key, you would first choose an inherited value rule type, then choose the tag dimension and specify the tag key to use.

" + }, + "CostCategoryInheritedValueDimensionName":{ + "type":"string", + "enum":[ + "LINKED_ACCOUNT_NAME", + "TAG" + ] + }, "CostCategoryMaxResults":{ "type":"integer", "max":100, @@ -732,7 +754,7 @@ "CostCategoryName":{ "type":"string", "documentation":"

The unique name of the Cost Category.

", - "max":255, + "max":50, "min":1, "pattern":"^(?! )[\\p{L}\\p{N}\\p{Z}-_]*(? A list of unique cost category values in a specific cost category.

" - } + }, + "DefaultValue":{"shape":"CostCategoryValue"} }, "documentation":"

A reference to a Cost Category containing only enough information to identify the Cost Category.

You can use this information to retrieve the full Cost Category information using DescribeCostCategory.

" }, @@ -795,19 +818,30 @@ }, "CostCategoryRule":{ "type":"structure", - "required":[ - "Value", - "Rule" - ], "members":{ "Value":{"shape":"CostCategoryValue"}, "Rule":{ "shape":"Expression", "documentation":"

An Expression object used to categorize costs. This supports dimensions, tags, and nested expressions. Currently the only dimensions supported are LINKED_ACCOUNT, SERVICE_CODE, RECORD_TYPE, and LINKED_ACCOUNT_NAME.

Root level OR is not supported. We recommend that you create a separate rule instead.

RECORD_TYPE is a dimension used for Cost Explorer APIs, and is also supported for Cost Category expressions. This dimension uses different terms, depending on whether you're using the console or API/JSON editor. For a detailed comparison, see Term Comparisons in the AWS Billing and Cost Management User Guide.

" + }, + "InheritedValue":{ + "shape":"CostCategoryInheritedValueDimension", + "documentation":"

The value the line item will be categorized as, if the line item contains the matched dimension.

" + }, + "Type":{ + "shape":"CostCategoryRuleType", + "documentation":"

You can define the CostCategoryRule rule type as either REGULAR or INHERITED_VALUE. The INHERITED_VALUE rule type adds the flexibility of defining a rule that dynamically inherits the cost category value from the dimension value defined by CostCategoryInheritedValueDimension. For example, if you wanted to dynamically group costs based on the value of a specific tag key, you would first choose an inherited value rule type, then choose the tag dimension and specify the tag key to use.

" } }, "documentation":"

Rules are processed in order. If there are multiple rules that match the line item, then the first rule to match is used to determine that Cost Category value.

" }, + "CostCategoryRuleType":{ + "type":"string", + "enum":[ + "REGULAR", + "INHERITED_VALUE" + ] + }, "CostCategoryRuleVersion":{ "type":"string", "documentation":"

The rule schema version in this particular Cost Category.

", @@ -832,8 +866,8 @@ }, "CostCategoryValue":{ "type":"string", - "documentation":"

The value a line item will be categorized as, if it matches the rule.

", - "max":255, + "documentation":"

The default value for the cost category.

", + "max":50, "min":1, "pattern":"^(?! )[\\p{L}\\p{N}\\p{Z}-_]*(?The Cost Category rules used to categorize costs. For more information, see CostCategoryRule.

" - } + }, + "DefaultValue":{"shape":"CostCategoryValue"} } }, "CreateCostCategoryDefinitionResponse":{ @@ -1738,7 +1773,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

This field is only used when SortBy is provided in the request.

The maximum number of objects that to be returned for this request. If MaxResults is not specified with SortBy, the request will return 1000 results as the default value for this parameter.

" + "documentation":"

This field is only used when SortBy is provided in the request.

The maximum number of objects to be returned for this request. If MaxResults is not specified with SortBy, the request will return 1000 results as the default value for this parameter.

For GetCostCategories, MaxResults has an upper limit of 1000.

" }, "NextPageToken":{ "shape":"NextPageToken", @@ -1797,7 +1832,7 @@ }, "Filter":{ "shape":"Expression", - "documentation":"

The filters that you want to use to filter your forecast. Cost Explorer API supports all of the Cost Explorer filters.

" + "documentation":"

The filters that you want to use to filter your forecast. The GetCostForecast API supports filtering by the following dimensions:

" }, "PredictionIntervalLevel":{ "shape":"PredictionIntervalLevel", @@ -1848,7 +1883,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

This field is only used when SortBy is provided in the request. The maximum number of objects that to be returned for this request. If MaxResults is not specified with SortBy, the request will return 1000 results as the default value for this parameter.

" + "documentation":"

This field is only used when SortBy is provided in the request. The maximum number of objects to be returned for this request. If MaxResults is not specified with SortBy, the request will return 1000 results as the default value for this parameter.

For GetDimensionValues, MaxResults has an upper limit of 1000.

" }, "NextPageToken":{ "shape":"NextPageToken", @@ -2326,7 +2361,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

This field is only used when SortBy is provided in the request. The maximum number of objects that to be returned for this request. If MaxResults is not specified with SortBy, the request will return 1000 results as the default value for this parameter.

" + "documentation":"

This field is only used when SortBy is provided in the request. The maximum number of objects to be returned for this request. If MaxResults is not specified with SortBy, the request will return 1000 results as the default value for this parameter.

For GetTags, MaxResults has an upper limit of 1000.

" }, "NextPageToken":{ "shape":"NextPageToken", @@ -2382,7 +2417,7 @@ }, "Filter":{ "shape":"Expression", - "documentation":"

The filters that you want to use to filter your forecast. Cost Explorer API supports all of the Cost Explorer filters.

" + "documentation":"

The filters that you want to use to filter your forecast. The GetUsageForecast API supports filtering by the following dimensions:

" }, "PredictionIntervalLevel":{ "shape":"PredictionIntervalLevel", @@ -3981,7 +4016,8 @@ "Rules":{ "shape":"CostCategoryRulesList", "documentation":"

The Expression object used to categorize costs. For more information, see CostCategoryRule .

" - } + }, + "DefaultValue":{"shape":"CostCategoryValue"} } }, "UpdateCostCategoryDefinitionResponse":{ diff --git a/botocore/data/cloudformation/2010-05-15/service-2.json b/botocore/data/cloudformation/2010-05-15/service-2.json index 95134db6..f5bed8a7 100644 --- a/botocore/data/cloudformation/2010-05-15/service-2.json +++ b/botocore/data/cloudformation/2010-05-15/service-2.json @@ -195,7 +195,7 @@ {"shape":"CFNRegistryException"}, {"shape":"TypeNotFoundException"} ], - "documentation":"

Removes a type or type version from active use in the CloudFormation registry. If a type or type version is deregistered, it cannot be used in CloudFormation operations.

To deregister a type, you must individually deregister all registered versions of that type. If a type has only a single registered version, deregistering that version results in the type itself being deregistered.

You cannot deregister the default version of a type, unless it is the only registered version of that type, in which case the type itself is deregistered as well.

", + "documentation":"

Marks an extension or extension version as DEPRECATED in the CloudFormation registry, removing it from active use. Deprecated extensions or extension versions cannot be used in CloudFormation operations.

To deregister an entire extension, you must individually deregister all active versions of that extension. If an extension has only a single active version, deregistering that version results in the extension itself being deregistered and marked as deprecated in the registry.

You cannot deregister the default version of an extension if there are other active versions of that extension. If you do deregister the default version of an extension, the extension itself is deregistered as well and marked as deprecated.

To view the deprecation status of an extension or extension version, use DescribeType.

", "idempotent":true }, "DescribeAccountLimits":{ @@ -370,7 +370,7 @@ {"shape":"CFNRegistryException"}, {"shape":"TypeNotFoundException"} ], - "documentation":"

Returns detailed information about a type that has been registered.

If you specify a VersionId, DescribeType returns information about that specific type version. Otherwise, it returns information about the default type version.

", + "documentation":"

Returns detailed information about an extension that has been registered.

If you specify a VersionId, DescribeType returns information about that specific extension version. Otherwise, it returns information about the default extension version.

", "idempotent":true }, "DescribeTypeRegistration":{ @@ -387,7 +387,7 @@ "errors":[ {"shape":"CFNRegistryException"} ], - "documentation":"

Returns information about a type's registration, including its current status and type and version identifiers.

When you initiate a registration request using RegisterType , you can then use DescribeTypeRegistration to monitor the progress of that registration request.

Once the registration request has completed, use DescribeType to return detailed informaiton about a type.

", + "documentation":"

Returns information about an extension's registration, including its current status and type and version identifiers.

When you initiate a registration request using RegisterType , you can then use DescribeTypeRegistration to monitor the progress of that registration request.

Once the registration request has completed, use DescribeType to return detailed information about an extension.

", "idempotent":true }, "DetectStackDrift":{ @@ -432,7 +432,7 @@ {"shape":"OperationInProgressException"}, {"shape":"StackSetNotFoundException"} ], - "documentation":"

Detect drift on a stack set. When CloudFormation performs drift detection on a stack set, it performs drift detection on the stack associated with each stack instance in the stack set. For more information, see How CloudFormation Performs Drift Detection on a Stack Set.

DetectStackSetDrift returns the OperationId of the stack set drift detection operation. Use this operation id with DescribeStackSetOperation to monitor the progress of the drift detection operation. The drift detection operation may take some time, depending on the number of stack instances included in the stack set, as well as the number of resources included in each stack.

Once the operation has completed, use the following actions to return drift information:

For more information on performing a drift detection operation on a stack set, see Detecting Unmanaged Changes in Stack Sets.

You can only run a single drift detection operation on a given stack set at one time.

To stop a drift detection stack set operation, use StopStackSetOperation .

" + "documentation":"

Detect drift on a stack set. When CloudFormation performs drift detection on a stack set, it performs drift detection on the stack associated with each stack instance in the stack set. For more information, see How CloudFormation Performs Drift Detection on a Stack Set.

DetectStackSetDrift returns the OperationId of the stack set drift detection operation. Use this operation id with DescribeStackSetOperation to monitor the progress of the drift detection operation. The drift detection operation may take some time, depending on the number of stack instances included in the stack set, as well as the number of resources included in each stack.

Once the operation has completed, use the following actions to return drift information:

For more information on performing a drift detection operation on a stack set, see Detecting Unmanaged Changes in Stack Sets.

You can only run a single drift detection operation on a given stack set at one time.

To stop a drift detection stack set operation, use StopStackSetOperation .

" }, "EstimateTemplateCost":{ "name":"EstimateTemplateCost", @@ -623,7 +623,7 @@ "shape":"ListStackSetsOutput", "resultWrapper":"ListStackSetsResult" }, - "documentation":"

Returns summary information about stack sets that are associated with the user.

" + "documentation":"

Returns summary information about stack sets that are associated with the user.

" }, "ListStacks":{ "name":"ListStacks", @@ -652,7 +652,7 @@ "errors":[ {"shape":"CFNRegistryException"} ], - "documentation":"

Returns a list of registration tokens for the specified type(s).

", + "documentation":"

Returns a list of registration tokens for the specified extension(s).

", "idempotent":true }, "ListTypeVersions":{ @@ -669,7 +669,7 @@ "errors":[ {"shape":"CFNRegistryException"} ], - "documentation":"

Returns summary information about the versions of a type.

", + "documentation":"

Returns summary information about the versions of an extension.

", "idempotent":true }, "ListTypes":{ @@ -686,7 +686,7 @@ "errors":[ {"shape":"CFNRegistryException"} ], - "documentation":"

Returns summary information about types that have been registered with CloudFormation.

", + "documentation":"

Returns summary information about extensions that have been registered with CloudFormation.

", "idempotent":true }, "RecordHandlerProgress":{ @@ -721,7 +721,7 @@ "errors":[ {"shape":"CFNRegistryException"} ], - "documentation":"

Registers a type with the CloudFormation service. Registering a type makes it available for use in CloudFormation templates in your AWS account, and includes:

For more information on how to develop types and ready them for registeration, see Creating Resource Providers in the CloudFormation CLI User Guide.

You can have a maximum of 50 resource type versions registered at a time. This maximum is per account and per region. Use DeregisterType to deregister specific resource type versions if necessary.

Once you have initiated a registration request using RegisterType , you can use DescribeTypeRegistration to monitor the progress of the registration request.

", + "documentation":"

Registers an extension with the CloudFormation service. Registering an extension makes it available for use in CloudFormation templates in your AWS account, and includes:

For more information on how to develop extensions and ready them for registration, see Creating Resource Providers in the CloudFormation CLI User Guide.

You can have a maximum of 50 resource extension versions registered at a time. This maximum is per account and per region. Use DeregisterType to deregister specific extension versions if necessary.

Once you have initiated a registration request using RegisterType , you can use DescribeTypeRegistration to monitor the progress of the registration request.

", "idempotent":true }, "SetStackPolicy":{ @@ -748,7 +748,7 @@ {"shape":"CFNRegistryException"}, {"shape":"TypeNotFoundException"} ], - "documentation":"

Specify the default version of a type. The default version of a type will be used in CloudFormation operations.

", + "documentation":"

Specify the default version of an extension. The default version of an extension will be used in CloudFormation operations.

", "idempotent":true }, "SignalResource":{ @@ -944,7 +944,7 @@ "documentation":"

If set to true, stack resources are retained when an account is removed from a target organization or OU. If set to false, stack resources are deleted. Specify only if Enabled is set to True.

" } }, - "documentation":"

[Service-managed permissions] Describes whether StackSets automatically deploys to AWS Organizations accounts that are added to a target organization or organizational unit (OU).

" + "documentation":"

[Service-managed permissions] Describes whether StackSets automatically deploys to AWS Organizations accounts that are added to a target organization or organizational unit (OU).

" }, "AutoDeploymentNullable":{"type":"boolean"}, "BoxedInteger":{ @@ -970,6 +970,13 @@ }, "exception":true }, + "CallAs":{ + "type":"string", + "enum":[ + "SELF", + "DELEGATED_ADMIN" + ] + }, "CancelUpdateStackInput":{ "type":"structure", "required":["StackName"], @@ -1207,7 +1214,7 @@ }, "TemplateURL":{ "shape":"TemplateURL", - "documentation":"

The location of the file that contains the revised template. The URL must point to a template (max size: 460,800 bytes) that is located in an S3 bucket. AWS CloudFormation generates the change set by comparing this template with the stack that you specified.

Conditional: You must specify only TemplateBody or TemplateURL.

" + "documentation":"

The location of the file that contains the revised template. The URL must point to a template (max size: 460,800 bytes) that is located in an S3 bucket or a Systems Manager document. AWS CloudFormation generates the change set by comparing this template with the stack that you specified.

Conditional: You must specify only TemplateBody or TemplateURL.

" }, "UsePreviousTemplate":{ "shape":"UsePreviousTemplate", @@ -1296,7 +1303,7 @@ }, "TemplateURL":{ "shape":"TemplateURL", - "documentation":"

Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information, go to the Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

" + "documentation":"

Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. For more information, go to the Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

" }, "Parameters":{ "shape":"Parameters", @@ -1370,11 +1377,11 @@ }, "Accounts":{ "shape":"AccountList", - "documentation":"

[Self-managed permissions] The names of one or more AWS accounts that you want to create stack instances in the specified Region(s) for.

You can specify Accounts or DeploymentTargets, but not both.

" + "documentation":"

[Self-managed permissions] The names of one or more AWS accounts that you want to create stack instances in the specified Region(s) for.

You can specify Accounts or DeploymentTargets, but not both.

" }, "DeploymentTargets":{ "shape":"DeploymentTargets", - "documentation":"

[Service-managed permissions] The AWS Organizations accounts for which to create stack instances in the specified Regions.

You can specify Accounts or DeploymentTargets, but not both.

" + "documentation":"

[Service-managed permissions] The AWS Organizations accounts for which to create stack instances in the specified Regions.

You can specify Accounts or DeploymentTargets, but not both.

" }, "Regions":{ "shape":"RegionList", @@ -1392,6 +1399,10 @@ "shape":"ClientRequestToken", "documentation":"

The unique identifier for this stack set operation.

The operation ID also functions as an idempotency token, to ensure that AWS CloudFormation performs the stack set operation only once, even if you retry the request multiple times. You might retry stack set operation requests to ensure that AWS CloudFormation successfully received them.

If you don't specify an operation ID, the SDK generates one automatically.

Repeating this stack set operation with a new operation ID retries all stack instances whose status is OUTDATED.

", "idempotencyToken":true + }, + "CallAs":{ + "shape":"CallAs", + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

" } } }, @@ -1432,7 +1443,7 @@ }, "TemplateURL":{ "shape":"TemplateURL", - "documentation":"

The location of the file that contains the template body. The URL must point to a template (maximum size: 460,800 bytes) that's located in an Amazon S3 bucket. For more information, see Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

" + "documentation":"

The location of the file that contains the template body. The URL must point to a template (maximum size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. For more information, see Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

" }, "Parameters":{ "shape":"Parameters", @@ -1462,6 +1473,10 @@ "shape":"AutoDeployment", "documentation":"

Describes whether StackSets automatically deploys to AWS Organizations accounts that are added to the target organization or organizational unit (OU). Specify only if PermissionModel is SERVICE_MANAGED.

" }, + "CallAs":{ + "shape":"CallAs", + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

Stack sets with service-managed permissions are created in the management account, including stack sets that are created by delegated administrators.

" + }, "ClientRequestToken":{ "shape":"ClientRequestToken", "documentation":"

A unique identifier for this CreateStackSet request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to create another stack set with the same name. You might retry CreateStackSet requests to ensure that AWS CloudFormation successfully received them.

If you don't specify an operation ID, the SDK generates one automatically.

", @@ -1549,11 +1564,11 @@ }, "Accounts":{ "shape":"AccountList", - "documentation":"

[Self-managed permissions] The names of the AWS accounts that you want to delete stack instances for.

You can specify Accounts or DeploymentTargets, but not both.

" + "documentation":"

[Self-managed permissions] The names of the AWS accounts that you want to delete stack instances for.

You can specify Accounts or DeploymentTargets, but not both.

" }, "DeploymentTargets":{ "shape":"DeploymentTargets", - "documentation":"

[Service-managed permissions] The AWS Organizations accounts from which to delete stack instances.

You can specify Accounts or DeploymentTargets, but not both.

" + "documentation":"

[Service-managed permissions] The AWS Organizations accounts from which to delete stack instances.

You can specify Accounts or DeploymentTargets, but not both.

" }, "Regions":{ "shape":"RegionList", @@ -1571,6 +1586,10 @@ "shape":"ClientRequestToken", "documentation":"

The unique identifier for this stack set operation.

If you don't specify an operation ID, the SDK generates one automatically.

The operation ID also functions as an idempotency token, to ensure that AWS CloudFormation performs the stack set operation only once, even if you retry the request multiple times. You can retry stack set operation requests to ensure that AWS CloudFormation successfully received them.

Repeating this stack set operation with a new operation ID retries all stack instances whose status is OUTDATED.

", "idempotencyToken":true + }, + "CallAs":{ + "shape":"CallAs", + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

" } } }, @@ -1590,6 +1609,10 @@ "StackSetName":{ "shape":"StackSetName", "documentation":"

The name or unique ID of the stack set that you're deleting. You can obtain this value by running ListStackSets.

" + }, + "CallAs":{ + "shape":"CallAs", + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

" } } }, @@ -1611,7 +1634,7 @@ "documentation":"

The organization root ID or organizational unit (OU) IDs to which StackSets deploys.

" } }, - "documentation":"

[Service-managed permissions] The AWS Organizations accounts to which StackSets deploys. StackSets does not deploy stack instances to the organization management account, even if the organization management account is in your organization or in an OU in your organization.

For update operations, you can specify either Accounts or OrganizationalUnitIds. For create and delete operations, specify OrganizationalUnitIds.

" + "documentation":"

[Service-managed permissions] The AWS Organizations accounts to which StackSets deploys. StackSets does not deploy stack instances to the organization management account, even if the organization management account is in your organization or in an OU in your organization.

For update operations, you can specify either Accounts or OrganizationalUnitIds. For create and delete operations, specify OrganizationalUnitIds.
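
A minimal sketch of how a DeploymentTargets value might be passed on a create operation; the stack set name, OU ID, and Regions below are placeholders, not values from the diff.

```python
import botocore.session

cfn = botocore.session.get_session().create_client("cloudformation", region_name="us-east-1")

# Create and delete operations take OrganizationalUnitIds (per the text above).
cfn.create_stack_instances(
    StackSetName="example-stack-set",
    DeploymentTargets={"OrganizationalUnitIds": ["ou-examplerootid111-exampleouid111"]},
    Regions=["us-east-1", "eu-west-1"],
)
```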

" }, "DeprecatedStatus":{ "type":"string", @@ -1625,19 +1648,19 @@ "members":{ "Arn":{ "shape":"PrivateTypeArn", - "documentation":"

The Amazon Resource Name (ARN) of the type.

Conditional: You must specify either TypeName and Type, or Arn.

" + "documentation":"

The Amazon Resource Name (ARN) of the extension.

Conditional: You must specify either TypeName and Type, or Arn.

" }, "Type":{ "shape":"RegistryType", - "documentation":"

The kind of type.

Currently the only valid value is RESOURCE.

Conditional: You must specify either TypeName and Type, or Arn.

" + "documentation":"

The kind of extension.

Conditional: You must specify either TypeName and Type, or Arn.

" }, "TypeName":{ "shape":"TypeName", - "documentation":"

The name of the type.

Conditional: You must specify either TypeName and Type, or Arn.

" + "documentation":"

The name of the extension.

Conditional: You must specify either TypeName and Type, or Arn.

" }, "VersionId":{ "shape":"TypeVersionId", - "documentation":"

The ID of a specific version of the type. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the type version when it is registered.

" + "documentation":"

The ID of a specific version of the extension. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the extension version when it is registered.

" } } }, @@ -1867,6 +1890,10 @@ "StackInstanceRegion":{ "shape":"Region", "documentation":"

The name of a Region that's associated with this stack instance.

" + }, + "CallAs":{ + "shape":"CallAs", + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

" } } }, @@ -1978,6 +2005,10 @@ "StackSetName":{ "shape":"StackSetName", "documentation":"

The name or unique ID of the stack set whose description you want.

" + }, + "CallAs":{ + "shape":"CallAs", + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

" } } }, @@ -1995,6 +2026,10 @@ "OperationId":{ "shape":"ClientRequestToken", "documentation":"

The unique ID of the stack set operation.

" + }, + "CallAs":{ + "shape":"CallAs", + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

" } } }, @@ -2049,19 +2084,19 @@ "members":{ "Type":{ "shape":"RegistryType", - "documentation":"

The kind of type.

Currently the only valid value is RESOURCE.

Conditional: You must specify either TypeName and Type, or Arn.

" + "documentation":"

The kind of extension.

Conditional: You must specify either TypeName and Type, or Arn.

" }, "TypeName":{ "shape":"TypeName", - "documentation":"

The name of the type.

Conditional: You must specify either TypeName and Type, or Arn.

" + "documentation":"

The name of the extension.

Conditional: You must specify either TypeName and Type, or Arn.

" }, "Arn":{ "shape":"TypeArn", - "documentation":"

The Amazon Resource Name (ARN) of the type.

Conditional: You must specify either TypeName and Type, or Arn.

" + "documentation":"

The Amazon Resource Name (ARN) of the extension.

Conditional: You must specify either TypeName and Type, or Arn.

" }, "VersionId":{ "shape":"TypeVersionId", - "documentation":"

The ID of a specific version of the type. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the type version when it is registered.

If you specify a VersionId, DescribeType returns information about that specific type version. Otherwise, it returns information about the default type version.

" + "documentation":"

The ID of a specific version of the extension. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the extension version when it is registered.

If you specify a VersionId, DescribeType returns information about that specific extension version. Otherwise, it returns information about the default extension version.
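
For example, a sketch of calling DescribeType with and without a VersionId; the type name and version ID are hypothetical.

```python
import botocore.session

cfn = botocore.session.get_session().create_client("cloudformation", region_name="us-east-1")

# Without VersionId, the default extension version is described.
default_version = cfn.describe_type(Type="RESOURCE", TypeName="MyOrg::Service::Resource")
print(default_version["DefaultVersionId"], default_version["ProvisioningType"])

# With VersionId, that specific extension version is described instead.
specific = cfn.describe_type(
    Type="RESOURCE",
    TypeName="MyOrg::Service::Resource",
    VersionId="00000002",
)
```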

" } } }, @@ -2070,67 +2105,67 @@ "members":{ "Arn":{ "shape":"TypeArn", - "documentation":"

The Amazon Resource Name (ARN) of the type.

" + "documentation":"

The Amazon Resource Name (ARN) of the extension.

" }, "Type":{ "shape":"RegistryType", - "documentation":"

The kind of type.

Currently the only valid value is RESOURCE.

" + "documentation":"

The kind of extension.

" }, "TypeName":{ "shape":"TypeName", - "documentation":"

The name of the registered type.

" + "documentation":"

The name of the registered extension.

" }, "DefaultVersionId":{ "shape":"TypeVersionId", - "documentation":"

The ID of the default version of the type. The default version is used when the type version is not specified.

To set the default version of a type, use SetTypeDefaultVersion .

" + "documentation":"

The ID of the default version of the extension. The default version is used when the extension version is not specified.

To set the default version of an extension, use SetTypeDefaultVersion.
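
A short sketch of that call; the type name and version ID are placeholders.

```python
import botocore.session

cfn = botocore.session.get_session().create_client("cloudformation", region_name="us-east-1")

# Promote a previously registered version to be the extension's default.
cfn.set_type_default_version(
    Type="RESOURCE",
    TypeName="MyOrg::Service::Resource",
    VersionId="00000002",
)
```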

" }, "IsDefaultVersion":{ "shape":"IsDefaultVersion", - "documentation":"

Whether the specified type version is set as the default version.

" + "documentation":"

Whether the specified extension version is set as the default version.

" }, "Description":{ "shape":"Description", - "documentation":"

The description of the registered type.

" + "documentation":"

The description of the registered extension.

" }, "Schema":{ "shape":"TypeSchema", - "documentation":"

The schema that defines the type.

For more information on type schemas, see Resource Provider Schema in the CloudFormation CLI User Guide.

" + "documentation":"

The schema that defines the extension.

For more information on extension schemas, see Resource Provider Schema in the CloudFormation CLI User Guide.

" }, "ProvisioningType":{ "shape":"ProvisioningType", - "documentation":"

The provisioning behavior of the type. AWS CloudFormation determines the provisioning type during registration, based on the types of handlers in the schema handler package submitted.

Valid values include:

" + "documentation":"

The provisioning behavior of the extension. AWS CloudFormation determines the provisioning type during registration, based on the types of handlers in the schema handler package submitted.

Valid values include:

" }, "DeprecatedStatus":{ "shape":"DeprecatedStatus", - "documentation":"

The deprecation status of the type.

Valid values include:

" + "documentation":"

The deprecation status of the extension version.

Valid values include:

" }, "LoggingConfig":{ "shape":"LoggingConfig", - "documentation":"

Contains logging configuration information for a type.

" + "documentation":"

Contains logging configuration information for an extension.

" }, "ExecutionRoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM execution role used to register the type. If your resource type calls AWS APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. CloudFormation then assumes that execution role to provide your resource type with the appropriate credentials.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM execution role used to register the extension. If your resource type calls AWS APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. CloudFormation then assumes that execution role to provide your extension with the appropriate credentials.

" }, "Visibility":{ "shape":"Visibility", - "documentation":"

The scope at which the type is visible and usable in CloudFormation operations.

Valid values include:

" + "documentation":"

The scope at which the extension is visible and usable in CloudFormation operations.

Valid values include:

" }, "SourceUrl":{ "shape":"OptionalSecureUrl", - "documentation":"

The URL of the source code for the type.

" + "documentation":"

The URL of the source code for the extension.

" }, "DocumentationUrl":{ "shape":"OptionalSecureUrl", - "documentation":"

The URL of a page providing detailed documentation for this type.

" + "documentation":"

The URL of a page providing detailed documentation for this extension.

" }, "LastUpdated":{ "shape":"Timestamp", - "documentation":"

When the specified type version was registered.

" + "documentation":"

When the specified extension version was registered.

" }, "TimeCreated":{ "shape":"Timestamp", - "documentation":"

When the specified type version was registered.

" + "documentation":"

When the specified extension version was registered.

" } } }, @@ -2149,19 +2184,19 @@ "members":{ "ProgressStatus":{ "shape":"RegistrationStatus", - "documentation":"

The current status of the type registration request.

" + "documentation":"

The current status of the extension registration request.

" }, "Description":{ "shape":"Description", - "documentation":"

The description of the type registration request.

" + "documentation":"

The description of the extension registration request.

" }, "TypeArn":{ "shape":"TypeArn", - "documentation":"

The Amazon Resource Name (ARN) of the type being registered.

For registration requests with a ProgressStatus of other than COMPLETE, this will be null.

" + "documentation":"

The Amazon Resource Name (ARN) of the extension being registered.

For registration requests with a ProgressStatus other than COMPLETE, this will be null.

" }, "TypeVersionArn":{ "shape":"TypeArn", - "documentation":"

The Amazon Resource Name (ARN) of this specific version of the type being registered.

For registration requests with a ProgressStatus of other than COMPLETE, this will be null.

" + "documentation":"

The Amazon Resource Name (ARN) of this specific version of the extension being registered.

For registration requests with a ProgressStatus other than COMPLETE, this will be null.

" } } }, @@ -2234,6 +2269,10 @@ "shape":"ClientRequestToken", "documentation":"

The ID of the stack set operation.

", "idempotencyToken":true + }, + "CallAs":{ + "shape":"CallAs", + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

" } } }, @@ -2274,7 +2313,7 @@ }, "TemplateURL":{ "shape":"TemplateURL", - "documentation":"

Location of file containing the template body. The URL must point to a template that is located in an Amazon S3 bucket. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.

" + "documentation":"

Location of file containing the template body. The URL must point to a template that is located in an Amazon S3 bucket or a Systems Manager document. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.

" }, "Parameters":{ "shape":"Parameters", @@ -2442,7 +2481,7 @@ }, "TemplateURL":{ "shape":"TemplateURL", - "documentation":"

Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information about templates, see Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL.

" + "documentation":"

Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. For more information about templates, see Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL.

" }, "StackName":{ "shape":"StackNameOrId", @@ -2699,6 +2738,10 @@ "StackInstanceRegion":{ "shape":"Region", "documentation":"

The name of the Region where you want to list stack instances.

" + }, + "CallAs":{ + "shape":"CallAs", + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

" } } }, @@ -2766,6 +2809,10 @@ "MaxResults":{ "shape":"MaxResults", "documentation":"

The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

" + }, + "CallAs":{ + "shape":"CallAs", + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

" } } }, @@ -2797,6 +2844,10 @@ "MaxResults":{ "shape":"MaxResults", "documentation":"

The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

" + }, + "CallAs":{ + "shape":"CallAs", + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

" } } }, @@ -2827,6 +2878,10 @@ "Status":{ "shape":"StackSetStatus", "documentation":"

The status of the stack sets that you want to get summary information about.

" + }, + "CallAs":{ + "shape":"CallAs", + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

" } } }, @@ -2876,19 +2931,19 @@ "members":{ "Type":{ "shape":"RegistryType", - "documentation":"

The kind of type.

Currently the only valid value is RESOURCE.

Conditional: You must specify either TypeName and Type, or Arn.

" + "documentation":"

The kind of extension.

Conditional: You must specify either TypeName and Type, or Arn.

" }, "TypeName":{ "shape":"TypeName", - "documentation":"

The name of the type.

Conditional: You must specify either TypeName and Type, or Arn.

" + "documentation":"

The name of the extension.

Conditional: You must specify either TypeName and Type, or Arn.

" }, "TypeArn":{ "shape":"TypeArn", - "documentation":"

The Amazon Resource Name (ARN) of the type.

Conditional: You must specify either TypeName and Type, or Arn.

" + "documentation":"

The Amazon Resource Name (ARN) of the extension.

Conditional: You must specify either TypeName and Type, or Arn.

" }, "RegistrationStatusFilter":{ "shape":"RegistrationStatus", - "documentation":"

The current status of the type registration request.

The default is IN_PROGRESS.

" + "documentation":"

The current status of the extension registration request.

The default is IN_PROGRESS.

" }, "MaxResults":{ "shape":"MaxResults", @@ -2905,7 +2960,7 @@ "members":{ "RegistrationTokenList":{ "shape":"RegistrationTokenList", - "documentation":"

A list of type registration tokens.

Use DescribeTypeRegistration to return detailed information about a type registration request.

" + "documentation":"

A list of extension registration tokens.

Use DescribeTypeRegistration to return detailed information about a type registration request.

" }, "NextToken":{ "shape":"NextToken", @@ -2918,15 +2973,15 @@ "members":{ "Type":{ "shape":"RegistryType", - "documentation":"

The kind of the type.

Currently the only valid value is RESOURCE.

Conditional: You must specify either TypeName and Type, or Arn.

" + "documentation":"

The kind of the extension.

Conditional: You must specify either TypeName and Type, or Arn.

" }, "TypeName":{ "shape":"TypeName", - "documentation":"

The name of the type for which you want version summary information.

Conditional: You must specify either TypeName and Type, or Arn.

" + "documentation":"

The name of the extension for which you want version summary information.

Conditional: You must specify either TypeName and Type, or Arn.

" }, "Arn":{ "shape":"PrivateTypeArn", - "documentation":"

The Amazon Resource Name (ARN) of the type for which you want version summary information.

Conditional: You must specify either TypeName and Type, or Arn.

" + "documentation":"

The Amazon Resource Name (ARN) of the extension for which you want version summary information.

Conditional: You must specify either TypeName and Type, or Arn.

" }, "MaxResults":{ "shape":"MaxResults", @@ -2938,7 +2993,7 @@ }, "DeprecatedStatus":{ "shape":"DeprecatedStatus", - "documentation":"

The deprecation status of the type versions that you want to get summary information about.

Valid values include:

The default is LIVE.

" + "documentation":"

The deprecation status of the extension versions that you want to get summary information about.

Valid values include:

The default is LIVE.

" } } }, @@ -2947,7 +3002,7 @@ "members":{ "TypeVersionSummaries":{ "shape":"TypeVersionSummaries", - "documentation":"

A list of TypeVersionSummary structures that contain information about the specified type's versions.

" + "documentation":"

A list of TypeVersionSummary structures that contain information about the specified extension's versions.

" }, "NextToken":{ "shape":"NextToken", @@ -2960,15 +3015,15 @@ "members":{ "Visibility":{ "shape":"Visibility", - "documentation":"

The scope at which the type is visible and usable in CloudFormation operations.

Valid values include:

The default is PRIVATE.

" + "documentation":"

The scope at which the extension is visible and usable in CloudFormation operations.

Valid values include:

The default is PRIVATE.

" }, "ProvisioningType":{ "shape":"ProvisioningType", - "documentation":"

The provisioning behavior of the type. AWS CloudFormation determines the provisioning type during registration, based on the types of handlers in the schema handler package submitted.

Valid values include:

" + "documentation":"

The provisioning behavior of the type. AWS CloudFormation determines the provisioning type during registration, based on the types of handlers in the schema handler package submitted.

Valid values include:

" }, "DeprecatedStatus":{ "shape":"DeprecatedStatus", - "documentation":"

The deprecation status of the types that you want to get summary information about.

Valid values include:

" + "documentation":"

The deprecation status of the extensions that you want to get summary information about.

Valid values include:

" }, "Type":{ "shape":"RegistryType", @@ -2989,7 +3044,7 @@ "members":{ "TypeSummaries":{ "shape":"TypeSummaries", - "documentation":"

A list of TypeSummary structures that contain information about the specified types.

" + "documentation":"

A list of TypeSummary structures that contain information about the specified extensions.

" }, "NextToken":{ "shape":"NextToken", @@ -3405,27 +3460,27 @@ "members":{ "Type":{ "shape":"RegistryType", - "documentation":"

The kind of type.

Currently, the only valid value is RESOURCE.

" + "documentation":"

The kind of extension.

" }, "TypeName":{ "shape":"TypeName", - "documentation":"

The name of the type being registered.

We recommend that type names adhere to the following pattern: company_or_organization::service::type.

The following organization namespaces are reserved and cannot be used in your resource type names:

" + "documentation":"

The name of the extension being registered.

We recommend that extension names adhere to the following pattern: company_or_organization::service::type.

The following organization namespaces are reserved and cannot be used in your extension names:

" }, "SchemaHandlerPackage":{ "shape":"S3Url", - "documentation":"

A url to the S3 bucket containing the schema handler package that contains the schema, event handlers, and associated files for the type you want to register.

For information on generating a schema handler package for the type you want to register, see submit in the CloudFormation CLI User Guide.

The user registering the resource provider type must be able to access the schema handler package in the S3 bucket. That is, the user needs to have GetObject permissions for the schema handler package. For more information, see Actions, Resources, and Condition Keys for Amazon S3 in the AWS Identity and Access Management User Guide.

" + "documentation":"

A URL to the S3 bucket containing the extension project package that contains the necessary files for the extension you want to register.

For information on generating a schema handler package for the extension you want to register, see submit in the CloudFormation CLI User Guide.

The user registering the extension must be able to access the package in the S3 bucket. That is, the user needs to have GetObject permissions for the schema handler package. For more information, see Actions, Resources, and Condition Keys for Amazon S3 in the AWS Identity and Access Management User Guide.

" }, "LoggingConfig":{ "shape":"LoggingConfig", - "documentation":"

Specifies logging configuration information for a type.

" + "documentation":"

Specifies logging configuration information for an extension.

" }, "ExecutionRoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role for CloudFormation to assume when invoking the resource provider. If your resource type calls AWS APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. When CloudFormation needs to invoke the resource provider handler, CloudFormation assumes this execution role to create a temporary session token, which it then passes to the resource provider handler, thereby supplying your resource provider with the appropriate credentials.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role for CloudFormation to assume when invoking the extension. If your extension calls AWS APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. When CloudFormation needs to invoke the extension handler, CloudFormation assumes this execution role to create a temporary session token, which it then passes to the extension handler, thereby supplying your extension with the appropriate credentials.

" }, "ClientRequestToken":{ "shape":"RequestToken", - "documentation":"

A unique identifier that acts as an idempotency key for this registration request. Specifying a client request token prevents CloudFormation from generating more than one version of a type from the same registration request, even if the request is submitted multiple times.

" + "documentation":"

A unique identifier that acts as an idempotency key for this registration request. Specifying a client request token prevents CloudFormation from generating more than one version of an extension from the same registration request, even if the request is submitted multiple times.

" } } }, @@ -3434,7 +3489,7 @@ "members":{ "RegistrationToken":{ "shape":"RegistrationToken", - "documentation":"

The identifier for this registration request.

Use this registration token when calling DescribeTypeRegistration , which returns information about the status and IDs of the type registration.

" + "documentation":"

The identifier for this registration request.

Use this registration token when calling DescribeTypeRegistration, which returns information about the status and IDs of the extension registration.
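
A sketch of that registration flow, assuming the s3:// form of the package URL; the bucket, key, type name, and idempotency token are placeholders.

```python
import time
import botocore.session

cfn = botocore.session.get_session().create_client("cloudformation", region_name="us-east-1")

response = cfn.register_type(
    Type="RESOURCE",
    TypeName="MyOrg::Service::Resource",
    SchemaHandlerPackage="s3://example-bucket/my-resource-handler.zip",
    ClientRequestToken="example-idempotency-token",
)
token = response["RegistrationToken"]

# Poll until the registration finishes; TypeVersionArn stays null until the
# ProgressStatus is COMPLETE (per the documentation above).
while True:
    status = cfn.describe_type_registration(RegistrationToken=token)
    if status["ProgressStatus"] != "IN_PROGRESS":
        break
    time.sleep(30)
print(status["ProgressStatus"], status.get("TypeVersionArn"))
```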

" } } }, @@ -3798,19 +3853,19 @@ "members":{ "Arn":{ "shape":"PrivateTypeArn", - "documentation":"

The Amazon Resource Name (ARN) of the type for which you want version summary information.

Conditional: You must specify either TypeName and Type, or Arn.

" + "documentation":"

The Amazon Resource Name (ARN) of the extension for which you want version summary information.

Conditional: You must specify either TypeName and Type, or Arn.

" }, "Type":{ "shape":"RegistryType", - "documentation":"

The kind of type.

Conditional: You must specify either TypeName and Type, or Arn.

" + "documentation":"

The kind of extension.

Conditional: You must specify either TypeName and Type, or Arn.

" }, "TypeName":{ "shape":"TypeName", - "documentation":"

The name of the type.

Conditional: You must specify either TypeName and Type, or Arn.

" + "documentation":"

The name of the extension.

Conditional: You must specify either TypeName and Type, or Arn.

" }, "VersionId":{ "shape":"TypeVersionId", - "documentation":"

The ID of a specific version of the type. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the type version when it is registered.

" + "documentation":"

The ID of a specific version of the extension. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the extension version when it is registered.

" } } }, @@ -4073,7 +4128,7 @@ }, "Account":{ "shape":"Account", - "documentation":"

[Self-managed permissions] The name of the AWS account that the stack instance is associated with.

" + "documentation":"

[Self-managed permissions] The name of the AWS account that the stack instance is associated with.

" }, "StackId":{ "shape":"StackId", @@ -4097,7 +4152,7 @@ }, "OrganizationalUnitId":{ "shape":"OrganizationalUnitId", - "documentation":"

[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.

" + "documentation":"

[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.

" }, "DriftStatus":{ "shape":"StackDriftStatus", @@ -4196,7 +4251,7 @@ }, "Account":{ "shape":"Account", - "documentation":"

[Self-managed permissions] The name of the AWS account that the stack instance is associated with.

" + "documentation":"

[Self-managed permissions] The name of the AWS account that the stack instance is associated with.

" }, "StackId":{ "shape":"StackId", @@ -4216,7 +4271,7 @@ }, "OrganizationalUnitId":{ "shape":"OrganizationalUnitId", - "documentation":"

[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.

" + "documentation":"

[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.

" }, "DriftStatus":{ "shape":"StackDriftStatus", @@ -4582,7 +4637,7 @@ }, "AutoDeployment":{ "shape":"AutoDeployment", - "documentation":"

[Service-managed permissions] Describes whether StackSets automatically deploys to AWS Organizations accounts that are added to a target organization or organizational unit (OU).

" + "documentation":"

[Service-managed permissions] Describes whether StackSets automatically deploys to AWS Organizations accounts that are added to a target organization or organizational unit (OU).

" }, "PermissionModel":{ "shape":"PermissionModels", @@ -4590,7 +4645,7 @@ }, "OrganizationalUnitIds":{ "shape":"OrganizationalUnitIdList", - "documentation":"

[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.

" + "documentation":"

[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.

" } }, "documentation":"

A structure that contains information about a stack set. A stack set enables you to provision stacks into AWS accounts and across Regions by using a single CloudFormation template. In the stack set, you specify the template to use, as well as any parameters and capabilities that the template requires.

" @@ -4699,7 +4754,7 @@ }, "Status":{ "shape":"StackSetOperationStatus", - "documentation":"

The status of the operation.

" + "documentation":"

The status of the operation.

" }, "OperationPreferences":{ "shape":"StackSetOperationPreferences", @@ -4727,7 +4782,7 @@ }, "DeploymentTargets":{ "shape":"DeploymentTargets", - "documentation":"

[Service-managed permissions] The AWS Organizations accounts affected by the stack operation.

" + "documentation":"

[Service-managed permissions] The AWS Organizations accounts affected by the stack operation.

" }, "StackSetDriftDetectionDetails":{ "shape":"StackSetDriftDetectionDetails", @@ -4790,7 +4845,7 @@ "members":{ "Account":{ "shape":"Account", - "documentation":"

[Self-managed permissions] The name of the AWS account for this operation result.

" + "documentation":"

[Self-managed permissions] The name of the AWS account for this operation result.

" }, "Region":{ "shape":"Region", @@ -4810,7 +4865,7 @@ }, "OrganizationalUnitId":{ "shape":"OrganizationalUnitId", - "documentation":"

[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.

" + "documentation":"

[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.

" } }, "documentation":"

The structure that contains information about a specified operation's results for a given account in a given Region.

" @@ -4843,7 +4898,7 @@ }, "Status":{ "shape":"StackSetOperationStatus", - "documentation":"

The overall status of the operation.

" + "documentation":"

The overall status of the operation.

" }, "CreationTimestamp":{ "shape":"Timestamp", @@ -4888,7 +4943,7 @@ }, "AutoDeployment":{ "shape":"AutoDeployment", - "documentation":"

[Service-managed permissions] Describes whether StackSets automatically deploys to AWS Organizations accounts that are added to a target organizational unit (OU).

" + "documentation":"

[Service-managed permissions] Describes whether StackSets automatically deploys to AWS Organizations accounts that are added to a target organizational unit (OU).

" }, "PermissionModel":{ "shape":"PermissionModels", @@ -5034,6 +5089,10 @@ "OperationId":{ "shape":"ClientRequestToken", "documentation":"

The ID of the stack operation.

" + }, + "CallAs":{ + "shape":"CallAs", + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

" } } }, @@ -5266,7 +5325,7 @@ }, "TemplateURL":{ "shape":"TemplateURL", - "documentation":"

Location of file containing the template body. The URL must point to a template that is located in an Amazon S3 bucket. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true.

" + "documentation":"

Location of file containing the template body. The URL must point to a template that is located in an Amazon S3 bucket or a Systems Manager document. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true.

" }, "UsePreviousTemplate":{ "shape":"UsePreviousTemplate", @@ -5336,11 +5395,11 @@ }, "Accounts":{ "shape":"AccountList", - "documentation":"

[Self-managed permissions] The names of one or more AWS accounts for which you want to update parameter values for stack instances. The overridden parameter values will be applied to all stack instances in the specified accounts and Regions.

You can specify Accounts or DeploymentTargets, but not both.

" + "documentation":"

[Self-managed permissions] The names of one or more AWS accounts for which you want to update parameter values for stack instances. The overridden parameter values will be applied to all stack instances in the specified accounts and Regions.

You can specify Accounts or DeploymentTargets, but not both.

" }, "DeploymentTargets":{ "shape":"DeploymentTargets", - "documentation":"

[Service-managed permissions] The AWS Organizations accounts for which you want to update parameter values for stack instances. If your update targets OUs, the overridden parameter values only apply to the accounts that are currently in the target OUs and their child OUs. Accounts added to the target OUs and their child OUs in the future won't use the overridden values.

You can specify Accounts or DeploymentTargets, but not both.

" + "documentation":"

[Service-managed permissions] The AWS Organizations accounts for which you want to update parameter values for stack instances. If your update targets OUs, the overridden parameter values only apply to the accounts that are currently in the target OUs and their child OUs. Accounts added to the target OUs and their child OUs in the future won't use the overridden values.

You can specify Accounts or DeploymentTargets, but not both.

" }, "Regions":{ "shape":"RegionList", @@ -5358,6 +5417,10 @@ "shape":"ClientRequestToken", "documentation":"

The unique identifier for this stack set operation.

The operation ID also functions as an idempotency token, to ensure that AWS CloudFormation performs the stack set operation only once, even if you retry the request multiple times. You might retry stack set operation requests to ensure that AWS CloudFormation successfully received them.

If you don't specify an operation ID, the SDK generates one automatically.

", "idempotencyToken":true + }, + "CallAs":{ + "shape":"CallAs", + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

" } } }, @@ -5398,7 +5461,7 @@ }, "TemplateURL":{ "shape":"TemplateURL", - "documentation":"

The location of the file that contains the template body. The URL must point to a template (maximum size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information, see Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL—or set UsePreviousTemplate to true.

" + "documentation":"

The location of the file that contains the template body. The URL must point to a template (maximum size: 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. For more information, see Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL—or set UsePreviousTemplate to true.

" }, "UsePreviousTemplate":{ "shape":"UsePreviousTemplate", @@ -5430,7 +5493,7 @@ }, "DeploymentTargets":{ "shape":"DeploymentTargets", - "documentation":"

[Service-managed permissions] The AWS Organizations accounts in which to update associated stack instances.

To update all the stack instances associated with this stack set, do not specify DeploymentTargets or Regions.

If the stack set update includes changes to the template (that is, if TemplateBody or TemplateURL is specified), or the Parameters, AWS CloudFormation marks all stack instances with a status of OUTDATED prior to updating the stack instances in the specified accounts and Regions. If the stack set update does not include changes to the template or parameters, AWS CloudFormation updates the stack instances in the specified accounts and Regions, while leaving all other stack instances with their existing stack instance status.

" + "documentation":"

[Service-managed permissions] The AWS Organizations accounts in which to update associated stack instances.

To update all the stack instances associated with this stack set, do not specify DeploymentTargets or Regions.

If the stack set update includes changes to the template (that is, if TemplateBody or TemplateURL is specified), or the Parameters, AWS CloudFormation marks all stack instances with a status of OUTDATED prior to updating the stack instances in the specified accounts and Regions. If the stack set update does not include changes to the template or parameters, AWS CloudFormation updates the stack instances in the specified accounts and Regions, while leaving all other stack instances with their existing stack instance status.

" }, "PermissionModel":{ "shape":"PermissionModels", @@ -5438,7 +5501,7 @@ }, "AutoDeployment":{ "shape":"AutoDeployment", - "documentation":"

[Service-managed permissions] Describes whether StackSets automatically deploys to AWS Organizations accounts that are added to a target organization or organizational unit (OU).

If you specify AutoDeployment, do not specify DeploymentTargets or Regions.

" + "documentation":"

[Service-managed permissions] Describes whether StackSets automatically deploys to AWS Organizations accounts that are added to a target organization or organizational unit (OU).

If you specify AutoDeployment, do not specify DeploymentTargets or Regions.

" }, "OperationId":{ "shape":"ClientRequestToken", @@ -5447,11 +5510,15 @@ }, "Accounts":{ "shape":"AccountList", - "documentation":"

[Self-managed permissions] The accounts in which to update associated stack instances. If you specify accounts, you must also specify the Regions in which to update stack set instances.

To update all the stack instances associated with this stack set, do not specify the Accounts or Regions properties.

If the stack set update includes changes to the template (that is, if the TemplateBody or TemplateURL properties are specified), or the Parameters property, AWS CloudFormation marks all stack instances with a status of OUTDATED prior to updating the stack instances in the specified accounts and Regions. If the stack set update does not include changes to the template or parameters, AWS CloudFormation updates the stack instances in the specified accounts and Regions, while leaving all other stack instances with their existing stack instance status.

" + "documentation":"

[Self-managed permissions] The accounts in which to update associated stack instances. If you specify accounts, you must also specify the Regions in which to update stack set instances.

To update all the stack instances associated with this stack set, do not specify the Accounts or Regions properties.

If the stack set update includes changes to the template (that is, if the TemplateBody or TemplateURL properties are specified), or the Parameters property, AWS CloudFormation marks all stack instances with a status of OUTDATED prior to updating the stack instances in the specified accounts and Regions. If the stack set update does not include changes to the template or parameters, AWS CloudFormation updates the stack instances in the specified accounts and Regions, while leaving all other stack instances with their existing stack instance status.

" }, "Regions":{ "shape":"RegionList", "documentation":"

The Regions in which to update associated stack instances. If you specify Regions, you must also specify accounts in which to update stack set instances.

To update all the stack instances associated with this stack set, do not specify the Accounts or Regions properties.

If the stack set update includes changes to the template (that is, if the TemplateBody or TemplateURL properties are specified), or the Parameters property, AWS CloudFormation marks all stack instances with a status of OUTDATED prior to updating the stack instances in the specified accounts and Regions. If the stack set update does not include changes to the template or parameters, AWS CloudFormation updates the stack instances in the specified accounts and Regions, while leaving all other stack instances with their existing stack instance status.

" + }, + "CallAs":{ + "shape":"CallAs", + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

" } } }, @@ -5502,7 +5569,7 @@ }, "TemplateURL":{ "shape":"TemplateURL", - "documentation":"

Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.

" + "documentation":"

Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.

" } }, "documentation":"

The input for ValidateTemplate action.
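
A minimal sketch of that input through a botocore client; the bucket URL is a placeholder, and because the exact reference syntax for a Systems Manager document is not shown in this model, only the S3 case is illustrated.

```python
import botocore.session

cfn = botocore.session.get_session().create_client("cloudformation", region_name="us-east-1")

# Pass either TemplateURL or TemplateBody; if both are passed, only
# TemplateBody is used (per the documentation above).
result = cfn.validate_template(
    TemplateURL="https://example-bucket.s3.amazonaws.com/template.yaml"
)
print(result.get("Parameters", []))
```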

" diff --git a/botocore/data/cloudtrail/2013-11-01/service-2.json b/botocore/data/cloudtrail/2013-11-01/service-2.json index 4baeb106..2f0e4c5e 100644 --- a/botocore/data/cloudtrail/2013-11-01/service-2.json +++ b/botocore/data/cloudtrail/2013-11-01/service-2.json @@ -91,7 +91,8 @@ {"shape":"UnsupportedOperationException"}, {"shape":"OperationNotPermittedException"}, {"shape":"NotOrganizationMasterAccountException"}, - {"shape":"InsufficientDependencyServiceAccessPermissionException"} + {"shape":"InsufficientDependencyServiceAccessPermissionException"}, + {"shape":"ConflictException"} ], "documentation":"

Deletes a trail. This operation must be called from the region in which the trail was created. DeleteTrail cannot be called on the shadow trails (replicated trails in other regions) of a trail that is enabled in all regions.

", "idempotent":true @@ -289,6 +290,8 @@ {"shape":"InvalidInsightSelectorsException"}, {"shape":"InsufficientS3BucketPolicyException"}, {"shape":"InsufficientEncryptionPolicyException"}, + {"shape":"S3BucketDoesNotExistException"}, + {"shape":"KmsException"}, {"shape":"UnsupportedOperationException"}, {"shape":"OperationNotPermittedException"}, {"shape":"NotOrganizationMasterAccountException"} @@ -446,7 +449,7 @@ "members":{ "Field":{ "shape":"SelectorField", - "documentation":"

A field in an event record on which to filter events to be logged. Supported fields include readOnly, eventCategory, eventSource (for management events), eventName, resources.type, and resources.ARN.

" + "documentation":"

A field in an event record on which to filter events to be logged. Supported fields include readOnly, eventCategory, eventSource (for management events), eventName, resources.type, and resources.ARN.

" }, "Equals":{ "shape":"Operator", @@ -510,6 +513,13 @@ "documentation":"

Cannot set a CloudWatch Logs delivery for this region.

", "exception":true }, + "ConflictException":{ + "type":"structure", + "members":{ + }, + "documentation":"

This exception is thrown when the specified resource is not ready for an operation. This can occur when you try to run an operation on a trail before CloudTrail has time to fully load the trail. If this exception occurs, wait a few minutes, and then try the operation again.
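
A sketch of handling the new error on DeleteTrail, assuming this updated model so that the client exposes a ConflictException class; the trail name and retry interval are arbitrary.

```python
import time
import botocore.session

cloudtrail = botocore.session.get_session().create_client("cloudtrail", region_name="us-east-1")

# The documentation advises waiting and retrying when the trail has not
# finished loading; retry a few times before giving up.
for attempt in range(5):
    try:
        cloudtrail.delete_trail(Name="example-trail")
        break
    except cloudtrail.exceptions.ConflictException:
        time.sleep(60)
```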

", + "exception":true + }, "CreateTrailRequest":{ "type":"structure", "required":[ @@ -629,7 +639,7 @@ "members":{ "Type":{ "shape":"String", - "documentation":"

The resource type in which you want to log data events. You can specify AWS::S3::Object or AWS::Lambda::Function resources.

" + "documentation":"

The resource type in which you want to log data events. You can specify AWS::S3::Object or AWS::Lambda::Function resources.

The AWS::S3Outposts::Object resource type is not valid in basic event selectors. To log data events on this resource type, use advanced event selectors.
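
A sketch of logging S3 on Outposts data events through advanced event selectors, as the note above recommends; the selector name member and any request structure beyond the Field/Equals members shown in this model are assumptions.

```python
import botocore.session

cloudtrail = botocore.session.get_session().create_client("cloudtrail", region_name="us-east-1")

cloudtrail.put_event_selectors(
    TrailName="example-trail",
    AdvancedEventSelectors=[
        {
            "Name": "S3 Outposts data events",  # selector name: assumed member
            "FieldSelectors": [
                {"Field": "eventCategory", "Equals": ["Data"]},
                {"Field": "resources.type", "Equals": ["AWS::S3Outposts::Object"]},
            ],
        }
    ],
)
```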

" }, "Values":{ "shape":"DataResourceValues", diff --git a/botocore/data/codeartifact/2018-09-22/service-2.json b/botocore/data/codeartifact/2018-09-22/service-2.json index 43b491ad..1c7e506f 100644 --- a/botocore/data/codeartifact/2018-09-22/service-2.json +++ b/botocore/data/codeartifact/2018-09-22/service-2.json @@ -139,7 +139,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Deletes one or more versions of a package. A deleted package version cannot be restored in your repository. If you want to remove a package version from your repository and be able to restore it later, set its status to Archived. Archived packages cannot be downloaded from a repository and don't show up with list package APIs (for example, ListPackageVersions), but you can restore them using UpdatePackageVersionsStatus.

" + "documentation":"

Deletes one or more versions of a package. A deleted package version cannot be restored in your repository. If you want to remove a package version from your repository and be able to restore it later, set its status to Archived. Archived packages cannot be downloaded from a repository and don't show up with list package APIs (for example, ListPackageVersions), but you can restore them using UpdatePackageVersionsStatus.

" }, "DeleteRepository":{ "name":"DeleteRepository", @@ -350,7 +350,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each package format:

" + "documentation":"

Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each package format:
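
For example, a sketch using the lower-cased parameter names shown in this model; the domain and repository names are placeholders, npm is just one of the supported formats, and the repositoryEndpoint response key is assumed.

```python
import botocore.session

codeartifact = botocore.session.get_session().create_client("codeartifact", region_name="us-east-1")

# One endpoint exists per package format; ask for the npm endpoint here.
response = codeartifact.get_repository_endpoint(
    domain="example-domain",
    repository="example-repo",
    format="npm",
)
print(response["repositoryEndpoint"])  # response key assumed from the API shape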

" }, "GetRepositoryPermissionsPolicy":{ "name":"GetRepositoryPermissionsPolicy", @@ -383,7 +383,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns a list of DomainSummary objects for all domains owned by the AWS account that makes this call. Each returned DomainSummary object contains information about a domain.

" + "documentation":"

Returns a list of DomainSummary objects for all domains owned by the AWS account that makes this call. Each returned DomainSummary object contains information about a domain.
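
A sketch of iterating over all domains using the domains/nextToken members of ListDomainsResult shown further down; the name member of DomainSummary is an assumption.

```python
import botocore.session

codeartifact = botocore.session.get_session().create_client("codeartifact", region_name="us-east-1")

# Follow nextToken until every DomainSummary has been collected.
domains, kwargs = [], {}
while True:
    page = codeartifact.list_domains(**kwargs)
    domains.extend(page.get("domains", []))
    if not page.get("nextToken"):
        break
    kwargs["nextToken"] = page["nextToken"]
print([d.get("name") for d in domains])  # the name member is assumed here
```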

" }, "ListPackageVersionAssets":{ "name":"ListPackageVersionAssets", @@ -700,7 +700,7 @@ }, "externalConnection":{ "shape":"ExternalConnectionName", - "documentation":"

The name of the external connection to add to the repository. The following values are supported:

", + "documentation":"

The name of the external connection to add to the repository. The following values are supported:

", "location":"querystring", "locationName":"external-connection" } @@ -775,13 +775,13 @@ }, "format":{ "shape":"PackageFormat", - "documentation":"

The format of the package that is copied. The valid package types are:

", + "documentation":"

The format of the package that is copied. The valid package types are:

", "location":"querystring", "locationName":"format" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", "location":"querystring", "locationName":"namespace" }, @@ -860,7 +860,7 @@ "members":{ "domain":{ "shape":"DomainName", - "documentation":"

The domain that contains the created repository.

", + "documentation":"

The name of the domain that contains the created repository.

", "location":"querystring", "locationName":"domain" }, @@ -989,13 +989,13 @@ }, "format":{ "shape":"PackageFormat", - "documentation":"

The format of the package versions to delete. The valid values are:

", + "documentation":"

The format of the package versions to delete. The valid values are:

", "location":"querystring", "locationName":"format" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", "location":"querystring", "locationName":"namespace" }, @@ -1160,13 +1160,13 @@ }, "format":{ "shape":"PackageFormat", - "documentation":"

A format that specifies the type of the requested package version. The valid values are:

", + "documentation":"

A format that specifies the type of the requested package version. The valid values are:

", "location":"querystring", "locationName":"format" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", "location":"querystring", "locationName":"namespace" }, @@ -1190,7 +1190,7 @@ "members":{ "packageVersion":{ "shape":"PackageVersionDescription", - "documentation":"

A PackageVersionDescription object that contains information about the requested package version.

" + "documentation":"

A PackageVersionDescription object that contains information about the requested package version.

" } } }, @@ -1308,13 +1308,13 @@ }, "format":{ "shape":"PackageFormat", - "documentation":"

A format that specifies the type of package versions you want to dispose. The valid values are:

", + "documentation":"

A format that specifies the type of package versions you want to dispose. The valid values are:

", "location":"querystring", "locationName":"format" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", "location":"querystring", "locationName":"namespace" }, @@ -1526,7 +1526,7 @@ "members":{ "domain":{ "shape":"DomainName", - "documentation":"

The domain that contains the repository that contains the package version with the requested asset.

", + "documentation":"

The name of the domain that contains the repository that contains the package version with the requested asset.

", "location":"querystring", "locationName":"domain" }, @@ -1544,13 +1544,13 @@ }, "format":{ "shape":"PackageFormat", - "documentation":"

A format that specifies the type of the package version with the requested asset file. The valid values are:

", + "documentation":"

A format that specifies the type of the package version with the requested asset file. The valid values are:

", "location":"querystring", "locationName":"format" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", "location":"querystring", "locationName":"namespace" }, @@ -1638,13 +1638,13 @@ }, "format":{ "shape":"PackageFormat", - "documentation":"

A format that specifies the type of the package version with the requested readme file. The valid values are:

", + "documentation":"

A format that specifies the type of the package version with the requested readme file. The valid values are:

", "location":"querystring", "locationName":"format" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", "location":"querystring", "locationName":"namespace" }, @@ -1667,11 +1667,11 @@ "members":{ "format":{ "shape":"PackageFormat", - "documentation":"

The format of the package with the requested readme file. Valid format types are:

" + "documentation":"

The format of the package with the requested readme file. Valid format types are:

" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

" + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

" }, "package":{ "shape":"PackageName", @@ -1719,7 +1719,7 @@ }, "format":{ "shape":"PackageFormat", - "documentation":"

Returns which endpoint of a repository to return. A repository has one endpoint for each package format:

", + "documentation":"

Returns which endpoint of a repository to return. A repository has one endpoint for each package format:

", "location":"querystring", "locationName":"format" } @@ -1838,7 +1838,7 @@ "members":{ "domains":{ "shape":"DomainSummaryList", - "documentation":"

The returned list of DomainSummary objects.

" + "documentation":"

The returned list of DomainSummary objects.

" }, "nextToken":{ "shape":"PaginationToken", @@ -1881,13 +1881,13 @@ }, "format":{ "shape":"PackageFormat", - "documentation":"

The format of the package that contains the returned package version assets. The valid package types are:

", + "documentation":"

The format of the package that contains the returned package version assets. The valid package types are:

", "location":"querystring", "locationName":"format" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", "location":"querystring", "locationName":"namespace" }, @@ -1926,7 +1926,7 @@ }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

" + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

" }, "package":{ "shape":"PackageName", @@ -1962,7 +1962,7 @@ "members":{ "domain":{ "shape":"DomainName", - "documentation":"

The domain that contains the repository that contains the requested package version dependencies.

", + "documentation":"

The name of the domain that contains the repository that contains the requested package version dependencies.

", "location":"querystring", "locationName":"domain" }, @@ -1980,13 +1980,13 @@ }, "format":{ "shape":"PackageFormat", - "documentation":"

The format of the package with the requested dependencies. The valid package types are:

", + "documentation":"

The format of the package with the requested dependencies. The valid package types are:

", "location":"querystring", "locationName":"format" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", "location":"querystring", "locationName":"namespace" }, @@ -2015,11 +2015,11 @@ "members":{ "format":{ "shape":"PackageFormat", - "documentation":"

A format that specifies the type of the package that contains the returned dependencies. The valid values are:

" + "documentation":"

A format that specifies the type of the package that contains the returned dependencies. The valid values are:

" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

" + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

" }, "package":{ "shape":"PackageName", @@ -2077,13 +2077,13 @@ }, "format":{ "shape":"PackageFormat", - "documentation":"

The format of the returned packages. The valid package types are:

", + "documentation":"

The format of the returned packages. The valid package types are:

", "location":"querystring", "locationName":"format" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", "location":"querystring", "locationName":"namespace" }, @@ -2128,11 +2128,11 @@ }, "format":{ "shape":"PackageFormat", - "documentation":"

A format of the package. Valid package format values are:

" + "documentation":"

A format of the package. Valid package format values are:

" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

" + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

" }, "package":{ "shape":"PackageName", @@ -2162,7 +2162,7 @@ "members":{ "domain":{ "shape":"DomainName", - "documentation":"

The domain that contains the repository that contains the requested list of packages.

", + "documentation":"

The name of the domain that contains the repository that contains the requested list of packages.

", "location":"querystring", "locationName":"domain" }, @@ -2180,13 +2180,13 @@ }, "format":{ "shape":"PackageFormat", - "documentation":"

The format of the packages. The valid package types are:

", + "documentation":"

The format of the packages. The valid package types are:

", "location":"querystring", "locationName":"format" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", "location":"querystring", "locationName":"namespace" }, @@ -2352,7 +2352,7 @@ "members":{ "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

" + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

" }, "package":{ "shape":"PackageName", @@ -2399,11 +2399,11 @@ "members":{ "format":{ "shape":"PackageFormat", - "documentation":"

The format of the package. Valid values are:

" + "documentation":"

The format of the package. Valid values are:

" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

" + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

" }, "package":{ "shape":"PackageName", @@ -2427,11 +2427,11 @@ "members":{ "format":{ "shape":"PackageFormat", - "documentation":"

The format of the package version. The valid package formats are:

" + "documentation":"

The format of the package version. The valid package formats are:

" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

" + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

" }, "packageName":{ "shape":"PackageName", @@ -2706,7 +2706,7 @@ }, "packageFormat":{ "shape":"PackageFormat", - "documentation":"

The package format associated with a repository's external connection. The valid package formats are:

" + "documentation":"

The package format associated with a repository's external connection. The valid package formats are:

" }, "status":{ "shape":"ExternalConnectionStatus", @@ -2967,7 +2967,7 @@ "members":{ "domain":{ "shape":"DomainName", - "documentation":"

The domain that contains the repository that contains the package versions with a status to be updated.

", + "documentation":"

The name of the domain that contains the repository that contains the package versions with a status to be updated.

", "location":"querystring", "locationName":"domain" }, @@ -2985,13 +2985,13 @@ }, "format":{ "shape":"PackageFormat", - "documentation":"

A format that specifies the type of the package with the statuses to update. The valid values are:

", + "documentation":"

A format that specifies the type of the package with the statuses to update. The valid values are:

", "location":"querystring", "locationName":"format" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", "location":"querystring", "locationName":"namespace" }, @@ -3130,5 +3130,5 @@ ] } }, - "documentation":"

AWS CodeArtifact is a fully managed artifact repository compatible with language-native package managers and build tools such as npm, Apache Maven, NuGet, and pip. You can use CodeArtifact to share packages with development teams and pull packages. Packages can be pulled from both public and CodeArtifact repositories. You can also create an upstream relationship between a CodeArtifact repository and another repository, which effectively merges their contents from the point of view of a package manager client.

AWS CodeArtifact Components

Use the information in this guide to help you work with the following CodeArtifact components:

CodeArtifact supports these operations:

" + "documentation":"

AWS CodeArtifact is a fully managed artifact repository compatible with language-native package managers and build tools such as npm, Apache Maven, and pip. You can use CodeArtifact to share packages with development teams and pull packages. Packages can be pulled from both public and CodeArtifact repositories. You can also create an upstream relationship between a CodeArtifact repository and another repository, which effectively merges their contents from the point of view of a package manager client.

AWS CodeArtifact Components

Use the information in this guide to help you work with the following CodeArtifact components:

CodeArtifact supports these operations:

" } diff --git a/botocore/data/codebuild/2016-10-06/service-2.json b/botocore/data/codebuild/2016-10-06/service-2.json index 208512ea..31dd2cd9 100644 --- a/botocore/data/codebuild/2016-10-06/service-2.json +++ b/botocore/data/codebuild/2016-10-06/service-2.json @@ -268,7 +268,8 @@ "errors":[ {"shape":"InvalidInputException"}, {"shape":"ResourceNotFoundException"} - ] + ], + "documentation":"

Analyzes and accumulates test report values for the specified test reports.

" }, "GetResourcePolicy":{ "name":"GetResourcePolicy", @@ -365,7 +366,7 @@ {"shape":"InvalidInputException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Gets a list of build IDs for the specified build project, with each build ID representing a single build.

" + "documentation":"

Gets a list of build identifiers for the specified build project, with each build identifier representing a single build.

" }, "ListCuratedEnvironmentImages":{ "name":"ListCuratedEnvironmentImages", @@ -841,7 +842,7 @@ }, "resolvedSourceVersion":{ "shape":"NonEmptyString", - "documentation":"

An identifier for the version of this build's source code.

" + "documentation":"

An identifier for the version of this build's source code.

" }, "projectName":{ "shape":"NonEmptyString", @@ -861,7 +862,7 @@ }, "secondarySourceVersions":{ "shape":"ProjectSecondarySourceVersions", - "documentation":"

An array of ProjectSourceVersion objects. Each ProjectSourceVersion must be one of:

" + "documentation":"

An array of ProjectSourceVersion objects. Each ProjectSourceVersion must be one of:

" }, "artifacts":{ "shape":"BuildArtifacts", @@ -1007,7 +1008,7 @@ }, "resolvedSourceVersion":{ "shape":"NonEmptyString", - "documentation":"

The identifier of the resolved version of this batch build's source code.

" + "documentation":"

The identifier of the resolved version of this batch build's source code.

" }, "projectName":{ "shape":"NonEmptyString", @@ -1024,7 +1025,7 @@ }, "secondarySourceVersions":{ "shape":"ProjectSecondarySourceVersions", - "documentation":"

An array of ProjectSourceVersion objects. Each ProjectSourceVersion must be one of:

" + "documentation":"

An array of ProjectSourceVersion objects. Each ProjectSourceVersion must be one of:

" }, "artifacts":{ "shape":"BuildArtifacts", @@ -1074,6 +1075,10 @@ "buildGroups":{ "shape":"BuildGroups", "documentation":"

An array of BuildGroup objects that define the build groups for the batch build.

" + }, + "debugSessionEnabled":{ + "shape":"WrapperBoolean", + "documentation":"

Specifies if session debugging is enabled for this batch build. For more information, see Viewing a running build in Session Manager. Batch session debugging is not supported for matrix batch builds.

" } }, "documentation":"

Contains information about a batch build.

" @@ -1119,7 +1124,7 @@ }, "contexts":{ "shape":"PhaseContexts", - "documentation":"

Additional information about the batch build phase. Especially to help troubleshoot a failed btach build.

" + "documentation":"

Additional information about the batch build phase. Especially to help troubleshoot a failed batch build.

" } }, "documentation":"

Contains information about a stage for a batch build.

" @@ -1458,7 +1463,7 @@ }, "sourceVersion":{ "shape":"String", - "documentation":"

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

" + "documentation":"

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

" }, "secondarySourceVersions":{ "shape":"ProjectSecondarySourceVersions", @@ -1494,7 +1499,7 @@ }, "encryptionKey":{ "shape":"NonEmptyString", - "documentation":"

The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

" + "documentation":"

The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

" }, "tags":{ "shape":"TagList", @@ -1519,6 +1524,10 @@ "buildBatchConfig":{ "shape":"ProjectBuildBatchConfig", "documentation":"

A ProjectBuildBatchConfig object that defines the batch build options for the project.

" + }, + "concurrentBuildLimit":{ + "shape":"WrapperInt", + "documentation":"

The maximum number of concurrent builds that are allowed for this project.

New builds are only started if the current number of builds is less than or equal to this limit. If the current build count meets this limit, new builds are throttled and are not run.

" } } }, @@ -1961,16 +1970,31 @@ "trendField" ], "members":{ - "reportGroupArn":{"shape":"NonEmptyString"}, - "numOfReports":{"shape":"PageSize"}, - "trendField":{"shape":"ReportGroupTrendFieldType"} + "reportGroupArn":{ + "shape":"NonEmptyString", + "documentation":"

The ARN of the report group that contains the reports to analyze.

" + }, + "numOfReports":{ + "shape":"PageSize", + "documentation":"

The number of reports to analyze. This operation always retrieves the most recent reports.

If this parameter is omitted, the most recent 100 reports are analyzed.

" + }, + "trendField":{ + "shape":"ReportGroupTrendFieldType", + "documentation":"

The test report value to accumulate. This must be one of the following values:

Test reports:
DURATION

Accumulate the test run times for the specified reports.

PASS_RATE

Accumulate the percentage of tests that passed for the specified test reports.

TOTAL

Accumulate the total number of tests for the specified test reports.

Code coverage reports:
BRANCH_COVERAGE

Accumulate the branch coverage percentages for the specified test reports.

BRANCHES_COVERED

Accumulate the branches covered values for the specified test reports.

BRANCHES_MISSED

Accumulate the branches missed values for the specified test reports.

LINE_COVERAGE

Accumulate the line coverage percentages for the specified test reports.

LINES_COVERED

Accumulate the lines covered values for the specified test reports.

LINES_MISSED

Accumulate the lines not covered values for the specified test reports.

" + } } }, "GetReportGroupTrendOutput":{ "type":"structure", "members":{ - "stats":{"shape":"ReportGroupTrendStats"}, - "rawData":{"shape":"ReportGroupTrendRawDataList"} + "stats":{ + "shape":"ReportGroupTrendStats", + "documentation":"

Contains the accumulated trend data.

" + }, + "rawData":{ + "shape":"ReportGroupTrendRawDataList", + "documentation":"

An array that contains the raw data for each report.
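A hedged sketch of calling the new GetReportGroupTrend operation with boto3 and reading both members above; the report group ARN and the choice of PASS_RATE are placeholders:

    import boto3

    codebuild = boto3.client('codebuild')

    # Accumulate the pass rate across the 50 most recent reports in a report group.
    trend = codebuild.get_report_group_trend(
        reportGroupArn='arn:aws:codebuild:us-east-1:123456789012:report-group/my-reports',  # placeholder ARN
        numOfReports=50,          # analyze the 50 most recent reports
        trendField='PASS_RATE',   # one of the trend fields documented above
    )
    print(trend['stats'])         # accumulated average / max / min
    for report in trend['rawData']:
        print(report['reportArn'], report['data'])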

" + } } }, "GetResourcePolicyInput":{ @@ -2186,7 +2210,7 @@ }, "sortOrder":{ "shape":"SortOrderType", - "documentation":"

The order to list build IDs. Valid values include:

" + "documentation":"

The order to list results in. The results are sorted by build number, not the build identifier.

Valid values include:

If the project has more than 100 builds, setting the sort order will result in an error.
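Because setting a sort order fails on projects with more than 100 builds, one cautious pattern is to page through the build identifiers with sortOrder left unset; a sketch with a placeholder project name:

    import boto3

    codebuild = boto3.client('codebuild')

    # Page through all build IDs for a project without specifying sortOrder.
    paginator = codebuild.get_paginator('list_builds_for_project')
    for page in paginator.paginate(projectName='my-project'):  # placeholder project name
        for build_id in page['ids']:
            print(build_id)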

" }, "nextToken":{ "shape":"String", @@ -2616,7 +2640,7 @@ }, "sourceVersion":{ "shape":"String", - "documentation":"

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

" + "documentation":"

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

" }, "secondarySourceVersions":{ "shape":"ProjectSecondarySourceVersions", @@ -2652,7 +2676,7 @@ }, "encryptionKey":{ "shape":"NonEmptyString", - "documentation":"

The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

" + "documentation":"

The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

" }, "tags":{ "shape":"TagList", @@ -2689,6 +2713,10 @@ "buildBatchConfig":{ "shape":"ProjectBuildBatchConfig", "documentation":"

A ProjectBuildBatchConfig object that defines the batch build options for the project.

" + }, + "concurrentBuildLimit":{ + "shape":"WrapperInt", + "documentation":"

The maximum number of concurrent builds that are allowed for this project.

New builds are only started if the current number of builds is less than or equal to this limit. If the current build count meets this limit, new builds are throttled and are not run.

" } }, "documentation":"

Information about a build project.

" @@ -2705,7 +2733,7 @@ "members":{ "type":{ "shape":"ArtifactsType", - "documentation":"

The type of build output artifact. Valid values include:

" + "documentation":"

The type of build output artifact. Valid values include:

" }, "location":{ "shape":"String", @@ -2733,7 +2761,7 @@ }, "encryptionDisabled":{ "shape":"WrapperBoolean", - "documentation":"

Set to true if you do not want your output artifacts encrypted. This option is valid only if your artifacts type is Amazon Simple Storage Service (Amazon S3). If this is set with another artifacts type, an invalidInputException is thrown.

" + "documentation":"

Set to true if you do not want your output artifacts encrypted. This option is valid only if your artifacts type is Amazon S3. If this is set with another artifacts type, an invalidInputException is thrown.

" }, "artifactIdentifier":{ "shape":"String", @@ -2757,7 +2785,7 @@ }, "badgeRequestUrl":{ "shape":"String", - "documentation":"

The publicly-accessible URL through which you can access the build badge for your project.

The publicly accessible URL through which you can access the build badge for your project.

" + "documentation":"

The publicly-accessible URL through which you can access the build badge for your project.

" } }, "documentation":"

Information about the build badge for the build project.

" @@ -2842,7 +2870,7 @@ }, "certificate":{ "shape":"String", - "documentation":"

The ARN of the Amazon Simple Storage Service (Amazon S3) bucket, path prefix, and object key that contains the PEM-encoded certificate for the build project. For more information, see certificate in the AWS CodeBuild User Guide.

" + "documentation":"

The ARN of the Amazon S3 bucket, path prefix, and object key that contains the PEM-encoded certificate for the build project. For more information, see certificate in the AWS CodeBuild User Guide.

" }, "registryCredential":{ "shape":"RegistryCredential", @@ -2917,11 +2945,11 @@ "members":{ "type":{ "shape":"SourceType", - "documentation":"

The type of repository that contains the source code to be built. Valid values include:

" + "documentation":"

The type of repository that contains the source code to be built. Valid values include:

" }, "location":{ "shape":"String", - "documentation":"

Information about the location of the source code to be built. Valid values include:

" + "documentation":"

Information about the location of the source code to be built. Valid values include:

" }, "gitCloneDepth":{ "shape":"GitCloneDepth", @@ -2941,7 +2969,7 @@ }, "reportBuildStatus":{ "shape":"WrapperBoolean", - "documentation":"

Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, or Bitbucket. If this is set and you use a different source provider, an invalidInputException is thrown.

The status of a build triggered by a webhook is always reported to your source provider.

" + "documentation":"

Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, or Bitbucket. If this is set and you use a different source provider, an invalidInputException is thrown.

To be able to report the build status to the source provider, the user associated with the source provider must have write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the AWS CodeBuild User Guide.

The status of a build triggered by a webhook is always reported to your source provider.

" }, "buildStatusConfig":{ "shape":"BuildStatusConfig", @@ -2953,7 +2981,7 @@ }, "sourceIdentifier":{ "shape":"String", - "documentation":"

An identifier for this project source.

" + "documentation":"

An identifier for this project source. The identifier can only contain alphanumeric characters and underscores, and must be less than 128 characters in length.

" } }, "documentation":"

Information about the build input source code for the build project.

" @@ -2967,11 +2995,11 @@ "members":{ "sourceIdentifier":{ "shape":"String", - "documentation":"

An identifier for a source in the build project.

" + "documentation":"

An identifier for a source in the build project. The identifier can only contain alphanumeric characters and underscores, and must be less than 128 characters in length.

" }, "sourceVersion":{ "shape":"String", - "documentation":"

The source version for the corresponding source identifier. If specified, must be one of:

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

" + "documentation":"

The source version for the corresponding source identifier. If specified, must be one of:

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

" } }, "documentation":"

A source identifier and its corresponding version.

" @@ -3133,35 +3161,38 @@ "members":{ "arn":{ "shape":"NonEmptyString", - "documentation":"

The ARN of a ReportGroup.

" + "documentation":"

The ARN of the ReportGroup.

" }, "name":{ "shape":"ReportGroupName", - "documentation":"

The name of a ReportGroup.

" + "documentation":"

The name of the ReportGroup.

" }, "type":{ "shape":"ReportType", - "documentation":"

The type of the ReportGroup. The one valid value is TEST.

" + "documentation":"

The type of the ReportGroup. This can be one of the following values:

CODE_COVERAGE

The report group contains code coverage reports.

TEST

The report group contains test reports.

" }, "exportConfig":{ "shape":"ReportExportConfig", - "documentation":"

Information about the destination where the raw data of this ReportGroup is exported.

" + "documentation":"

Information about the destination where the raw data of this ReportGroup is exported.

" }, "created":{ "shape":"Timestamp", - "documentation":"

The date and time this ReportGroup was created.

" + "documentation":"

The date and time this ReportGroup was created.

" }, "lastModified":{ "shape":"Timestamp", - "documentation":"

The date and time this ReportGroup was last modified.

" + "documentation":"

The date and time this ReportGroup was last modified.

" }, "tags":{ "shape":"TagList", - "documentation":"

A list of tag key and value pairs associated with this report group.

These tags are available for use by AWS services that support AWS CodeBuild report group tags.

" + "documentation":"

A list of tag key and value pairs associated with this report group.

These tags are available for use by AWS services that support AWS CodeBuild report group tags.

" }, - "status":{"shape":"ReportGroupStatusType"} + "status":{ + "shape":"ReportGroupStatusType", + "documentation":"

The status of the report group. This property is read-only.

This can be one of the following values:

ACTIVE

The report group is active.

DELETING

The report group is in the process of being deleted.

" + } }, - "documentation":"

A series of reports. Each report contains information about the results from running a series of test cases. You specify the test cases for a report group in the buildspec for a build project using one or more paths to the test case files.

" + "documentation":"

A series of reports. Each report contains information about the results from running a series of test cases. You specify the test cases for a report group in the buildspec for a build project using one or more paths to the test case files.

" }, "ReportGroupArns":{ "type":"list", @@ -3210,10 +3241,20 @@ "ReportGroupTrendStats":{ "type":"structure", "members":{ - "average":{"shape":"String"}, - "max":{"shape":"String"}, - "min":{"shape":"String"} - } + "average":{ + "shape":"String", + "documentation":"

Contains the average of all values analyzed.

" + }, + "max":{ + "shape":"String", + "documentation":"

Contains the maximum value analyzed.

" + }, + "min":{ + "shape":"String", + "documentation":"

Contains the minimum value analyzed.

" + } + }, + "documentation":"

Contains trend statistics for a set of reports. The actual values depend on the type of trend being collected. For more information, see .

" }, "ReportGroups":{ "type":"list", @@ -3253,9 +3294,16 @@ "ReportWithRawData":{ "type":"structure", "members":{ - "reportArn":{"shape":"NonEmptyString"}, - "data":{"shape":"String"} - } + "reportArn":{ + "shape":"NonEmptyString", + "documentation":"

The ARN of the report.

" + }, + "data":{ + "shape":"String", + "documentation":"

The value of the requested data field from the report.

" + } + }, + "documentation":"

Contains the unmodified data for the report. For more information, see .

" }, "Reports":{ "type":"list", @@ -3279,7 +3327,7 @@ "documentation":"

The identifier of the artifact.

" } }, - "documentation":"

Represents a resolved build artifact. A resolve artifact is an artifact that is built and deployed to the destination, such as Amazon Simple Storage Service (Amazon S3).

" + "documentation":"

Represents a resolved build artifact. A resolved artifact is an artifact that is built and deployed to the destination, such as Amazon S3.

" }, "ResolvedSecondaryArtifacts":{ "type":"list", @@ -3374,6 +3422,10 @@ "shape":"NonEmptyString", "documentation":"

The name of the S3 bucket where the raw data of a report are exported.

" }, + "bucketOwner":{ + "shape":"String", + "documentation":"

The AWS account identifier of the owner of the Amazon S3 bucket. This allows report data to be exported to an Amazon S3 bucket that is owned by an account other than the account running the build.
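A sketch of exporting report data to a bucket owned by another account using the new bucketOwner field; the ARN, bucket name, and account ID are placeholders, and the exportConfig layout follows the ReportExportConfig and S3ReportExportConfig structures:

    import boto3

    codebuild = boto3.client('codebuild')

    # Point an existing report group at an S3 bucket owned by a different account.
    codebuild.update_report_group(
        arn='arn:aws:codebuild:us-east-1:123456789012:report-group/my-reports',  # placeholder ARN
        exportConfig={
            'exportConfigType': 'S3',
            's3Destination': {
                'bucket': 'shared-report-bucket',   # placeholder bucket name
                'bucketOwner': '210987654321',      # account ID that owns the bucket
                'path': 'codebuild/reports',
                'packaging': 'ZIP',
            },
        },
    )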

" + }, "path":{ "shape":"String", "documentation":"

The path to the exported report's raw data results.

" @@ -3496,7 +3548,7 @@ }, "sourceVersion":{ "shape":"String", - "documentation":"

The version of the batch build input to be built, for this build only. If not specified, the latest version is used. If specified, the contents depends on the source provider:

AWS CodeCommit

The commit ID, branch, or Git tag to use.

GitHub

The commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Bitbucket

The commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Amazon Simple Storage Service (Amazon S3)

The version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the project level, then this sourceVersion (at the build level) takes precedence.

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

" + "documentation":"

The version of the batch build input to be built, for this build only. If not specified, the latest version is used. If specified, the contents depends on the source provider:

AWS CodeCommit

The commit ID, branch, or Git tag to use.

GitHub

The commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Bitbucket

The commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Amazon S3

The version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the project level, then this sourceVersion (at the build level) takes precedence.

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

" }, "artifactsOverride":{ "shape":"ProjectArtifacts", @@ -3601,6 +3653,10 @@ "buildBatchConfigOverride":{ "shape":"ProjectBuildBatchConfig", "documentation":"

A BuildBatchConfigOverride object that contains batch build configuration overrides.

" + }, + "debugSessionEnabled":{ + "shape":"WrapperBoolean", + "documentation":"

Specifies if session debugging is enabled for this batch build. For more information, see Viewing a running build in Session Manager. Batch session debugging is not supported for matrix batch builds.
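A sketch of starting a batch build with session debugging turned on (placeholder project name); the matrix-batch-build caveat above still applies:

    import boto3

    codebuild = boto3.client('codebuild')

    # Start a batch build with Session Manager debugging enabled for its jobs.
    build_batch = codebuild.start_build_batch(
        projectName='my-batch-project',   # placeholder project name
        debugSessionEnabled=True,
    )
    print(build_batch['buildBatch']['id'])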

" } } }, @@ -3631,7 +3687,7 @@ }, "sourceVersion":{ "shape":"String", - "documentation":"

The version of the build input to be built, for this build only. If not specified, the latest version is used. If specified, the contents depends on the source provider:

AWS CodeCommit

The commit ID, branch, or Git tag to use.

GitHub

The commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Bitbucket

The commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Amazon Simple Storage Service (Amazon S3)

The version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the project level, then this sourceVersion (at the build level) takes precedence.

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

" + "documentation":"

The version of the build input to be built, for this build only. If not specified, the latest version is used. If specified, the contents depends on the source provider:

AWS CodeCommit

The commit ID, branch, or Git tag to use.

GitHub

The commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Bitbucket

The commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Amazon S3

The version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the project level, then this sourceVersion (at the build level) takes precedence.

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

" }, "artifactsOverride":{ "shape":"ProjectArtifacts", @@ -3675,7 +3731,7 @@ }, "reportBuildStatusOverride":{ "shape":"WrapperBoolean", - "documentation":"

Set to true to report to your source provider the status of a build's start and completion. If you use this option with a source provider other than GitHub, GitHub Enterprise, or Bitbucket, an invalidInputException is thrown.

The status of a build triggered by a webhook is always reported to your source provider.

" + "documentation":"

Set to true to report to your source provider the status of a build's start and completion. If you use this option with a source provider other than GitHub, GitHub Enterprise, or Bitbucket, an invalidInputException is thrown.

To be able to report the build status to the source provider, the user associated with the source provider must have write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the AWS CodeBuild User Guide.

The status of a build triggered by a webhook is always reported to your source provider.

" }, "buildStatusConfigOverride":{ "shape":"BuildStatusConfig", @@ -3931,7 +3987,7 @@ }, "sourceVersion":{ "shape":"String", - "documentation":"

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

" + "documentation":"

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

" }, "secondarySourceVersions":{ "shape":"ProjectSecondarySourceVersions", @@ -3967,7 +4023,7 @@ }, "encryptionKey":{ "shape":"NonEmptyString", - "documentation":"

The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

" + "documentation":"

The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

" }, "tags":{ "shape":"TagList", @@ -3989,7 +4045,11 @@ "shape":"ProjectFileSystemLocations", "documentation":"

An array of ProjectFileSystemLocation objects for a CodeBuild build project. A ProjectFileSystemLocation object specifies the identifier, location, mountOptions, mountPoint, and type of a file system created using Amazon Elastic File System.

" }, - "buildBatchConfig":{"shape":"ProjectBuildBatchConfig"} + "buildBatchConfig":{"shape":"ProjectBuildBatchConfig"}, + "concurrentBuildLimit":{ + "shape":"WrapperInt", + "documentation":"

The maximum number of concurrent builds that are allowed for this project.

New builds are only started if the current number of builds is less than or equal to this limit. If the current build count meets this limit, new builds are throttled and are not run.

To remove this limit, set this value to -1.
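A sketch of setting and later removing the limit through update_project (placeholder project name):

    import boto3

    codebuild = boto3.client('codebuild')

    # Allow at most one running build for the project; extra builds are throttled.
    codebuild.update_project(name='my-project', concurrentBuildLimit=1)

    # Later, remove the limit again by passing -1, as described above.
    codebuild.update_project(name='my-project', concurrentBuildLimit=-1)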

" + } } }, "UpdateProjectOutput":{ @@ -4165,5 +4225,5 @@ "WrapperInt":{"type":"integer"}, "WrapperLong":{"type":"long"} }, - "documentation":"AWS CodeBuild

AWS CodeBuild is a fully managed build service in the cloud. AWS CodeBuild compiles your source code, runs unit tests, and produces artifacts that are ready to deploy. AWS CodeBuild eliminates the need to provision, manage, and scale your own build servers. It provides prepackaged build environments for the most popular programming languages and build tools, such as Apache Maven, Gradle, and more. You can also fully customize build environments in AWS CodeBuild to use your own build tools. AWS CodeBuild scales automatically to meet peak build requests. You pay only for the build time you consume. For more information about AWS CodeBuild, see the AWS CodeBuild User Guide.

AWS CodeBuild supports these operations:

" + "documentation":"AWS CodeBuild

AWS CodeBuild is a fully managed build service in the cloud. AWS CodeBuild compiles your source code, runs unit tests, and produces artifacts that are ready to deploy. AWS CodeBuild eliminates the need to provision, manage, and scale your own build servers. It provides prepackaged build environments for the most popular programming languages and build tools, such as Apache Maven, Gradle, and more. You can also fully customize build environments in AWS CodeBuild to use your own build tools. AWS CodeBuild scales automatically to meet peak build requests. You pay only for the build time you consume. For more information about AWS CodeBuild, see the AWS CodeBuild User Guide.

" } diff --git a/botocore/data/codedeploy/2014-10-06/service-2.json b/botocore/data/codedeploy/2014-10-06/service-2.json index be66e861..82bf32b4 100644 --- a/botocore/data/codedeploy/2014-10-06/service-2.json +++ b/botocore/data/codedeploy/2014-10-06/service-2.json @@ -1535,6 +1535,10 @@ "shape":"AutoRollbackConfiguration", "documentation":"

Configuration information for an automatic rollback that is added when a deployment group is created.

" }, + "outdatedInstancesStrategy":{ + "shape":"OutdatedInstancesStrategy", + "documentation":"

Indicates what happens when new EC2 instances are launched mid-deployment and do not receive the deployed application revision.

If this option is set to UPDATE or is unspecified, CodeDeploy initiates one or more 'auto-update outdated instances' deployments to apply the deployed application revision to the new EC2 instances.

If this option is set to IGNORE, CodeDeploy does not initiate a deployment to update the new EC2 instances. This may result in instances having different revisions.

" + }, "deploymentStyle":{ "shape":"DeploymentStyle", "documentation":"

Information about the type of deployment, in-place or blue/green, that you want to run and whether to route deployment traffic behind a load balancer.

" @@ -1813,6 +1817,7 @@ "autoscaling", "codeDeployRollback", "CodeDeploy", + "CodeDeployAutoUpdate", "CloudFormation", "CloudFormationRollback" ] @@ -1894,6 +1899,10 @@ "shape":"DeploymentStyle", "documentation":"

Information about the type of deployment, either in-place or blue/green, you want to run and whether to route deployment traffic behind a load balancer.

" }, + "outdatedInstancesStrategy":{ + "shape":"OutdatedInstancesStrategy", + "documentation":"

Indicates what happens when new EC2 instances are launched mid-deployment and do not receive the deployed application revision.

If this option is set to UPDATE or is unspecified, CodeDeploy initiates one or more 'auto-update outdated instances' deployments to apply the deployed application revision to the new EC2 instances.

If this option is set to IGNORE, CodeDeploy does not initiate a deployment to update the new EC2 instances. This may result in instances having different revisions.

" + }, "blueGreenDeploymentConfiguration":{ "shape":"BlueGreenDeploymentConfiguration", "documentation":"

Information about blue/green deployment options for a deployment group.

" @@ -2021,7 +2030,7 @@ }, "creator":{ "shape":"DeploymentCreator", - "documentation":"

The means by which the deployment was created:

" + "documentation":"

The means by which the deployment was created:

" }, "ignoreApplicationStopFailures":{ "shape":"Boolean", @@ -2078,7 +2087,8 @@ "externalId":{ "shape":"ExternalId", "documentation":"

The unique ID for an external resource (for example, a CloudFormation stack ID) that is linked to this deployment.

" - } + }, + "relatedDeployments":{"shape":"RelatedDeployments"} }, "documentation":"

Information about a deployment.

" }, @@ -3120,13 +3130,6 @@ "documentation":"

The deployed state filter was specified in an invalid format.

", "exception":true }, - "InvalidDeploymentConfigIdException":{ - "type":"structure", - "members":{ - }, - "documentation":"

The ID of the deployment configuration is invalid.

", - "exception":true - }, "InvalidDeploymentConfigNameException":{ "type":"structure", "members":{ @@ -3970,13 +3973,13 @@ "MinimumHealthyHosts":{ "type":"structure", "members":{ - "value":{ - "shape":"MinimumHealthyHostsValue", - "documentation":"

The minimum healthy instance value.

" - }, "type":{ "shape":"MinimumHealthyHostsType", "documentation":"

The minimum healthy instance type:

In an example of nine instances, if a HOST_COUNT of six is specified, deploy to up to three instances at a time. The deployment is successful if six or more instances are deployed to successfully. Otherwise, the deployment fails. If a FLEET_PERCENT of 40 is specified, deploy to up to five instances at a time. The deployment is successful if four or more instances are deployed to successfully. Otherwise, the deployment fails.

In a call to the GetDeploymentConfig, CodeDeployDefault.OneAtATime returns a minimum healthy instance type of MOST_CONCURRENCY and a value of 1. This means a deployment to only one instance at a time. (You cannot set the type to MOST_CONCURRENCY, only to HOST_COUNT or FLEET_PERCENT.) In addition, with CodeDeployDefault.OneAtATime, AWS CodeDeploy attempts to ensure that all instances but one are kept in a healthy state during the deployment. Although this allows one instance at a time to be taken offline for a new deployment, it also means that if the deployment to the last instance fails, the overall deployment is still successful.

For more information, see AWS CodeDeploy Instance Health in the AWS CodeDeploy User Guide.
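To make the nine-instance example concrete, a custom deployment configuration with a HOST_COUNT of six could be created like this sketch (the configuration name is a placeholder):

    import boto3

    codedeploy = boto3.client('codedeploy')

    # With nine instances and HOST_COUNT=6, CodeDeploy deploys to up to three
    # instances at a time and succeeds only if at least six remain healthy.
    codedeploy.create_deployment_config(
        deploymentConfigName='SixHealthyHosts',   # placeholder configuration name
        minimumHealthyHosts={
            'type': 'HOST_COUNT',
            'value': 6,
        },
    )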

" + }, + "value":{ + "shape":"MinimumHealthyHostsValue", + "documentation":"

The minimum healthy instance value.

" } }, "documentation":"

Information about minimum healthy instance.

" @@ -4019,6 +4022,13 @@ "documentation":"

The API used does not support the deployment.

", "exception":true }, + "OutdatedInstancesStrategy":{ + "type":"string", + "enum":[ + "UPDATE", + "IGNORE" + ] + }, "Percentage":{"type":"integer"}, "PutLifecycleEventHookExecutionStatusInput":{ "type":"structure", @@ -4033,7 +4043,7 @@ }, "status":{ "shape":"LifecycleEventStatus", - "documentation":"

The result of a Lambda function that validates a deployment lifecycle event (Succeeded or Failed).

" + "documentation":"

The result of a Lambda function that validates a deployment lifecycle event. Succeeded and Failed are the only valid values for status.
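A sketch of how a validation Lambda hook might report back; in practice the deployment ID and hook execution ID come from the event that invoked the function, and are shown here as placeholders:

    import boto3

    codedeploy = boto3.client('codedeploy')

    # Report a successful lifecycle event validation back to CodeDeploy.
    codedeploy.put_lifecycle_event_hook_execution_status(
        deploymentId='d-EXAMPLE123',                   # placeholder deployment ID
        lifecycleEventHookExecutionId='hook-exec-id',  # placeholder execution ID
        status='Succeeded',                            # or 'Failed'
    )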

" } } }, @@ -4112,6 +4122,20 @@ "Deregistered" ] }, + "RelatedDeployments":{ + "type":"structure", + "members":{ + "autoUpdateOutdatedInstancesRootDeploymentId":{ + "shape":"DeploymentId", + "documentation":"

The deployment ID of the root deployment that triggered this deployment.

" + }, + "autoUpdateOutdatedInstancesDeploymentIds":{ + "shape":"DeploymentsList", + "documentation":"

The deployment IDs of 'auto-update outdated instances' deployments triggered by this deployment.

" + } + }, + "documentation":"

Information about deployments related to the specified deployment.
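A sketch of reading the new relatedDeployments field from GetDeployment (placeholder deployment ID); the field may be absent for deployments with no auto-update activity, hence the .get() calls:

    import boto3

    codedeploy = boto3.client('codedeploy')

    # Inspect deployments related to an 'auto-update outdated instances' flow.
    info = codedeploy.get_deployment(deploymentId='d-EXAMPLE123')['deploymentInfo']
    related = info.get('relatedDeployments', {})
    print(related.get('autoUpdateOutdatedInstancesRootDeploymentId'))
    print(related.get('autoUpdateOutdatedInstancesDeploymentIds', []))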

" + }, "RemoveTagsFromOnPremisesInstancesInput":{ "type":"structure", "required":[ @@ -4742,6 +4766,10 @@ "shape":"AutoRollbackConfiguration", "documentation":"

Information for an automatic rollback configuration that is added or changed when a deployment group is updated.

" }, + "outdatedInstancesStrategy":{ + "shape":"OutdatedInstancesStrategy", + "documentation":"

Indicates what happens when new EC2 instances are launched mid-deployment and do not receive the deployed application revision.

If this option is set to UPDATE or is unspecified, CodeDeploy initiates one or more 'auto-update outdated instances' deployments to apply the deployed application revision to the new EC2 instances.

If this option is set to IGNORE, CodeDeploy does not initiate a deployment to update the new EC2 instances. This may result in instances having different revisions.
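A sketch of opting a deployment group out of automatic updates via update_deployment_group; the application and group names are placeholders:

    import boto3

    codedeploy = boto3.client('codedeploy')

    # Tell CodeDeploy to ignore EC2 instances launched mid-deployment instead of
    # starting 'auto-update outdated instances' deployments for them.
    codedeploy.update_deployment_group(
        applicationName='my-app',                  # placeholder application name
        currentDeploymentGroupName='my-group',     # placeholder deployment group name
        outdatedInstancesStrategy='IGNORE',        # or 'UPDATE' (the default behavior)
    )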

" + }, "deploymentStyle":{ "shape":"DeploymentStyle", "documentation":"

Information about the type of deployment, either in-place or blue/green, you want to run and whether to route deployment traffic behind a load balancer.

" diff --git a/botocore/data/codeguruprofiler/2019-07-18/service-2.json b/botocore/data/codeguruprofiler/2019-07-18/service-2.json index 308588c1..7ff42e96 100644 --- a/botocore/data/codeguruprofiler/2019-07-18/service-2.json +++ b/botocore/data/codeguruprofiler/2019-07-18/service-2.json @@ -63,7 +63,7 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Used by profiler agents to report their current state and to receive remote configuration updates. For example, ConfigureAgent can be used to tell and agent whether to profile or not and for how long to return profiling data.

" + "documentation":"

Used by profiler agents to report their current state and to receive remote configuration updates. For example, ConfigureAgent can be used to tell an agent whether to profile or not and for how long to return profiling data.
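A sketch of how an agent-side caller might use ConfigureAgent through boto3; the profiling group name and fleet instance ID are placeholders, and real agents normally make this call internally:

    import boto3

    profiler = boto3.client('codeguruprofiler')

    # Ask the service whether this agent should profile, and for how long.
    agent_config = profiler.configure_agent(
        profilingGroupName='my-profiling-group',   # placeholder group name
        fleetInstanceId='i-0123456789abcdef0',     # placeholder instance identifier
    )['configuration']
    print(agent_config['shouldProfile'], agent_config['periodInSeconds'])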

" }, "CreateProfilingGroup":{ "name":"CreateProfilingGroup", @@ -95,6 +95,7 @@ "output":{"shape":"DeleteProfilingGroupResponse"}, "errors":[ {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, {"shape":"ValidationException"}, {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} @@ -480,11 +481,11 @@ "AgentParameterField":{ "type":"string", "enum":[ - "MaxStackDepth", - "MemoryUsageLimitPercent", - "MinimumTimeForReportingInMilliseconds", + "SamplingIntervalInMilliseconds", "ReportingIntervalInMilliseconds", - "SamplingIntervalInMilliseconds" + "MinimumTimeForReportingInMilliseconds", + "MemoryUsageLimitPercent", + "MaxStackDepth" ] }, "AgentParameters":{ @@ -511,9 +512,9 @@ "AggregationPeriod":{ "type":"string", "enum":[ - "P1D", + "PT5M", "PT1H", - "PT5M" + "P1D" ] }, "Anomalies":{ @@ -705,8 +706,8 @@ "ComputePlatform":{ "type":"string", "enum":[ - "AWSLambda", - "Default" + "Default", + "AWSLambda" ] }, "ConfigureAgentRequest":{ @@ -861,8 +862,8 @@ "FeedbackType":{ "type":"string", "enum":[ - "Negative", - "Positive" + "Positive", + "Negative" ] }, "FindingsReportId":{ @@ -946,9 +947,13 @@ }, "documentation":"

Information about a frame metric and its values.

" }, + "FrameMetricValue":{ + "type":"double", + "box":true + }, "FrameMetricValues":{ "type":"list", - "member":{"shape":"Double"} + "member":{"shape":"FrameMetricValue"} }, "FrameMetrics":{ "type":"list", @@ -1199,7 +1204,8 @@ "documentation":"

The server encountered an internal error and is unable to complete the request.

", "error":{"httpStatusCode":500}, "exception":true, - "fault":true + "fault":true, + "retryable":{"throttling":false} }, "ListFindingsReportsRequest":{ "type":"structure", @@ -1446,15 +1452,15 @@ "MetadataField":{ "type":"string", "enum":[ + "ComputePlatform", "AgentId", "AwsRequestId", - "ComputePlatform", "ExecutionEnvironment", "LambdaFunctionArn", "LambdaMemoryLimitInMB", - "LambdaPreviousExecutionTimeInMilliseconds", "LambdaRemainingTimeInMilliseconds", - "LambdaTimeGapBetweenInvokesInMilliseconds" + "LambdaTimeGapBetweenInvokesInMilliseconds", + "LambdaPreviousExecutionTimeInMilliseconds" ] }, "Metric":{ @@ -1497,8 +1503,8 @@ "OrderBy":{ "type":"string", "enum":[ - "TimestampAscending", - "TimestampDescending" + "TimestampDescending", + "TimestampAscending" ] }, "PaginationToken":{ @@ -1882,7 +1888,8 @@ "httpStatusCode":402, "senderFault":true }, - "exception":true + "exception":true, + "retryable":{"throttling":false} }, "String":{"type":"string"}, "Strings":{ @@ -1982,7 +1989,8 @@ "httpStatusCode":429, "senderFault":true }, - "exception":true + "exception":true, + "retryable":{"throttling":false} }, "Timestamp":{ "type":"timestamp", @@ -2087,5 +2095,5 @@ "exception":true } }, - "documentation":"

This section provides documentation for the Amazon CodeGuru Profiler API operations.

Amazon CodeGuru Profiler collects runtime performance data from your live applications, and provides recommendations that can help you fine-tune your application performance. Using machine learning algorithms, CodeGuru Profiler can help you find your most expensive lines of code and suggest ways you can improve efficiency and remove CPU bottlenecks.

Amazon CodeGuru Profiler provides different visualizations of profiling data to help you identify what code is running on the CPU, see how much time is consumed, and suggest ways to reduce CPU utilization.

Amazon CodeGuru Profiler currently supports applications written in all Java virtual machine (JVM) languages. While CodeGuru Profiler supports both visualizations and recommendations for applications written in Java, it can also generate visualizations and a subset of recommendations for applications written in other JVM languages.

For more information, see What is Amazon CodeGuru Profiler (https://docs.aws.amazon.com/codeguru/latest/profiler-ug/what-is-codeguru-profiler.html) in the Amazon CodeGuru Profiler User Guide.
" + "documentation":"

This section provides documentation for the Amazon CodeGuru Profiler API operations.

Amazon CodeGuru Profiler collects runtime performance data from your live applications, and provides recommendations that can help you fine-tune your application performance. Using machine learning algorithms, CodeGuru Profiler can help you find your most expensive lines of code and suggest ways you can improve efficiency and remove CPU bottlenecks.

Amazon CodeGuru Profiler provides different visualizations of profiling data to help you identify what code is running on the CPU, see how much time is consumed, and suggest ways to reduce CPU utilization.

Amazon CodeGuru Profiler currently supports applications written in all Java virtual machine (JVM) languages and Python. While CodeGuru Profiler supports both visualizations and recommendations for applications written in Java, it can also generate visualizations and a subset of recommendations for applications written in other JVM languages and Python.

For more information, see What is Amazon CodeGuru Profiler in the Amazon CodeGuru Profiler User Guide.

" } diff --git a/botocore/data/codepipeline/2015-07-09/paginators-1.json b/botocore/data/codepipeline/2015-07-09/paginators-1.json index a3a7a46c..dca90cb7 100644 --- a/botocore/data/codepipeline/2015-07-09/paginators-1.json +++ b/botocore/data/codepipeline/2015-07-09/paginators-1.json @@ -14,7 +14,8 @@ "ListPipelines": { "input_token": "nextToken", "output_token": "nextToken", - "result_key": "pipelines" + "result_key": "pipelines", + "limit_key": "maxResults" }, "ListWebhooks": { "input_token": "NextToken", diff --git a/botocore/data/codepipeline/2015-07-09/service-2.json b/botocore/data/codepipeline/2015-07-09/service-2.json index db218d82..9f696d57 100644 --- a/botocore/data/codepipeline/2015-07-09/service-2.json +++ b/botocore/data/codepipeline/2015-07-09/service-2.json @@ -165,6 +165,20 @@ ], "documentation":"

Enables artifacts in a pipeline to transition to a stage in a pipeline.

" }, + "GetActionType":{ + "name":"GetActionType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetActionTypeInput"}, + "output":{"shape":"GetActionTypeOutput"}, + "errors":[ + {"shape":"ActionTypeNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns information about an action type created for an external provider, where the action is to be used by customers of the external provider. The action can be created with any supported integration model.

" + }, "GetJobDetails":{ "name":"GetJobDetails", "http":{ @@ -568,6 +582,20 @@ ], "documentation":"

Removes tags from an AWS resource.

" }, + "UpdateActionType":{ + "name":"UpdateActionType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateActionTypeInput"}, + "errors":[ + {"shape":"RequestFailedException"}, + {"shape":"ValidationException"}, + {"shape":"ActionTypeNotFoundException"} + ], + "documentation":"

Updates an action type that was created with any supported integration model, where the action type is to be used by customers of the action type provider. Use a JSON file with the action definition and UpdateActionType to provide the full structure.

" + }, "UpdatePipeline":{ "name":"UpdatePipeline", "http":{ @@ -617,7 +645,10 @@ "documentation":"

Represents an AWS session credentials object. These credentials are temporary credentials that are issued by AWS Secure Token Service (STS). They can be used to access input and output artifacts in the S3 bucket used to store artifact for the pipeline in AWS CodePipeline.

", "sensitive":true }, - "AccessKeyId":{"type":"string"}, + "AccessKeyId":{ + "type":"string", + "sensitive":true + }, "AccountId":{ "type":"string", "pattern":"[0-9]{12}" @@ -1051,7 +1082,7 @@ }, "ActionProvider":{ "type":"string", - "max":25, + "max":35, "min":1, "pattern":"[0-9A-Za-z_-]+" }, @@ -1144,6 +1175,106 @@ }, "documentation":"

Returns information about the details of an action type.

" }, + "ActionTypeAlreadyExistsException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified action type already exists with a different definition.

", + "exception":true + }, + "ActionTypeArtifactDetails":{ + "type":"structure", + "required":[ + "minimumCount", + "maximumCount" + ], + "members":{ + "minimumCount":{ + "shape":"MinimumActionTypeArtifactCount", + "documentation":"

The minimum number of artifacts that can be used with the action type. For example, you should specify a minimum and maximum of zero input artifacts for an action type with a category of source.

" + }, + "maximumCount":{ + "shape":"MaximumActionTypeArtifactCount", + "documentation":"

The maximum number of artifacts that can be used with the action type. For example, you should specify a minimum and maximum of zero input artifacts for an action type with a category of source.

" + } + }, + "documentation":"

Information about parameters for artifacts associated with the action type, such as the minimum and maximum artifacts allowed.

" + }, + "ActionTypeDeclaration":{ + "type":"structure", + "required":[ + "executor", + "id", + "inputArtifactDetails", + "outputArtifactDetails" + ], + "members":{ + "description":{ + "shape":"ActionTypeDescription", + "documentation":"

The description for the action type to be updated.

" + }, + "executor":{ + "shape":"ActionTypeExecutor", + "documentation":"

Information about the executor for an action type that was created with any supported integration model.

" + }, + "id":{ + "shape":"ActionTypeIdentifier", + "documentation":"

The action category, owner, provider, and version of the action type to be updated.

" + }, + "inputArtifactDetails":{ + "shape":"ActionTypeArtifactDetails", + "documentation":"

Details for the artifacts, such as application files, to be worked on by the action. For example, the minimum and maximum number of input artifacts allowed.

" + }, + "outputArtifactDetails":{ + "shape":"ActionTypeArtifactDetails", + "documentation":"

Details for the output artifacts, such as a built application, that are the result of the action. For example, the minimum and maximum number of output artifacts allowed.

" + }, + "permissions":{ + "shape":"ActionTypePermissions", + "documentation":"

Details identifying the accounts with permissions to use the action type.

" + }, + "properties":{ + "shape":"ActionTypeProperties", + "documentation":"

The properties of the action type to be updated.

" + }, + "urls":{ + "shape":"ActionTypeUrls", + "documentation":"

The links associated with the action type to be updated.

" + } + }, + "documentation":"

The parameters for the action type definition that are provided when the action type is created or updated.

" + }, + "ActionTypeDescription":{ + "type":"string", + "max":1024, + "min":1 + }, + "ActionTypeExecutor":{ + "type":"structure", + "required":[ + "configuration", + "type" + ], + "members":{ + "configuration":{ + "shape":"ExecutorConfiguration", + "documentation":"

The action configuration properties for the action type. These properties are specified in the action definition when the action type is created.

" + }, + "type":{ + "shape":"ExecutorType", + "documentation":"

The integration model used to create and update the action type, Lambda or JobWorker.

" + }, + "policyStatementsTemplate":{ + "shape":"PolicyStatementsTemplate", + "documentation":"

The policy statement that specifies the permissions in the CodePipeline customer’s account that are needed to successfully run an action.

To grant permission to another account, specify the account ID as the Principal, a domain-style identifier defined by the service, for example codepipeline.amazonaws.com.

The size of the passed JSON policy document cannot exceed 2048 characters.

" + }, + "jobTimeout":{ + "shape":"JobTimeout", + "documentation":"

The timeout in seconds for the job. An action execution can have multiple jobs. This is the timeout for a single job, not the entire action execution.

" + } + }, + "documentation":"

The action engine, or executor, for an action type created for a provider, where the action is to be used by customers of the provider. The action engine is associated with the model used to create and update the action, such as the Lambda integration model.

" + }, "ActionTypeId":{ "type":"structure", "required":[ @@ -1172,6 +1303,34 @@ }, "documentation":"

Represents information about an action type.

" }, + "ActionTypeIdentifier":{ + "type":"structure", + "required":[ + "category", + "owner", + "provider", + "version" + ], + "members":{ + "category":{ + "shape":"ActionCategory", + "documentation":"

Defines what kind of action can be taken in the stage, one of the following:

" + }, + "owner":{ + "shape":"ActionTypeOwner", + "documentation":"

The creator of the action type being called: AWS or ThirdParty.

" + }, + "provider":{ + "shape":"ActionProvider", + "documentation":"

The provider of the action type being called. The provider name is supplied when the action type is created.

" + }, + "version":{ + "shape":"Version", + "documentation":"

A string that describes the action type version.

" + } + }, + "documentation":"

Specifies the category, owner, provider, and version of the action type.

" + }, "ActionTypeList":{ "type":"list", "member":{"shape":"ActionType"} @@ -1183,6 +1342,62 @@ "documentation":"

The specified action type cannot be found.

", "exception":true }, + "ActionTypeOwner":{ + "type":"string", + "pattern":"AWS|ThirdParty" + }, + "ActionTypePermissions":{ + "type":"structure", + "required":["allowedAccounts"], + "members":{ + "allowedAccounts":{ + "shape":"AllowedAccounts", + "documentation":"

A list of AWS account IDs with access to use the action type in their pipelines.

" + } + }, + "documentation":"

Details identifying the users with permissions to use the action type.

" + }, + "ActionTypeProperties":{ + "type":"list", + "member":{"shape":"ActionTypeProperty"}, + "max":10 + }, + "ActionTypeProperty":{ + "type":"structure", + "required":[ + "name", + "optional", + "key", + "noEcho" + ], + "members":{ + "name":{ + "shape":"ActionConfigurationKey", + "documentation":"

The property name that is displayed to users.

" + }, + "optional":{ + "shape":"Boolean", + "documentation":"

Whether the configuration property is an optional value.

" + }, + "key":{ + "shape":"Boolean", + "documentation":"

Whether the configuration property is a key.

" + }, + "noEcho":{ + "shape":"Boolean", + "documentation":"

Whether to omit the field value entered by the customer in the log. If true, the value is not saved in CloudTrail logs for the action execution.

" + }, + "queryable":{ + "shape":"Boolean", + "documentation":"

Indicates that the property is used with polling. An action type can have up to one queryable property. If it has one, that property must be both required and not secret.

" + }, + "description":{ + "shape":"PropertyDescription", + "documentation":"

The description of the property that is displayed to users.

" + } + }, + "documentation":"

Represents information about each property specified in the action configuration, such as the description and key name that display for the customer using the action type.

" + }, "ActionTypeSettings":{ "type":"structure", "members":{ @@ -1205,6 +1420,38 @@ }, "documentation":"

Returns information about the settings for an action type.

" }, + "ActionTypeUrls":{ + "type":"structure", + "members":{ + "configurationUrl":{ + "shape":"Url", + "documentation":"

The URL returned to the CodePipeline console that contains a link to the page where customers can configure the external action.

" + }, + "entityUrlTemplate":{ + "shape":"UrlTemplate", + "documentation":"

The URL returned to the CodePipeline console that provides a deep link to the resources of the external system, such as a status page. This link is provided as part of the action display in the pipeline.

" + }, + "executionUrlTemplate":{ + "shape":"UrlTemplate", + "documentation":"

The link to an execution page for the action type in progress. For example, for a CodeDeploy action, this link is shown on the pipeline view page in the CodePipeline console, and it links to a CodeDeploy status page.

" + }, + "revisionUrlTemplate":{ + "shape":"UrlTemplate", + "documentation":"

The URL returned to the CodePipeline console that contains a link to the page where customers can update or change the configuration of the external action.

" + } + }, + "documentation":"

Returns information about URLs for web pages that display to customers as links on the pipeline view, such as an external configuration page for the action type.

" + }, + "AllowedAccount":{ + "type":"string", + "pattern":"[0-9]{12}|\\*" + }, + "AllowedAccounts":{ + "type":"list", + "member":{"shape":"AllowedAccount"}, + "max":1000, + "min":1 + }, "ApprovalAlreadyCompletedException":{ "type":"structure", "members":{ @@ -1796,6 +2043,27 @@ }, "documentation":"

The interaction or event that started a pipeline execution.

" }, + "ExecutorConfiguration":{ + "type":"structure", + "members":{ + "lambdaExecutorConfiguration":{ + "shape":"LambdaExecutorConfiguration", + "documentation":"

Details about the Lambda executor of the action type.

" + }, + "jobWorkerExecutorConfiguration":{ + "shape":"JobWorkerExecutorConfiguration", + "documentation":"

Details about the JobWorker executor of the action type.

" + } + }, + "documentation":"

The action engine, or executor, related to the supported integration model used to create and update the action type. The available executor types are Lambda and JobWorker.

" + }, + "ExecutorType":{ + "type":"string", + "enum":[ + "JobWorker", + "Lambda" + ] + }, "ExternalExecutionId":{"type":"string"}, "ExternalExecutionSummary":{"type":"string"}, "FailureDetails":{ @@ -1831,6 +2099,42 @@ "SystemUnavailable" ] }, + "GetActionTypeInput":{ + "type":"structure", + "required":[ + "category", + "owner", + "provider", + "version" + ], + "members":{ + "category":{ + "shape":"ActionCategory", + "documentation":"

Defines what kind of action can be taken in the stage. The following are the valid values:

" + }, + "owner":{ + "shape":"ActionTypeOwner", + "documentation":"

The creator of an action type that was created with any supported integration model. There are two valid values: AWS and ThirdParty.

" + }, + "provider":{ + "shape":"ActionProvider", + "documentation":"

The provider of the action type being called. The provider name is specified when the action type is created.

" + }, + "version":{ + "shape":"Version", + "documentation":"

A string that describes the action type version.

" + } + } + }, + "GetActionTypeOutput":{ + "type":"structure", + "members":{ + "actionType":{ + "shape":"ActionTypeDeclaration", + "documentation":"

The action type information for the requested action type, such as the action type ID.

" + } + } + }, "GetJobDetailsInput":{ "type":"structure", "required":["jobId"], @@ -2194,11 +2498,47 @@ "Failed" ] }, + "JobTimeout":{ + "type":"integer", + "max":43200, + "min":60 + }, + "JobWorkerExecutorConfiguration":{ + "type":"structure", + "members":{ + "pollingAccounts":{ + "shape":"PollingAccountList", + "documentation":"

The accounts in which the job worker is configured and might poll for jobs as part of the action execution.

" + }, + "pollingServicePrincipals":{ + "shape":"PollingServicePrincipalList", + "documentation":"

The service Principals in which the job worker is configured and might poll for jobs as part of the action execution.

" + } + }, + "documentation":"

Details about the polling configuration for the JobWorker action engine, or executor.

" + }, "JsonPath":{ "type":"string", "max":150, "min":1 }, + "LambdaExecutorConfiguration":{ + "type":"structure", + "required":["lambdaFunctionArn"], + "members":{ + "lambdaFunctionArn":{ + "shape":"LambdaFunctionArn", + "documentation":"

The ARN of the Lambda function used by the action engine.

" + } + }, + "documentation":"

Details about the configuration for the Lambda action engine, or executor.

" + }, + "LambdaFunctionArn":{ + "type":"string", + "max":140, + "min":1, + "pattern":"arn:aws(-[\\w]+)*:lambda:.+:[0-9]{12}:function:.+" + }, "LastChangedAt":{"type":"timestamp"}, "LastChangedBy":{"type":"string"}, "LastUpdatedBy":{"type":"string"}, @@ -2254,6 +2594,10 @@ "nextToken":{ "shape":"NextToken", "documentation":"

An identifier that was returned from the previous list action types call, which can be used to return the next set of action types in the list.

" + }, + "regionFilter":{ + "shape":"AWSRegionName", + "documentation":"

The Region to filter on for the list of action types.

" } }, "documentation":"

Represents the input of a ListActionTypes action.

" @@ -2312,6 +2656,10 @@ "nextToken":{ "shape":"NextToken", "documentation":"

An identifier that was returned from the previous list pipelines call. It can be used to return the next set of pipelines in the list.

" + }, + "maxResults":{ + "shape":"MaxPipelines", + "documentation":"

The maximum number of pipelines to return in a single call. To retrieve the remaining pipelines, make another call with the returned nextToken value. The minimum value you can specify is 1. The maximum accepted value is 1000.
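Since maxResults is now the ListPipelines limit_key, a sketch of paging through pipelines with the standard botocore paginator (assuming boto3's usual client and paginator interfaces):

import boto3

codepipeline = boto3.client("codepipeline")
paginator = codepipeline.get_paginator("list_pipelines")

# PageSize is translated to the maxResults limit_key; 100 is an arbitrary example value.
for page in paginator.paginate(PaginationConfig={"PageSize": 100}):
    for summary in page["pipelines"]:
        print(summary["name"])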

" } }, "documentation":"

Represents the input of a ListPipelines action.

" @@ -2434,11 +2782,21 @@ "type":"integer", "min":1 }, + "MaxPipelines":{ + "type":"integer", + "max":1000, + "min":1 + }, "MaxResults":{ "type":"integer", "max":100, "min":1 }, + "MaximumActionTypeArtifactCount":{ + "type":"integer", + "max":10, + "min":0 + }, "MaximumArtifactCount":{ "type":"integer", "max":5, @@ -2449,6 +2807,11 @@ "max":5000, "min":1 }, + "MinimumActionTypeArtifactCount":{ + "type":"integer", + "max":10, + "min":0 + }, "MinimumArtifactCount":{ "type":"integer", "max":5, @@ -2753,6 +3116,11 @@ "documentation":"

The pipeline version was specified in an invalid format or cannot be found.

", "exception":true }, + "PolicyStatementsTemplate":{ + "type":"string", + "max":2048, + "min":1 + }, "PollForJobsInput":{ "type":"structure", "required":["actionTypeId"], @@ -2807,6 +3175,23 @@ }, "documentation":"

Represents the output of a PollForThirdPartyJobs action.

" }, + "PollingAccountList":{ + "type":"list", + "member":{"shape":"AccountId"}, + "max":1000, + "min":1 + }, + "PollingServicePrincipalList":{ + "type":"list", + "member":{"shape":"ServicePrincipal"}, + "max":10, + "min":1 + }, + "PropertyDescription":{ + "type":"string", + "max":250, + "min":1 + }, "PutActionRevisionInput":{ "type":"structure", "required":[ @@ -3034,6 +3419,14 @@ "members":{ } }, + "RequestFailedException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "documentation":"

The request failed because of an unknown error, exception, or failure.

", + "exception":true + }, "ResolvedActionConfigurationMap":{ "type":"map", "key":{"shape":"String"}, @@ -3152,8 +3545,19 @@ "documentation":"

The Amazon S3 artifact location for an action's artifacts.

" }, "S3ObjectKey":{"type":"string"}, - "SecretAccessKey":{"type":"string"}, - "SessionToken":{"type":"string"}, + "SecretAccessKey":{ + "type":"string", + "sensitive":true + }, + "ServicePrincipal":{ + "type":"string", + "max":128, + "min":1 + }, + "SessionToken":{ + "type":"string", + "sensitive":true + }, "SourceRevision":{ "type":"structure", "required":["actionName"], @@ -3590,6 +3994,16 @@ "members":{ } }, + "UpdateActionTypeInput":{ + "type":"structure", + "required":["actionType"], + "members":{ + "actionType":{ + "shape":"ActionTypeDeclaration", + "documentation":"

The action type definition for the action type to be updated.

" + } + } + }, "UpdatePipelineInput":{ "type":"structure", "required":["pipeline"], diff --git a/botocore/data/comprehend/2017-11-27/service-2.json b/botocore/data/comprehend/2017-11-27/service-2.json index 8b0597aa..5d57c10e 100644 --- a/botocore/data/comprehend/2017-11-27/service-2.json +++ b/botocore/data/comprehend/2017-11-27/service-2.json @@ -113,6 +113,22 @@ ], "documentation":"

Creates a new document classification request to analyze a single document in real-time, using a previously created and trained custom model and an endpoint.

" }, + "ContainsPiiEntities":{ + "name":"ContainsPiiEntities", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ContainsPiiEntitiesRequest"}, + "output":{"shape":"ContainsPiiEntitiesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"TextSizeLimitExceededException"}, + {"shape":"UnsupportedLanguageException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Analyzes input text for the presence of personally identifiable information (PII) and returns the labels of identified PII entity types such as name, address, bank account number, or phone number.
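A minimal sketch of the new operation (assuming boto3's usual snake_case method mapping; the sample text and English language code are illustrative):

import boto3

comprehend = boto3.client("comprehend")

response = comprehend.contains_pii_entities(
    Text="My name is Jane Doe and my phone number is 555-0100.",
    LanguageCode="en",
)
# Each returned label names a detected PII entity type along with a confidence score.
for label in response.get("Labels", []):
    print(label["Name"], label["Score"])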

" + }, "CreateDocumentClassifier":{ "name":"CreateDocumentClassifier", "http":{ @@ -1429,6 +1445,32 @@ "documentation":"

Concurrent modification of the tags associated with an Amazon Comprehend resource is not supported.

", "exception":true }, + "ContainsPiiEntitiesRequest":{ + "type":"structure", + "required":[ + "Text", + "LanguageCode" + ], + "members":{ + "Text":{ + "shape":"String", + "documentation":"

The text to be analyzed for the presence of personally identifiable information (PII) entities.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language of the input documents.

" + } + } + }, + "ContainsPiiEntitiesResponse":{ + "type":"structure", + "members":{ + "Labels":{ + "shape":"ListOfEntityLabels", + "documentation":"

The labels used in the document being analyzed. Individual labels represent personally identifiable information (PII) entity types.

" + } + } + }, "CreateDocumentClassifierRequest":{ "type":"structure", "required":[ @@ -2538,6 +2580,20 @@ }, "documentation":"

Provides information about an entity.

" }, + "EntityLabel":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"PiiEntityType", + "documentation":"

The name of the label.

" + }, + "Score":{ + "shape":"Float", + "documentation":"

The level of confidence that Amazon Comprehend has in the accuracy of the detection.

" + } + }, + "documentation":"

Specifies one of the labels that categorize the personally identifiable information (PII) entity being analyzed.

" + }, "EntityRecognizerAnnotations":{ "type":"structure", "required":["S3Uri"], @@ -2783,7 +2839,7 @@ "EntityTypeName":{ "type":"string", "max":64, - "pattern":"^(?:(?!\\\\n+|\\\\t+|\\\\r+|[\\r\\t\\n\\s,]).)+$" + "pattern":"^(?:(?!\\\\n+|\\\\t+|\\\\r+|[\\r\\t\\n,]).)+$" }, "EntityTypesEvaluationMetrics":{ "type":"structure", @@ -3399,6 +3455,10 @@ "type":"list", "member":{"shape":"Entity"} }, + "ListOfEntityLabels":{ + "type":"list", + "member":{"shape":"EntityLabel"} + }, "ListOfKeyPhrases":{ "type":"list", "member":{"shape":"KeyPhrase"} @@ -4800,7 +4860,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

Amazon Comprehend can't process the language of the input text. For all custom entity recognition APIs (such as CreateEntityRecognizer), only English, Spanish, French, Italian, German, or Portuguese are accepted. For most other APIs, such as those for Custom Classification, Amazon Comprehend accepts text in all supported languages. For a list of supported languages, see supported-languages.

", + "documentation":"

Amazon Comprehend can't process the language of the input text. For custom entity recognition APIs, only English, Spanish, French, Italian, German, or Portuguese are accepted. For a list of supported languages, see supported-languages.

", "exception":true }, "UntagResourceRequest":{ diff --git a/botocore/data/compute-optimizer/2019-11-01/service-2.json b/botocore/data/compute-optimizer/2019-11-01/service-2.json index 1c83df5e..f88eb245 100644 --- a/botocore/data/compute-optimizer/2019-11-01/service-2.json +++ b/botocore/data/compute-optimizer/2019-11-01/service-2.json @@ -208,7 +208,7 @@ {"shape":"MissingAuthenticationToken"}, {"shape":"ThrottlingException"} ], - "documentation":"

Returns the optimization findings for an account.

For example, it returns the number of Amazon EC2 instances in an account that are under-provisioned, over-provisioned, or optimized. It also returns the number of Auto Scaling groups in an account that are not optimized, or optimized.

" + "documentation":"

Returns the optimization findings for an account.

It returns the number of:

" }, "UpdateEnrollmentStatus":{ "name":"UpdateEnrollmentStatus", @@ -226,7 +226,7 @@ {"shape":"MissingAuthenticationToken"}, {"shape":"ThrottlingException"} ], - "documentation":"

Updates the enrollment (opt in) status of an account to the AWS Compute Optimizer service.

If the account is a management account of an organization, this action can also be used to enroll member accounts within the organization.

" + "documentation":"

Updates the enrollment (opt in and opt out) status of an account to the AWS Compute Optimizer service.

If the account is a management account of an organization, this action can also be used to enroll member accounts within the organization.

You must have the appropriate permissions to opt in to Compute Optimizer, to view its recommendations, and to opt out. For more information, see Controlling access with AWS Identity and Access Management in the Compute Optimizer User Guide.

When you opt in, Compute Optimizer automatically creates a Service-Linked Role in your account to access its data. For more information, see Using Service-Linked Roles for AWS Compute Optimizer in the Compute Optimizer User Guide.
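A minimal sketch of opting an account in (assuming boto3's usual snake_case method mapping; includeMemberAccounts only has an effect when called from a management account):

import boto3

compute_optimizer = boto3.client("compute-optimizer")

# status accepts Active or Inactive; Pending and Failed cannot be set by callers.
response = compute_optimizer.update_enrollment_status(
    status="Active",
    includeMemberAccounts=True,
)
print(response)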

" } }, "shapes":{ @@ -432,7 +432,7 @@ }, "statistic":{ "shape":"MetricStatistic", - "documentation":"

The statistic of the utilization metric.

The following statistics are available:

" + "documentation":"

The statistic of the utilization metric.

The Compute Optimizer API, AWS Command Line Interface (AWS CLI), and SDKs return utilization metrics using only the Maximum statistic, which is the highest value observed during the specified period.

The Compute Optimizer console displays graphs for some utilization metrics using the Average statistic, which is the value of Sum / SampleCount during the specified period. For more information, see Viewing resource recommendations in the AWS Compute Optimizer User Guide. You can also get averaged utilization metric data for your resources using Amazon CloudWatch. For more information, see the Amazon CloudWatch User Guide.

" }, "value":{ "shape":"MetricValue", @@ -452,7 +452,7 @@ "members":{ "accountIds":{ "shape":"AccountIds", - "documentation":"

The IDs of the AWS accounts for which to export Auto Scaling group recommendations.

If your account is the management account of an organization, use this parameter to specify the member accounts for which you want to export recommendations.

This parameter cannot be specified together with the include member accounts parameter. The parameters are mutually exclusive.

Recommendations for member accounts are not included in the export if this parameter, or the include member accounts parameter, is omitted.

You can specify multiple account IDs per request.

" + "documentation":"

The IDs of the AWS accounts for which to export Auto Scaling group recommendations.

If your account is the management account of an organization, use this parameter to specify the member account for which you want to export recommendations.

This parameter cannot be specified together with the include member accounts parameter. The parameters are mutually exclusive.

Recommendations for member accounts are not included in the export if this parameter, or the include member accounts parameter, is omitted.

You can specify multiple account IDs per request.

" }, "filters":{ "shape":"Filters", @@ -505,7 +505,7 @@ "members":{ "accountIds":{ "shape":"AccountIds", - "documentation":"

The IDs of the AWS accounts for which to export instance recommendations.

If your account is the management account of an organization, use this parameter to specify the member accounts for which you want to export recommendations.

This parameter cannot be specified together with the include member accounts parameter. The parameters are mutually exclusive.

Recommendations for member accounts are not included in the export if this parameter, or the include member accounts parameter, is omitted.

You can specify multiple account IDs per request.

" + "documentation":"

The IDs of the AWS accounts for which to export instance recommendations.

If your account is the management account of an organization, use this parameter to specify the member account for which you want to export recommendations.

This parameter cannot be specified together with the include member accounts parameter. The parameters are mutually exclusive.

Recommendations for member accounts are not included in the export if this parameter, or the include member accounts parameter, is omitted.

You can specify multiple account IDs per request.

" }, "filters":{ "shape":"Filters", @@ -692,7 +692,7 @@ "members":{ "accountIds":{ "shape":"AccountIds", - "documentation":"

The IDs of the AWS accounts for which to return Auto Scaling group recommendations.

If your account is the management account of an organization, use this parameter to specify the member accounts for which you want to return Auto Scaling group recommendations.

Only one account ID can be specified per request.

" + "documentation":"

The ID of the AWS account for which to return Auto Scaling group recommendations.

If your account is the management account of an organization, use this parameter to specify the member account for which you want to return Auto Scaling group recommendations.

Only one account ID can be specified per request.

" }, "autoScalingGroupArns":{ "shape":"AutoScalingGroupArns", @@ -750,7 +750,7 @@ }, "accountIds":{ "shape":"AccountIds", - "documentation":"

The IDs of the AWS accounts for which to return volume recommendations.

If your account is the management account of an organization, use this parameter to specify the member accounts for which you want to return volume recommendations.

Only one account ID can be specified per request.

" + "documentation":"

The ID of the AWS account for which to return volume recommendations.

If your account is the management account of an organization, use this parameter to specify the member account for which you want to return volume recommendations.

Only one account ID can be specified per request.

" } } }, @@ -792,7 +792,7 @@ }, "accountIds":{ "shape":"AccountIds", - "documentation":"

The IDs of the AWS accounts for which to return instance recommendations.

If your account is the management account of an organization, use this parameter to specify the member accounts for which you want to return instance recommendations.

Only one account ID can be specified per request.

" + "documentation":"

The ID of the AWS account for which to return instance recommendations.

If your account is the management account of an organization, use this parameter to specify the member account for which you want to return instance recommendations.

Only one account ID can be specified per request.

" } } }, @@ -885,7 +885,7 @@ }, "accountIds":{ "shape":"AccountIds", - "documentation":"

The IDs of the AWS accounts for which to return function recommendations.

If your account is the management account of an organization, use this parameter to specify the member accounts for which you want to return function recommendations.

Only one account ID can be specified per request.

" + "documentation":"

The ID of the AWS account for which to return function recommendations.

If your account is the management account of an organization, use this parameter to specify the member account for which you want to return function recommendations.

Only one account ID can be specified per request.

" }, "filters":{ "shape":"LambdaFunctionRecommendationFilters", @@ -941,7 +941,7 @@ "members":{ "accountIds":{ "shape":"AccountIds", - "documentation":"

The IDs of the AWS accounts for which to return recommendation summaries.

If your account is the management account of an organization, use this parameter to specify the member accounts for which you want to return recommendation summaries.

Only one account ID can be specified per request.

" + "documentation":"

The ID of the AWS account for which to return recommendation summaries.

If your account is the management account of an organization, use this parameter to specify the member account for which you want to return recommendation summaries.

Only one account ID can be specified per request.

" }, "nextToken":{ "shape":"NextToken", @@ -1215,7 +1215,7 @@ }, "findingReasonCodes":{ "shape":"LambdaFunctionRecommendationFindingReasonCodes", - "documentation":"

The reason for the finding classification of the function.

Functions that have a finding classification of Optimized don't have a finding reason code.

Reason codes include:

" + "documentation":"

The reason for the finding classification of the function.

Functions that have a finding classification of Optimized don't have a finding reason code.

Reason codes include:

" }, "memorySizeRecommendationOptions":{ "shape":"LambdaFunctionMemoryRecommendationOptions", @@ -1279,11 +1279,11 @@ "members":{ "name":{ "shape":"LambdaFunctionMetricName", - "documentation":"

The name of the utilization metric.

" + "documentation":"

The name of the utilization metric.

The following utilization metrics are available:

" }, "statistic":{ "shape":"LambdaFunctionMetricStatistic", - "documentation":"

The statistic of the utilization metric.

" + "documentation":"

The statistic of the utilization metric.

The Compute Optimizer API, AWS Command Line Interface (AWS CLI), and SDKs return utilization metrics using only the Maximum statistic, which is the highest value observed during the specified period.

The Compute Optimizer console displays graphs for some utilization metrics using the Average statistic, which is the value of Sum / SampleCount during the specified period. For more information, see Viewing resource recommendations in the AWS Compute Optimizer User Guide. You can also get averaged utilization metric data for your resources using Amazon CloudWatch. For more information, see the Amazon CloudWatch User Guide.

" }, "value":{ "shape":"MetricValue", @@ -1638,11 +1638,11 @@ "members":{ "status":{ "shape":"Status", - "documentation":"

The new enrollment status of the account.

Accepted options are Active or Inactive. You will get an error if Pending or Failed are specified.

" + "documentation":"

The new enrollment status of the account.

The following status options are available:

The Pending and Failed options cannot be used to update the enrollment status of an account. They are returned in the response of a request to update the enrollment status of an account.

" }, "includeMemberAccounts":{ "shape":"IncludeMemberAccounts", - "documentation":"

Indicates whether to enroll member accounts of the organization if the your account is the management account of an organization.

" + "documentation":"

Indicates whether to enroll member accounts of the organization if the account is the management account of an organization.

" } } }, @@ -1668,7 +1668,7 @@ }, "statistic":{ "shape":"MetricStatistic", - "documentation":"

The statistic of the utilization metric.

The following statistics are available:

" + "documentation":"

The statistic of the utilization metric.

The Compute Optimizer API, AWS Command Line Interface (AWS CLI), and SDKs return utilization metrics using only the Maximum statistic, which is the highest value observed during the specified period.

The Compute Optimizer console displays graphs for some utilization metrics using the Average statistic, which is the value of Sum / SampleCount during the specified period. For more information, see Viewing resource recommendations in the AWS Compute Optimizer User Guide. You can also get averaged utilization metric data for your resources using Amazon CloudWatch. For more information, see the Amazon CloudWatch User Guide.

" }, "value":{ "shape":"MetricValue", diff --git a/botocore/data/config/2014-11-12/service-2.json b/botocore/data/config/2014-11-12/service-2.json index cae71e9d..f976f905 100644 --- a/botocore/data/config/2014-11-12/service-2.json +++ b/botocore/data/config/2014-11-12/service-2.json @@ -236,7 +236,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Deletes the stored query for an AWS account in an AWS Region.

" + "documentation":"

Deletes the stored query for a single AWS account and a single AWS Region.

" }, "DeliverConfigSnapshot":{ "name":"DeliverConfigSnapshot", @@ -875,7 +875,7 @@ {"shape":"ValidationException"}, {"shape":"InvalidNextTokenException"} ], - "documentation":"

List the stored queries for an AWS account in an AWS Region. The default is 100.

" + "documentation":"

Lists the stored queries for a single AWS account and a single AWS Region. The default is 100.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -938,7 +938,7 @@ {"shape":"NoAvailableOrganizationException"}, {"shape":"OrganizationAllFeaturesNotEnabledException"} ], - "documentation":"

Creates and updates the configuration aggregator with the selected source accounts and regions. The source account can be individual account(s) or an organization.

AWS Config should be enabled in source accounts and regions you want to aggregate.

If your source type is an organization, you must be signed in to the master account and all features must be enabled in your organization. AWS Config calls EnableAwsServiceAccess API to enable integration between AWS Config and AWS Organizations.

" + "documentation":"

Creates and updates the configuration aggregator with the selected source accounts and regions. The source account can be individual account(s) or an organization.

accountIds that are passed will be replaced with existing accounts. If you want to add additional accounts into the aggregator, call DescribeAggregator to get the previous accounts and then append new ones.

AWS Config should be enabled in source accounts and regions you want to aggregate.

If your source type is an organization, you must be signed in to the management account or a registered delegated administrator and all the features must be enabled in your organization. If the caller is a management account, AWS Config calls EnableAwsServiceAccess API to enable integration between AWS Config and AWS Organizations. If the caller is a registered delegated administrator, AWS Config calls ListDelegatedAdministrators API to verify whether the caller is a valid delegated administrator.

To register a delegated administrator, see Register a Delegated Administrator in the AWS Config developer guide.

" }, "PutConfigurationRecorder":{ "name":"PutConfigurationRecorder", @@ -985,6 +985,7 @@ {"shape":"InvalidDeliveryChannelNameException"}, {"shape":"NoSuchBucketException"}, {"shape":"InvalidS3KeyPrefixException"}, + {"shape":"InvalidS3KmsKeyArnException"}, {"shape":"InvalidSNSTopicARNException"}, {"shape":"InsufficientDeliveryPolicyException"} ], @@ -1016,7 +1017,8 @@ "errors":[ {"shape":"NoSuchConfigRuleException"}, {"shape":"InvalidParameterValueException"} - ] + ], + "documentation":"

Adds or updates the evaluations for process checks. This API checks if the rule is a process check when the name of the AWS Config rule is provided.

" }, "PutOrganizationConfigRule":{ "name":"PutOrganizationConfigRule", @@ -1084,7 +1086,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InsufficientPermissionsException"} ], - "documentation":"

A remediation exception is when a specific resource is no longer considered for auto-remediation. This API adds a new exception or updates an exisiting exception for a specific resource with a specific AWS Config rule.

AWS Config generates a remediation exception when a problem occurs executing a remediation action to a specific resource. Remediation exceptions blocks auto-remediation until the exception is cleared.

" + "documentation":"

A remediation exception is when a specific resource is no longer considered for auto-remediation. This API adds a new exception or updates an existing exception for a specific resource with a specific AWS Config rule.

AWS Config generates a remediation exception when a problem occurs executing a remediation action for a specific resource. Remediation exceptions block auto-remediation until the exception is cleared.

" }, "PutResourceConfig":{ "name":"PutResourceConfig", @@ -1128,7 +1130,7 @@ {"shape":"TooManyTagsException"}, {"shape":"ResourceConcurrentModificationException"} ], - "documentation":"

Saves a new query or updates an existing saved query. The QueryName must be unique for an AWS account in an AWS Region. You can create upto 300 queries in an AWS account in an AWS Region.

" + "documentation":"

Saves a new query or updates an existing saved query. The QueryName must be unique for a single AWS account and a single AWS Region. You can create up to 300 queries in a single AWS account and a single AWS Region.

" }, "SelectAggregateResourceConfig":{ "name":"SelectAggregateResourceConfig", @@ -2094,7 +2096,7 @@ }, "arn":{ "shape":"ARN", - "documentation":"

accoun

" + "documentation":"

Amazon Resource Name (ARN) associated with the resource.

" }, "resourceType":{ "shape":"ResourceType", @@ -2281,7 +2283,8 @@ "type":"string", "enum":[ "COMPLIANT", - "NON_COMPLIANT" + "NON_COMPLIANT", + "INSUFFICIENT_DATA" ] }, "ConformancePackConfigRuleNames":{ @@ -2312,11 +2315,11 @@ }, "DeliveryS3Bucket":{ "shape":"DeliveryS3Bucket", - "documentation":"

Conformance pack template that is used to create a pack. The delivery bucket name should start with awsconfigconforms. For example: \"Resource\": \"arn:aws:s3:::your_bucket_name/*\".

" + "documentation":"

Amazon S3 bucket where AWS Config stores conformance pack templates.

This field is optional.

" }, "DeliveryS3KeyPrefix":{ "shape":"DeliveryS3KeyPrefix", - "documentation":"

The prefix for the Amazon S3 bucket.

" + "documentation":"

The prefix for the Amazon S3 bucket.

This field is optional.

" }, "ConformancePackInputParameters":{ "shape":"ConformancePackInputParameters", @@ -2790,6 +2793,10 @@ "shape":"String", "documentation":"

The prefix for the specified Amazon S3 bucket.

" }, + "s3KmsKeyArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Key Management Service (KMS) customer managed key (CMK) used to encrypt objects delivered by AWS Config. Must belong to the same Region as the destination S3 bucket.
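A minimal sketch of supplying the new s3KmsKeyArn when configuring a delivery channel (assuming boto3's usual snake_case method mapping; the channel name, bucket, prefix, and key ARN are hypothetical):

import boto3

config = boto3.client("config")

# The KMS key must be in the same Region as the destination S3 bucket.
config.put_delivery_channel(
    DeliveryChannel={
        "name": "default",
        "s3BucketName": "example-config-bucket",
        "s3KeyPrefix": "config",
        "s3KmsKeyArn": "arn:aws:kms:us-east-1:123456789012:key/example-key-id",
    }
)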

" + }, "snsTopicARN":{ "shape":"String", "documentation":"

The Amazon Resource Name (ARN) of the Amazon SNS topic to which AWS Config sends notifications about configuration changes.

If you choose a topic from another account, the topic must have policies that grant access permissions to AWS Config. For more information, see Permissions for the Amazon SNS Topic in the AWS Config Developer Guide.

" @@ -3714,12 +3721,28 @@ "OrderingTimestamp" ], "members":{ - "ComplianceResourceType":{"shape":"StringWithCharLimit256"}, - "ComplianceResourceId":{"shape":"BaseResourceId"}, - "ComplianceType":{"shape":"ComplianceType"}, - "Annotation":{"shape":"StringWithCharLimit256"}, - "OrderingTimestamp":{"shape":"OrderingTimestamp"} - } + "ComplianceResourceType":{ + "shape":"StringWithCharLimit256", + "documentation":"

The evaluated compliance resource type. AWS Config accepts the AWS::::Account resource type.

" + }, + "ComplianceResourceId":{ + "shape":"BaseResourceId", + "documentation":"

The evaluated compliance resource ID. AWS Config accepts only an AWS account ID.

" + }, + "ComplianceType":{ + "shape":"ComplianceType", + "documentation":"

The compliance of the AWS resource. The valid values are COMPLIANT, NON_COMPLIANT, and NOT_APPLICABLE.

" + }, + "Annotation":{ + "shape":"StringWithCharLimit256", + "documentation":"

Supplementary information about the reason for compliance. For example, this task was completed on a specific date.

" + }, + "OrderingTimestamp":{ + "shape":"OrderingTimestamp", + "documentation":"

The time when the compliance was recorded.

" + } + }, + "documentation":"

Identifies an AWS resource and indicates whether it complies with the AWS Config rule that it was evaluated against.

" }, "FailedDeleteRemediationExceptionsBatch":{ "type":"structure", @@ -4430,6 +4453,13 @@ "documentation":"

The specified Amazon S3 key prefix is not valid.

", "exception":true }, + "InvalidS3KmsKeyArnException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified Amazon KMS Key ARN is not valid.

", + "exception":true + }, "InvalidSNSTopicARNException":{ "type":"structure", "members":{ @@ -4485,7 +4515,7 @@ }, "Limit":{ "shape":"Limit", - "documentation":"

The maximum number of resource identifiers returned on each page. The default is 100. You cannot specify a number greater than 100. If you specify 0, AWS Config uses the default.

" + "documentation":"

The maximum number of resource identifiers returned on each page. You cannot specify a number greater than 100. If you specify 0, AWS Config uses the default.

" }, "NextToken":{ "shape":"NextToken", @@ -4854,7 +4884,7 @@ "type":"structure", "members":{ }, - "documentation":"

For PutConfigAggregator API, no permission to call EnableAWSServiceAccess API.

For all OrganizationConfigRule and OrganizationConformancePack APIs, AWS Config throws an exception if APIs are called from member accounts. All APIs must be called from organization master account.

", + "documentation":"

For PutConfigurationAggregator API, you can see this exception for the following reasons:

For all OrganizationConfigRule and OrganizationConformancePack APIs, AWS Config throws an exception if APIs are called from member accounts. All APIs must be called from organization master account.

", "exception":true }, "OrganizationAggregationSource":{ @@ -5001,11 +5031,11 @@ }, "DeliveryS3Bucket":{ "shape":"DeliveryS3Bucket", - "documentation":"

Location of an Amazon S3 bucket where AWS Config can deliver evaluation results and conformance pack template that is used to create a pack.

" + "documentation":"

Amazon S3 bucket where AWS Config stores conformance pack templates.

This field is optional.

" }, "DeliveryS3KeyPrefix":{ "shape":"DeliveryS3KeyPrefix", - "documentation":"

Any folder structure you want to add to an Amazon S3 bucket.

" + "documentation":"

Any folder structure you want to add to an Amazon S3 bucket.

This field is optional.

" }, "ConformancePackInputParameters":{ "shape":"ConformancePackInputParameters", @@ -5416,11 +5446,11 @@ }, "DeliveryS3Bucket":{ "shape":"DeliveryS3Bucket", - "documentation":"

AWS Config stores intermediate files while processing conformance pack template.

" + "documentation":"

Amazon S3 bucket where AWS Config stores conformance pack templates.

This field is optional.

" }, "DeliveryS3KeyPrefix":{ "shape":"DeliveryS3KeyPrefix", - "documentation":"

The prefix for the Amazon S3 bucket.

" + "documentation":"

The prefix for the Amazon S3 bucket.

This field is optional.

" }, "ConformancePackInputParameters":{ "shape":"ConformancePackInputParameters", @@ -5484,8 +5514,14 @@ "ExternalEvaluation" ], "members":{ - "ConfigRuleName":{"shape":"ConfigRuleName"}, - "ExternalEvaluation":{"shape":"ExternalEvaluation"} + "ConfigRuleName":{ + "shape":"ConfigRuleName", + "documentation":"

The name of the AWS Config rule.

" + }, + "ExternalEvaluation":{ + "shape":"ExternalEvaluation", + "documentation":"

An ExternalEvaluation object that provides details about compliance.
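A minimal sketch of reporting a process-check result with the new operation (assuming boto3's usual snake_case method mapping; the rule name and account ID are hypothetical):

from datetime import datetime, timezone

import boto3

config = boto3.client("config")

config.put_external_evaluation(
    ConfigRuleName="example-process-check-rule",
    ExternalEvaluation={
        "ComplianceResourceType": "AWS::::Account",
        "ComplianceResourceId": "123456789012",
        "ComplianceType": "COMPLIANT",
        "Annotation": "Quarterly review completed",
        "OrderingTimestamp": datetime.now(timezone.utc),
    },
)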

" + } } }, "PutExternalEvaluationResponse":{ @@ -5542,11 +5578,11 @@ }, "DeliveryS3Bucket":{ "shape":"DeliveryS3Bucket", - "documentation":"

Location of an Amazon S3 bucket where AWS Config can deliver evaluation results. AWS Config stores intermediate files while processing conformance pack template.

The delivery bucket name should start with awsconfigconforms. For example: \"Resource\": \"arn:aws:s3:::your_bucket_name/*\". For more information, see Permissions for cross account bucket access.

" + "documentation":"

Amazon S3 bucket where AWS Config stores conformance pack templates.

This field is optional.

" }, "DeliveryS3KeyPrefix":{ "shape":"DeliveryS3KeyPrefix", - "documentation":"

The prefix for the Amazon S3 bucket.

" + "documentation":"

The prefix for the Amazon S3 bucket.

This field is optional.

" }, "ConformancePackInputParameters":{ "shape":"ConformancePackInputParameters", @@ -5680,7 +5716,7 @@ "members":{ "StoredQuery":{ "shape":"StoredQuery", - "documentation":"

A list of StoredQuery objects. The mandatory fields are QueryName and Expression.

" + "documentation":"

A list of StoredQuery objects. The mandatory fields are QueryName and Expression.

When you are creating a query, you must provide a query name and an expression. When you are updating a query, you must provide a query name but updating the description is optional.

" }, "Tags":{ "shape":"TagsList", @@ -5693,7 +5729,7 @@ "members":{ "QueryArn":{ "shape":"QueryArn", - "documentation":"

Amazon Resource Name (ARN) of the query. For example, arn:partition:service:region:account-id:resource-type/resource-id.

" + "documentation":"

Amazon Resource Name (ARN) of the query. For example, arn:partition:service:region:account-id:resource-type/resource-name/resource-id.
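A minimal sketch of saving a stored query (assuming boto3's usual snake_case method mapping; the query name, expression, and description are illustrative):

import boto3

config = boto3.client("config")

# QueryName and Expression are the mandatory StoredQuery fields.
response = config.put_stored_query(
    StoredQuery={
        "QueryName": "running-ec2-instances",
        "Expression": "SELECT resourceId WHERE resourceType = 'AWS::EC2::Instance'",
        "Description": "All recorded EC2 instances",
    }
)
print(response["QueryArn"])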

" } } }, @@ -5763,7 +5799,7 @@ }, "resourceTypes":{ "shape":"ResourceTypeList", - "documentation":"

A comma-separated list that specifies the types of AWS resources for which AWS Config records configuration changes (for example, AWS::EC2::Instance or AWS::CloudTrail::Trail).

Before you can set this option to true, you must set the allSupported option to false.

If you set this option to true, when AWS Config adds support for a new type of resource, it will not record resources of that type unless you manually add that type to your recording group.

For a list of valid resourceTypes values, see the resourceType Value column in Supported AWS Resource Types.

" + "documentation":"

A comma-separated list that specifies the types of AWS resources for which AWS Config records configuration changes (for example, AWS::EC2::Instance or AWS::CloudTrail::Trail).

To record all configuration changes, you must set the allSupported option to false.

If you set this option to true, when AWS Config adds support for a new type of resource, it will not record resources of that type unless you manually add that type to your recording group.

For a list of valid resourceTypes values, see the resourceType Value column in Supported AWS Resource Types.

" } }, "documentation":"

Specifies the types of AWS resource for which AWS Config records configuration changes.

In the recording group, you specify whether all supported types or specific types of resources are recorded.

By default, AWS Config records configuration changes for all supported types of regional resources that AWS Config discovers in the region in which it is running. Regional resources are tied to a region and can be used only in that region. Examples of regional resources are EC2 instances and EBS volumes.

You can also have AWS Config record configuration changes for supported types of global resources (for example, IAM resources). Global resources are not tied to an individual region and can be used in all regions.

The configuration details for any global resource are the same in all regions. If you customize AWS Config in multiple regions to record global resources, it will create multiple configuration items each time a global resource changes: one configuration item for each region. These configuration items will contain identical data. To prevent duplicate configuration items, you should consider customizing AWS Config in only one region to record global resources, unless you want the configuration items to be available in multiple regions.

If you don't want AWS Config to record all resources, you can specify which types of resources it will record with the resourceTypes parameter.

For a list of supported resource types, see Supported Resource Types.

For more information, see Selecting Which Resources AWS Config Records.

" @@ -6663,7 +6699,7 @@ }, "QueryArn":{ "shape":"QueryArn", - "documentation":"

Amazon Resource Name (ARN) of the query. For example, arn:partition:service:region:account-id:resource-type/resource-id.

", + "documentation":"

Amazon Resource Name (ARN) of the query. For example, arn:partition:service:region:account-id:resource-type/resource-name/resource-id.

", "box":true }, "QueryName":{ @@ -6697,7 +6733,7 @@ }, "QueryArn":{ "shape":"QueryArn", - "documentation":"

Amazon Resource Name (ARN) of the query. For example, arn:partition:service:region:account-id:resource-type/resource-id.

" + "documentation":"

Amazon Resource Name (ARN) of the query. For example, arn:partition:service:region:account-id:resource-type/resource-name/resource-id.

" }, "QueryName":{ "shape":"QueryName", @@ -6864,7 +6900,7 @@ "type":"structure", "members":{ }, - "documentation":"

The requested action is not valid.

", + "documentation":"

The requested action is not valid.

For PutStoredQuery, you will see this exception if there are missing required fields or if the input value fails the validation, or if you are trying to create more than 300 queries.

For GetStoredQuery, ListStoredQuery, and DeleteStoredQuery you will see this exception if there are missing required fields or if the input value fails the validation.

", "exception":true }, "Value":{"type":"string"}, diff --git a/botocore/data/connect/2017-08-08/service-2.json b/botocore/data/connect/2017-08-08/service-2.json index 45937722..913ff967 100644 --- a/botocore/data/connect/2017-08-08/service-2.json +++ b/botocore/data/connect/2017-08-08/service-2.json @@ -1969,7 +1969,7 @@ }, "Concurrency":{ "type":"integer", - "max":5, + "max":10, "min":1 }, "ContactFlow":{ diff --git a/botocore/data/cur/2017-01-06/service-2.json b/botocore/data/cur/2017-01-06/service-2.json index 84fe1318..eee4ed9a 100644 --- a/botocore/data/cur/2017-01-06/service-2.json +++ b/botocore/data/cur/2017-01-06/service-2.json @@ -115,6 +115,11 @@ "member":{"shape":"AdditionalArtifact"}, "documentation":"

A list of additional artifacts.

" }, + "BillingViewArn":{ + "type":"string", + "max":128, + "pattern":"(arn:aws(-cn)?:billing::[0-9]{12}:billingview/)?[a-zA-Z0-9_\\+=\\.\\-@].{1,30}" + }, "CompressionFormat":{ "type":"string", "documentation":"

The compression format that AWS uses for the report.

", @@ -270,6 +275,10 @@ "ReportVersioning":{ "shape":"ReportVersioning", "documentation":"

Whether you want Amazon Web Services to overwrite the previous version of each report or to deliver the report in addition to the previous versions.

" + }, + "BillingViewArn":{ + "shape":"BillingViewArn", + "documentation":"

The Amazon Resource Name (ARN) of the billing view. You can get this value by using the billing view service public APIs.

" } }, "documentation":"

The definition of AWS Cost and Usage Report. You can specify the report name, time unit, report format, compression format, S3 bucket, additional artifacts, and schema elements in the definition.

" diff --git a/botocore/data/databrew/2017-07-25/service-2.json b/botocore/data/databrew/2017-07-25/service-2.json index 30a3b1d7..29240a5f 100644 --- a/botocore/data/databrew/2017-07-25/service-2.json +++ b/botocore/data/databrew/2017-07-25/service-2.json @@ -25,7 +25,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"} ], - "documentation":"

Deletes one or more versions of a recipe at a time.

The entire request will be rejected if:

The request will complete successfully, but with partial failures, if:

The LATEST_WORKING version will only be deleted if the recipe has no other versions. If you try to delete LATEST_WORKING while other versions exist (or if they can't be deleted), then LATEST_WORKING will be listed as partial failure in the response.

" + "documentation":"

Deletes one or more versions of a recipe at a time.

The entire request will be rejected if:

The request will complete successfully, but with partial failures, if:

The LATEST_WORKING version will only be deleted if the recipe has no other versions. If you try to delete LATEST_WORKING while other versions exist (or if they can't be deleted), then LATEST_WORKING will be listed as a partial failure in the response.

" }, "CreateDataset":{ "name":"CreateDataset", @@ -225,6 +225,20 @@ ], "documentation":"

Returns the definition of a specific DataBrew job.

" }, + "DescribeJobRun":{ + "name":"DescribeJobRun", + "http":{ + "method":"GET", + "requestUri":"/jobs/{name}/jobRun/{runId}" + }, + "input":{"shape":"DescribeJobRunRequest"}, + "output":{"shape":"DescribeJobRunResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns information about one run of a DataBrew job.
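A minimal sketch of the new operation (assuming boto3's usual snake_case method mapping; the job name and run ID are placeholders):

import boto3

databrew = boto3.client("databrew")

run = databrew.describe_job_run(
    Name="example-profile-job",
    RunId="example-run-id",
)
print(run["JobName"], run["State"])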

" + }, "DescribeProject":{ "name":"DescribeProject", "http":{ @@ -725,6 +739,10 @@ "shape":"DatasetName", "documentation":"

The name of the dataset to be created. Valid characters are alphanumeric (A-Z, a-z, 0-9), hyphen (-), period (.), and space.

" }, + "Format":{ + "shape":"InputFormat", + "documentation":"

Specifies the file format of a dataset created from an S3 file or folder.

" + }, "FormatOptions":{"shape":"FormatOptions"}, "Input":{"shape":"Input"}, "Tags":{ @@ -762,7 +780,7 @@ }, "EncryptionMode":{ "shape":"EncryptionMode", - "documentation":"

The encryption mode for the job, which can be one of the following:

" + "documentation":"

The encryption mode for the job, which can be one of the following:

" }, "Name":{ "shape":"JobName", @@ -792,6 +810,10 @@ "Timeout":{ "shape":"Timeout", "documentation":"

The job's timeout in minutes. A job that attempts to run longer than this timeout period ends with a status of TIMEOUT.

" + }, + "JobSample":{ + "shape":"JobSample", + "documentation":"

Sample configuration for profile jobs only. Determines the number of rows on which the profile job will be executed. If a JobSample value is not provided, the default value will be used. The default value is CUSTOM_ROWS for the mode parameter and 20000 for the size parameter.
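A sketch of what a JobSample value might look like when passed on a profile-job request; the Mode and Size member names are assumed from the mode and size parameters described above:

# Limit a profile job to its first 50,000 rows (values are illustrative).
job_sample = {
    "Mode": "CUSTOM_ROWS",
    "Size": 50000,
}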

" } } }, @@ -865,7 +887,7 @@ }, "EncryptionMode":{ "shape":"EncryptionMode", - "documentation":"

The encryption mode for the job, which can be one of the following:

" + "documentation":"

The encryption mode for the job, which can be one of the following:

" }, "Name":{ "shape":"JobName", @@ -998,6 +1020,10 @@ "Delimiter":{ "shape":"Delimiter", "documentation":"

A single character that specifies the delimiter being used in the Csv file.

" + }, + "HeaderRow":{ + "shape":"HeaderRow", + "documentation":"

A variable that specifies whether the first row in the file will be parsed as the header. If false, column names will be auto-generated.

" } }, "documentation":"

Options that define how DataBrew will read a Csv file when creating a dataset from that file.
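A sketch of FormatOptions for a comma-delimited file whose first row is a header; the Csv member key is assumed from the FormatOptions description elsewhere in this model:

# FormatOptions fragment enabling header-row parsing (values are illustrative).
format_options = {
    "Csv": {
        "Delimiter": ",",
        "HeaderRow": True,
    }
}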

" @@ -1010,7 +1036,7 @@ "documentation":"

A single character that specifies the delimiter used to create Csv job output.

" } }, - "documentation":"

Options that define how DataBrew will write a Csv file a.

" + "documentation":"

Options that define how DataBrew will write a Csv file.

" }, "DataCatalogInputDefinition":{ "type":"structure", @@ -1066,6 +1092,10 @@ "shape":"DatasetName", "documentation":"

The unique name of the dataset.

" }, + "Format":{ + "shape":"InputFormat", + "documentation":"

Specifies the file format of a dataset created from an S3 file or folder.

" + }, "FormatOptions":{ "shape":"FormatOptions", "documentation":"

Options that define how DataBrew interprets the data in the dataset.

" @@ -1269,6 +1299,10 @@ "shape":"DatasetName", "documentation":"

The name of the dataset.

" }, + "Format":{ + "shape":"InputFormat", + "documentation":"

Specifies the file format of a dataset created from an S3 file or folder.

" + }, "FormatOptions":{"shape":"FormatOptions"}, "Input":{"shape":"Input"}, "LastModifiedDate":{ @@ -1327,7 +1361,7 @@ }, "EncryptionMode":{ "shape":"EncryptionMode", - "documentation":"

The encryption mode for the job, which can be one of the following:

" + "documentation":"

The encryption mode for the job, which can be one of the following:

" }, "Name":{ "shape":"JobName", @@ -1381,6 +1415,94 @@ "Timeout":{ "shape":"Timeout", "documentation":"

The job's timeout in minutes. A job that attempts to run longer than this timeout period ends with a status of TIMEOUT.

" + }, + "JobSample":{ + "shape":"JobSample", + "documentation":"

Sample configuration for profile jobs only. Determines the number of rows on which the profile job will be executed.

" + } + } + }, + "DescribeJobRunRequest":{ + "type":"structure", + "required":[ + "Name", + "RunId" + ], + "members":{ + "Name":{ + "shape":"JobName", + "documentation":"

The name of the job being processed during this run.

", + "location":"uri", + "locationName":"name" + }, + "RunId":{ + "shape":"JobRunId", + "documentation":"

The unique identifier of the job run.

", + "location":"uri", + "locationName":"runId" + } + } + }, + "DescribeJobRunResponse":{ + "type":"structure", + "required":["JobName"], + "members":{ + "Attempt":{ + "shape":"Attempt", + "documentation":"

The number of times that DataBrew has attempted to run the job.

" + }, + "CompletedOn":{ + "shape":"Date", + "documentation":"

The date and time when the job completed processing.

" + }, + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

The name of the dataset for the job to process.

" + }, + "ErrorMessage":{ + "shape":"JobRunErrorMessage", + "documentation":"

A message indicating an error (if any) that was encountered when the job ran.

" + }, + "ExecutionTime":{ + "shape":"ExecutionTime", + "documentation":"

The amount of time, in seconds, during which the job run consumed resources.

" + }, + "JobName":{ + "shape":"JobName", + "documentation":"

The name of the job being processed during this run.

" + }, + "RunId":{ + "shape":"JobRunId", + "documentation":"

The unique identifier of the job run.

" + }, + "State":{ + "shape":"JobRunState", + "documentation":"

The current state of the job run entity itself.

" + }, + "LogSubscription":{ + "shape":"LogSubscription", + "documentation":"

The current status of Amazon CloudWatch logging for the job run.

" + }, + "LogGroupName":{ + "shape":"LogGroupName", + "documentation":"

The name of an Amazon CloudWatch log group, where the job writes diagnostic messages when it runs.

" + }, + "Outputs":{ + "shape":"OutputList", + "documentation":"

One or more output artifacts from a job run.

" + }, + "RecipeReference":{"shape":"RecipeReference"}, + "StartedBy":{ + "shape":"StartedBy", + "documentation":"

The Amazon Resource Name (ARN) of the user who started the job run.

" + }, + "StartedOn":{ + "shape":"Date", + "documentation":"

The date and time when the job run began.

" + }, + "JobSample":{ + "shape":"JobSample", + "documentation":"

Sample configuration for profile jobs only. Determines the number of rows on which the profile job will be executed. If a JobSample value is not provided, the default value will be used. The default value is CUSTOM_ROWS for the mode parameter and 20000 for the size parameter.
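As a rough usage sketch for the new DescribeJobRun operation, assuming a boto3 DataBrew client and an existing job (the job name is a placeholder):

```python
import boto3

databrew = boto3.client("databrew")

# Start a run of an existing job, then fetch the details of that run.
run = databrew.start_job_run(Name="orders-profile-job")

details = databrew.describe_job_run(
    Name="orders-profile-job",
    RunId=run["RunId"],
)
print(details["State"], details.get("ErrorMessage"))
```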

" } } }, @@ -1611,6 +1733,10 @@ "SheetIndexes":{ "shape":"SheetIndexList", "documentation":"

Specifies one or more sheet numbers in the Excel file, which will be included in the dataset.

" + }, + "HeaderRow":{ + "shape":"HeaderRow", + "documentation":"

A variable that specifies whether the first row in the file will be parsed as the header. If false, column names will be auto-generated.

" } }, "documentation":"

Options that define how DataBrew will interpret a Microsoft Excel file, when creating a dataset from that file.

" @@ -1634,6 +1760,7 @@ }, "documentation":"

Options that define the structure of either Csv, Excel, or JSON input.

" }, + "HeaderRow":{"type":"boolean"}, "HiddenColumnList":{ "type":"list", "member":{"shape":"ColumnName"} @@ -1652,6 +1779,15 @@ }, "documentation":"

Information on how DataBrew can find data, in either the AWS Glue Data Catalog or Amazon S3.

" }, + "InputFormat":{ + "type":"string", + "enum":[ + "CSV", + "JSON", + "PARQUET", + "EXCEL" + ] + }, "InternalServerException":{ "type":"structure", "members":{ @@ -1744,6 +1880,10 @@ "Tags":{ "shape":"TagMap", "documentation":"

Metadata tags that have been applied to the job.

" + }, + "JobSample":{ + "shape":"JobSample", + "documentation":"

Sample configuration for profile jobs only. Determines the number of rows on which the profile job will be executed. If a JobSample value is not provided, the default value will be used. The default value is CUSTOM_ROWS for the mode parameter and 20000 for the size parameter.

" } }, "documentation":"

Represents all of the attributes of a DataBrew job.

" @@ -1820,6 +1960,10 @@ "StartedOn":{ "shape":"Date", "documentation":"

The date and time when the job run began.

" + }, + "JobSample":{ + "shape":"JobSample", + "documentation":"

Sample configuration for profile jobs only. Determines the number of rows on which the profile job will be executed. If a JobSample value is not provided, the default value will be used. The default value is CUSTOM_ROWS for the mode parameter and 20000 for the size parameter.

" } }, "documentation":"

Represents one run of a DataBrew job.

" @@ -1846,6 +1990,21 @@ "TIMEOUT" ] }, + "JobSample":{ + "type":"structure", + "members":{ + "Mode":{ + "shape":"SampleMode", + "documentation":"

Determines whether the profile job will be executed on the entire dataset or on a specified number of rows. Must be one of the following:

" + }, + "Size":{ + "shape":"JobSize", + "documentation":"

The size parameter is only required when the mode is CUSTOM_ROWS. The profile job will be executed on the specified number of rows. The maximum value for size is Long.MAX_VALUE.

Long.MAX_VALUE = 9223372036854775807

" + } + }, + "documentation":"

Sample configuration for Profile Jobs only. Determines the number of rows on which the Profile job will be executed. If a JobSample value is not provided for profile jobs, the default value will be used. The default value is CUSTOM_ROWS for the mode parameter and 20000 for the size parameter.
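A minimal sketch of setting JobSample on a profile job through boto3; the job name, dataset name, bucket, and role ARN are placeholders, and the other parameters shown come from the existing CreateProfileJob request:

```python
import boto3

databrew = boto3.client("databrew")

# Hypothetical names and role ARN. JobSample limits the profile run to
# the first 50,000 rows; Mode="FULL_DATASET" would profile every row.
databrew.create_profile_job(
    Name="orders-profile-job",
    DatasetName="orders-dataset",
    RoleArn="arn:aws:iam::111122223333:role/DataBrewServiceRole",
    OutputLocation={"Bucket": "my-databrew-output"},
    JobSample={"Mode": "CUSTOM_ROWS", "Size": 50000},
)
```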

" + }, + "JobSize":{"type":"long"}, "JobType":{ "type":"string", "enum":[ @@ -2253,7 +2412,7 @@ }, "ParameterValue":{ "type":"string", - "max":8192, + "max":12288, "min":1 }, "Preview":{"type":"boolean"}, @@ -2554,6 +2713,13 @@ }, "documentation":"

Represents the sample size and sampling type for DataBrew to use for interactive data analysis.

" }, + "SampleMode":{ + "type":"string", + "enum":[ + "FULL_DATASET", + "CUSTOM_ROWS" + ] + }, "SampleSize":{ "type":"integer", "max":5000, @@ -2911,6 +3077,10 @@ "location":"uri", "locationName":"name" }, + "Format":{ + "shape":"InputFormat", + "documentation":"

Specifies the file format of a dataset created from an S3 file or folder.

" + }, "FormatOptions":{"shape":"FormatOptions"}, "Input":{"shape":"Input"} } @@ -2939,7 +3109,7 @@ }, "EncryptionMode":{ "shape":"EncryptionMode", - "documentation":"

The encryption mode for the job, which can be one of the following:

" + "documentation":"

The encryption mode for the job, which can be one of the following:

" }, "Name":{ "shape":"JobName", @@ -2967,6 +3137,10 @@ "Timeout":{ "shape":"Timeout", "documentation":"

The job's timeout in minutes. A job that attempts to run longer than this timeout period ends with a status of TIMEOUT.

" + }, + "JobSample":{ + "shape":"JobSample", + "documentation":"

Sample configuration for Profile Jobs only. Determines the number of rows on which the Profile job will be executed. If a JobSample value is not provided for profile jobs, the default value will be used. The default value is CUSTOM_ROWS for the mode parameter and 20000 for the size parameter.

" } } }, @@ -3028,7 +3202,7 @@ }, "EncryptionMode":{ "shape":"EncryptionMode", - "documentation":"

The encryption mode for the job, which can be one of the following:

" + "documentation":"

The encryption mode for the job, which can be one of the following:

" }, "Name":{ "shape":"JobName", diff --git a/botocore/data/dataexchange/2017-07-25/service-2.json b/botocore/data/dataexchange/2017-07-25/service-2.json index 7074c6b0..b62c05d2 100644 --- a/botocore/data/dataexchange/2017-07-25/service-2.json +++ b/botocore/data/dataexchange/2017-07-25/service-2.json @@ -1379,6 +1379,50 @@ "RevisionId" ] }, + "ExportRevisionsToS3RequestDetails": { + "type": "structure", + "members": { + "DataSetId": { + "shape": "Id", + "documentation": "

The unique identifier for the data set associated with this export job.

" + }, + "Encryption": { + "shape": "ExportServerSideEncryption", + "documentation": "

Encryption configuration for the export job.

" + }, + "RevisionDestinations": { + "shape": "ListOfRevisionDestinationEntry", + "documentation": "

The destination for the revision.

" + } + }, + "documentation": "

Details of the operation to be performed by the job.

", + "required": [ + "RevisionDestinations", + "DataSetId" + ] + }, + "ExportRevisionsToS3ResponseDetails": { + "type": "structure", + "members": { + "DataSetId": { + "shape": "Id", + "documentation": "

The unique identifier for the data set associated with this export job.

" + }, + "Encryption": { + "shape": "ExportServerSideEncryption", + "documentation": "

Encryption configuration of the export job.

" + }, + "RevisionDestinations": { + "shape": "ListOfRevisionDestinationEntry", + "documentation": "

The destination in Amazon S3 where the revision is exported.

" + } + }, + "documentation": "

Details about the export revisions to Amazon S3 response.

", + "required": [ + "RevisionDestinations", + "DataSetId" + ] + }, "ExportServerSideEncryption": { "type": "structure", "members": { @@ -2034,6 +2078,13 @@ "shape": "AssetSourceEntry" } }, + "ListOfRevisionDestinationEntry": { + "type": "list", + "documentation": "

The destination where the assets in the revision will be exported.

", + "member": { + "shape": "RevisionDestinationEntry" + } + }, "ListRevisionAssetsRequest": { "type": "structure", "members": { @@ -2147,6 +2198,10 @@ "shape": "ExportAssetsToS3RequestDetails", "documentation": "

Details about the export to Amazon S3 request.

" }, + "ExportRevisionsToS3": { + "shape": "ExportRevisionsToS3RequestDetails", + "documentation": "

Details about the export to Amazon S3 request.

" + }, "ImportAssetFromSignedUrl": { "shape": "ImportAssetFromSignedUrlRequestDetails", "documentation": "

Details about the import from signed URL request.

" @@ -2203,6 +2258,10 @@ "shape": "ExportAssetsToS3ResponseDetails", "documentation": "

Details for the export to Amazon S3 response.

" }, + "ExportRevisionsToS3": { + "shape": "ExportRevisionsToS3ResponseDetails", + "documentation": "

Details for the export revisions to Amazon S3 response.

" + }, "ImportAssetFromSignedUrl": { "shape": "ImportAssetFromSignedUrlResponseDetails", "documentation": "

Details for the import from signed URL response.

" @@ -2214,6 +2273,28 @@ }, "documentation": "

Details for the response.

" }, + "RevisionDestinationEntry": { + "type": "structure", + "members": { + "Bucket": { + "shape": "__string", + "documentation": "

The S3 bucket that is the destination for the assets in the revision.

" + }, + "KeyPattern": { + "shape": "__string", + "documentation": "

A string representing the pattern for generated names of the individual assets in the revision. For more information about key patterns, see Key patterns when exporting revisions.

" + }, + "RevisionId": { + "shape": "Id", + "documentation": "

The unique identifier for the revision.

" + } + }, + "documentation": "

The destination where the assets in the revision will be exported.
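To tie the new job type and shapes together, a hedged sketch of exporting a revision to S3 with boto3; the data set ID, revision ID, bucket, and key pattern are placeholders:

```python
import boto3

dx = boto3.client("dataexchange")

# Hypothetical identifiers; RevisionDestinations maps each revision to a
# bucket and an optional key pattern for the exported asset names.
job = dx.create_job(
    Type="EXPORT_REVISIONS_TO_S3",
    Details={
        "ExportRevisionsToS3": {
            "DataSetId": "example-data-set-id",
            "RevisionDestinations": [
                {
                    "Bucket": "my-export-bucket",
                    "RevisionId": "example-revision-id",
                    "KeyPattern": "exports/${Asset.Name}",
                }
            ],
        }
    },
)

# Jobs are asynchronous, so the job is started after it is created.
dx.start_job(JobId=job["Id"])
```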

", + "required": [ + "Bucket", + "RevisionId" + ] + }, "RevisionEntry": { "type": "structure", "members": { @@ -2383,7 +2464,8 @@ "IMPORT_ASSETS_FROM_S3", "IMPORT_ASSET_FROM_SIGNED_URL", "EXPORT_ASSETS_TO_S3", - "EXPORT_ASSET_TO_SIGNED_URL" + "EXPORT_ASSET_TO_SIGNED_URL", + "EXPORT_REVISIONS_TO_S3" ] }, "UntagResourceRequest": { @@ -2704,7 +2786,7 @@ "type": "string", "min": 24, "max": 24, - "pattern": "/^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$/" + "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$" } }, "documentation": "

AWS Data Exchange is a service that makes it easy for AWS customers to exchange data in the cloud. You can use the AWS Data Exchange APIs to create, update, manage, and access file-based data sets in the AWS Cloud.

As a subscriber, you can view and access the data sets that you have an entitlement to through a subscription. You can use the APIs to download or copy your entitled data sets to Amazon S3 for use across a variety of AWS analytics and machine learning services.

As a provider, you can create and manage your data sets that you would like to publish to a product. Being able to package and provide your data sets into products requires a few steps to determine eligibility. For more information, visit the AWS Data Exchange User Guide.

A data set is a collection of data that can be changed or updated over time. Data sets can be updated using revisions, which represent a new version or incremental change to a data set. A revision contains one or more assets. An asset in AWS Data Exchange is a piece of data that can be stored as an Amazon S3 object. The asset can be a structured data file, an image file, or some other data file. Jobs are asynchronous import or export operations used to create or copy assets.

" diff --git a/botocore/data/datasync/2018-11-09/service-2.json b/botocore/data/datasync/2018-11-09/service-2.json index bfbe17ba..0728ca3d 100644 --- a/botocore/data/datasync/2018-11-09/service-2.json +++ b/botocore/data/datasync/2018-11-09/service-2.json @@ -68,7 +68,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Creates an endpoint for an Amazon FSx for Windows file system.

" + "documentation":"

Creates an endpoint for an Amazon FSx for Windows File Server file system.

" }, "CreateLocationNfs":{ "name":"CreateLocationNfs", @@ -138,7 +138,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Creates a task. A task is a set of two locations (source and destination) and a set of Options that you use to control the behavior of a task. If you don't specify Options when you create a task, AWS DataSync populates them with service defaults.

When you create a task, it first enters the CREATING state. During CREATING AWS DataSync attempts to mount the on-premises Network File System (NFS) location. The task transitions to the AVAILABLE state without waiting for the AWS location to become mounted. If required, AWS DataSync mounts the AWS location before each task execution.

If an agent that is associated with a source (NFS) location goes offline, the task transitions to the UNAVAILABLE status. If the status of the task remains in the CREATING status for more than a few minutes, it means that your agent might be having trouble mounting the source NFS file system. Check the task's ErrorCode and ErrorDetail. Mount issues are often caused by either a misconfigured firewall or a mistyped NFS server hostname.

" + "documentation":"

Creates a task.

A task includes a source location and a destination location, and a configuration that specifies how data is transferred. A task always transfers data from the source location to the destination location. The configuration specifies options such as task scheduling, bandwidth limits, etc. A task is the complete definition of a data transfer.

When you create a task that transfers data between AWS services in different AWS Regions, one of the two locations that you specify must reside in the Region where DataSync is being used. The other location must be specified in a different Region.

You can transfer data between commercial AWS Regions except for China, or between AWS GovCloud (US-East and US-West) Regions.

When you use DataSync to copy files or objects between AWS Regions, you pay for data transfer between Regions. This is billed as data transfer OUT from your source Region to your destination Region. For more information, see Data Transfer pricing.

" }, "DeleteAgent":{ "name":"DeleteAgent", @@ -222,7 +222,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Returns metadata, such as the path information about an Amazon FSx for Windows location.

" + "documentation":"

Returns metadata, such as the path information about an Amazon FSx for Windows File Server location.

" }, "DescribeLocationNfs":{ "name":"DescribeLocationNfs", @@ -434,6 +434,48 @@ ], "documentation":"

Updates the name of an agent.

" }, + "UpdateLocationNfs":{ + "name":"UpdateLocationNfs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateLocationNfsRequest"}, + "output":{"shape":"UpdateLocationNfsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ], + "documentation":"

Updates some of the parameters of a previously created location for Network File System (NFS) access. For information about creating an NFS location, see create-nfs-location.

" + }, + "UpdateLocationObjectStorage":{ + "name":"UpdateLocationObjectStorage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateLocationObjectStorageRequest"}, + "output":{"shape":"UpdateLocationObjectStorageResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ], + "documentation":"

Updates some of the parameters of a previously created location for self-managed object storage server access. For information about creating a self-managed object storage location, see create-object-location.

" + }, + "UpdateLocationSmb":{ + "name":"UpdateLocationSmb", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateLocationSmbRequest"}, + "output":{"shape":"UpdateLocationSmbResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ], + "documentation":"

Updates some of the parameters of a previously created location for Server Message Block (SMB) file system access. For information about creating an SMB location, see create-smb-location.

" + }, "UpdateTask":{ "name":"UpdateTask", "http":{ @@ -460,7 +502,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Updates execution of a task.

You can modify bandwidth throttling for a task execution that is running or queued. For more information, see Adjusting Bandwidth Throttling for a Task Execution.

The only Option that can be modified by UpdateTaskExecution is BytesPerSecond .

" + "documentation":"

Updates execution of a task.

You can modify bandwidth throttling for a task execution that is running or queued. For more information, see Adjusting Bandwidth Throttling for a Task Execution.

The only Option that can be modified by UpdateTaskExecution is BytesPerSecond .

" } }, "shapes":{ @@ -624,15 +666,15 @@ "members":{ "Subdirectory":{ "shape":"FsxWindowsSubdirectory", - "documentation":"

A subdirectory in the location’s path. This subdirectory in the Amazon FSx for Windows file system is used to read data from the Amazon FSx for Windows source location or write data to the FSx for Windows destination.

" + "documentation":"

A subdirectory in the location’s path. This subdirectory in the Amazon FSx for Windows File Server file system is used to read data from the Amazon FSx for Windows File Server source location or write data to the FSx for Windows File Server destination.

" }, "FsxFilesystemArn":{ "shape":"FsxFilesystemArn", - "documentation":"

The Amazon Resource Name (ARN) for the FSx for Windows file system.

" + "documentation":"

The Amazon Resource Name (ARN) for the FSx for Windows File Server file system.

" }, "SecurityGroupArns":{ "shape":"Ec2SecurityGroupArnList", - "documentation":"

The Amazon Resource Names (ARNs) of the security groups that are to use to configure the FSx for Windows file system.

" + "documentation":"

The Amazon Resource Names (ARNs) of the security groups that are used to configure the FSx for Windows File Server file system.

" }, "Tags":{ "shape":"InputTagList", @@ -640,15 +682,15 @@ }, "User":{ "shape":"SmbUser", - "documentation":"

The user who has the permissions to access files and folders in the FSx for Windows file system.

" + "documentation":"

The user who has the permissions to access files and folders in the FSx for Windows File Server file system.

" }, "Domain":{ "shape":"SmbDomain", - "documentation":"

The name of the Windows domain that the FSx for Windows server belongs to.

" + "documentation":"

The name of the Windows domain that the FSx for Windows File Server belongs to.

" }, "Password":{ "shape":"SmbPassword", - "documentation":"

The password of the user who has the permissions to access files and folders in the FSx for Windows file system.

" + "documentation":"

The password of the user who has the permissions to access files and folders in the FSx for Windows File Server file system.

" } } }, @@ -657,7 +699,7 @@ "members":{ "LocationArn":{ "shape":"LocationArn", - "documentation":"

The Amazon Resource Name (ARN) of the FSx for Windows file system location that is created.

" + "documentation":"

The Amazon Resource Name (ARN) of the FSx for Windows File Server file system location that is created.

" } } }, @@ -1036,7 +1078,7 @@ "members":{ "LocationArn":{ "shape":"LocationArn", - "documentation":"

The Amazon Resource Name (ARN) of the FSx for Windows location to describe.

" + "documentation":"

The Amazon Resource Name (ARN) of the FSx for Windows File Server location to describe.

" } } }, @@ -1045,27 +1087,27 @@ "members":{ "LocationArn":{ "shape":"LocationArn", - "documentation":"

The Amazon Resource Name (ARN) of the FSx for Windows location that was described.

" + "documentation":"

The Amazon Resource Name (ARN) of the FSx for Windows File Server location that was described.

" }, "LocationUri":{ "shape":"LocationUri", - "documentation":"

The URL of the FSx for Windows location that was described.

" + "documentation":"

The URL of the FSx for Windows File Server location that was described.

" }, "SecurityGroupArns":{ "shape":"Ec2SecurityGroupArnList", - "documentation":"

The Amazon Resource Names (ARNs) of the security groups that are configured for the FSx for Windows file system.

" + "documentation":"

The Amazon Resource Names (ARNs) of the security groups that are configured for the FSx for Windows File Server file system.

" }, "CreationTime":{ "shape":"Time", - "documentation":"

The time that the FSx for Windows location was created.

" + "documentation":"

The time that the FSx for Windows File Server location was created.

" }, "User":{ "shape":"SmbUser", - "documentation":"

The user who has the permissions to access files and folders in the FSx for Windows file system.

" + "documentation":"

The user who has the permissions to access files and folders in the FSx for Windows File Server file system.

" }, "Domain":{ "shape":"SmbDomain", - "documentation":"

The name of the Windows domain that the FSx for Windows server belongs to.

" + "documentation":"

The name of the Windows domain that the FSx for Windows File Server belongs to.

" } } }, @@ -2396,6 +2438,102 @@ "members":{ } }, + "UpdateLocationNfsRequest":{ + "type":"structure", + "required":["LocationArn"], + "members":{ + "LocationArn":{ + "shape":"LocationArn", + "documentation":"

The Amazon Resource Name (ARN) of the NFS location to update.

" + }, + "Subdirectory":{ + "shape":"NfsSubdirectory", + "documentation":"

The subdirectory in the NFS file system that is used to read data from the NFS source location or write data to the NFS destination. The NFS path should be a path that's exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network.

To see all the paths exported by your NFS server, run \"showmount -e nfs-server-name\" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication.

To transfer all the data in the folder that you specified, DataSync must have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash, or ensure that the files you want DataSync to access have permissions that allow read access for all users. Doing either option enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access.

If you are copying data to or from your AWS Snowcone device, see NFS Server on AWS Snowcone for more information.

For information about NFS export configuration, see 18.7. The /etc/exports Configuration File in the Red Hat Enterprise Linux documentation.
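A minimal sketch of the new UpdateLocationNfs call via boto3; the location ARN, subdirectory, and agent ARN are placeholders:

```python
import boto3

datasync = boto3.client("datasync")

# Hypothetical ARNs; repoint the existing NFS location at a different
# exported subdirectory served by the same on-premises agent.
datasync.update_location_nfs(
    LocationArn="arn:aws:datasync:us-east-1:111122223333:location/loc-0123456789abcdef0",
    Subdirectory="/exports/projects",
    OnPremConfig={
        "AgentArns": [
            "arn:aws:datasync:us-east-1:111122223333:agent/agent-0123456789abcdef0"
        ]
    },
)
```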

" + }, + "OnPremConfig":{"shape":"OnPremConfig"}, + "MountOptions":{"shape":"NfsMountOptions"} + } + }, + "UpdateLocationNfsResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateLocationObjectStorageRequest":{ + "type":"structure", + "required":["LocationArn"], + "members":{ + "LocationArn":{ + "shape":"LocationArn", + "documentation":"

The Amazon Resource Name (ARN) of the self-managed object storage server location to be updated.

" + }, + "ServerPort":{ + "shape":"ObjectStorageServerPort", + "documentation":"

The port that your self-managed object storage server accepts inbound network traffic on. The server port is set by default to TCP 80 (HTTP) or TCP 443 (HTTPS). You can specify a custom port if your self-managed object storage server requires one.

" + }, + "ServerProtocol":{ + "shape":"ObjectStorageServerProtocol", + "documentation":"

The protocol that the object storage server uses to communicate. Valid values are HTTP or HTTPS.

" + }, + "Subdirectory":{ + "shape":"S3Subdirectory", + "documentation":"

The subdirectory in the self-managed object storage server that is used to read data from.

" + }, + "AccessKey":{ + "shape":"ObjectStorageAccessKey", + "documentation":"

Optional. The access key is used if credentials are required to access the self-managed object storage server. If your object storage requires a user name and password to authenticate, use AccessKey and SecretKey to provide the user name and password, respectively.

" + }, + "SecretKey":{ + "shape":"ObjectStorageSecretKey", + "documentation":"

Optional. The secret key is used if credentials are required to access the self-managed object storage server. If your object storage requires a user name and password to authenticate, use AccessKey and SecretKey to provide the user name and password, respectively.

" + }, + "AgentArns":{ + "shape":"AgentArnList", + "documentation":"

The Amazon Resource Name (ARN) of the agents associated with the self-managed object storage server location.
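Similarly, a sketch of UpdateLocationObjectStorage, with placeholder ARNs and credentials:

```python
import boto3

datasync = boto3.client("datasync")

# Hypothetical ARNs and credentials; switch the location to HTTPS on a
# custom port and rotate the access key pair.
datasync.update_location_object_storage(
    LocationArn="arn:aws:datasync:us-east-1:111122223333:location/loc-0123456789abcdef0",
    ServerProtocol="HTTPS",
    ServerPort=8443,
    Subdirectory="/backups",
    AccessKey="EXAMPLEACCESSKEY",
    SecretKey="example-secret-key",
    AgentArns=[
        "arn:aws:datasync:us-east-1:111122223333:agent/agent-0123456789abcdef0"
    ],
)
```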

" + } + } + }, + "UpdateLocationObjectStorageResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateLocationSmbRequest":{ + "type":"structure", + "required":["LocationArn"], + "members":{ + "LocationArn":{ + "shape":"LocationArn", + "documentation":"

The Amazon Resource Name (ARN) of the SMB location to update.

" + }, + "Subdirectory":{ + "shape":"SmbSubdirectory", + "documentation":"

The subdirectory in the SMB file system that is used to read data from the SMB source location or write data to the SMB destination. The SMB path should be a path that's exported by the SMB server, or a subdirectory of that path. The path should be such that it can be mounted by other SMB clients in your network.

Subdirectory must be specified with forward slashes. For example, /path/to/folder.

To transfer all the data in the folder that you specified, DataSync must have permissions to mount the SMB share and to access all the data in that share. To ensure this, do either of the following:

Doing either of these options enables the agent to access the data. For the agent to access directories, you must also enable all execute access.

" + }, + "User":{ + "shape":"SmbUser", + "documentation":"

The user who can mount the share and has the permissions to access files and folders in the SMB share.

" + }, + "Domain":{ + "shape":"SmbDomain", + "documentation":"

The name of the Windows domain that the SMB server belongs to.

" + }, + "Password":{ + "shape":"SmbPassword", + "documentation":"

The password of the user who can mount the share and has the permissions to access files and folders in the SMB share.

" + }, + "AgentArns":{ + "shape":"AgentArnList", + "documentation":"

The Amazon Resource Names (ARNs) of agents to use for a Server Message Block (SMB) location.
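And a sketch of UpdateLocationSmb, again with placeholder ARNs and credentials:

```python
import boto3

datasync = boto3.client("datasync")

# Hypothetical ARNs and credentials; update the share path and the user
# that DataSync mounts the SMB share as.
datasync.update_location_smb(
    LocationArn="arn:aws:datasync:us-east-1:111122223333:location/loc-0123456789abcdef0",
    Subdirectory="/path/to/folder",
    User="backup-operator",
    Domain="CORP",
    Password="example-password",
    AgentArns=[
        "arn:aws:datasync:us-east-1:111122223333:agent/agent-0123456789abcdef0"
    ],
)
```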

" + }, + "MountOptions":{"shape":"SmbMountOptions"} + } + }, + "UpdateLocationSmbResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateTaskExecutionRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/detective/2018-10-26/service-2.json b/botocore/data/detective/2018-10-26/service-2.json index 67d527b7..a6919577 100644 --- a/botocore/data/detective/2018-10-26/service-2.json +++ b/botocore/data/detective/2018-10-26/service-2.json @@ -39,7 +39,7 @@ {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Creates a new behavior graph for the calling account, and sets that account as the master account. This operation is called by the account that is enabling Detective.

Before you try to enable Detective, make sure that your account has been enrolled in Amazon GuardDuty for at least 48 hours. If you do not meet this requirement, you cannot enable Detective. If you do meet the GuardDuty prerequisite, then when you make the request to enable Detective, it checks whether your data volume is within the Detective quota. If it exceeds the quota, then you cannot enable Detective.

The operation also enables Detective for the calling account in the currently selected Region. It returns the ARN of the new behavior graph.

CreateGraph triggers a process to create the corresponding data tables for the new behavior graph.

An account can only be the master account for one behavior graph within a Region. If the same account calls CreateGraph with the same master account, it always returns the same behavior graph ARN. It does not create a new behavior graph.

" + "documentation":"

Creates a new behavior graph for the calling account, and sets that account as the administrator account. This operation is called by the account that is enabling Detective.

Before you try to enable Detective, make sure that your account has been enrolled in Amazon GuardDuty for at least 48 hours. If you do not meet this requirement, you cannot enable Detective. If you do meet the GuardDuty prerequisite, then when you make the request to enable Detective, it checks whether your data volume is within the Detective quota. If it exceeds the quota, then you cannot enable Detective.

The operation also enables Detective for the calling account in the currently selected Region. It returns the ARN of the new behavior graph.

CreateGraph triggers a process to create the corresponding data tables for the new behavior graph.

An account can only be the administrator account for one behavior graph within a Region. If the same account calls CreateGraph with the same administrator account, it always returns the same behavior graph ARN. It does not create a new behavior graph.

" }, "CreateMembers":{ "name":"CreateMembers", @@ -55,7 +55,7 @@ {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Sends a request to invite the specified AWS accounts to be member accounts in the behavior graph. This operation can only be called by the master account for a behavior graph.

CreateMembers verifies the accounts and then sends invitations to the verified accounts.

The request provides the behavior graph ARN and the list of accounts to invite.

The response separates the requested accounts into two lists:

" + "documentation":"

Sends a request to invite the specified AWS accounts to be member accounts in the behavior graph. This operation can only be called by the administrator account for a behavior graph.

CreateMembers verifies the accounts and then invites the verified accounts. The administrator can optionally specify to not send invitation emails to the member accounts. This would be used when the administrator manages their member accounts centrally.

The request provides the behavior graph ARN and the list of accounts to invite.

The response separates the requested accounts into two lists:

" }, "DeleteGraph":{ "name":"DeleteGraph", @@ -69,7 +69,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"} ], - "documentation":"

Disables the specified behavior graph and queues it to be deleted. This operation removes the graph from each member account's list of behavior graphs.

DeleteGraph can only be called by the master account for a behavior graph.

" + "documentation":"

Disables the specified behavior graph and queues it to be deleted. This operation removes the graph from each member account's list of behavior graphs.

DeleteGraph can only be called by the administrator account for a behavior graph.

" }, "DeleteMembers":{ "name":"DeleteMembers", @@ -85,7 +85,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"} ], - "documentation":"

Deletes one or more member accounts from the master account behavior graph. This operation can only be called by a Detective master account. That account cannot use DeleteMembers to delete their own account from the behavior graph. To disable a behavior graph, the master account uses the DeleteGraph API method.

" + "documentation":"

Deletes one or more member accounts from the administrator account's behavior graph. This operation can only be called by a Detective administrator account. That account cannot use DeleteMembers to delete their own account from the behavior graph. To disable a behavior graph, the administrator account uses the DeleteGraph API method.

" }, "DisassociateMembership":{ "name":"DisassociateMembership", @@ -129,7 +129,7 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns the list of behavior graphs that the calling account is a master of. This operation can only be called by a master account.

Because an account can currently only be the master of one behavior graph within a Region, the results always contain a single graph.

" + "documentation":"

Returns the list of behavior graphs that the calling account is an administrator account of. This operation can only be called by an administrator account.

Because an account can currently only be the administrator of one behavior graph within a Region, the results always contain a single behavior graph.

" }, "ListInvitations":{ "name":"ListInvitations", @@ -219,7 +219,7 @@ "documentation":"

The AWS account root user email address for the AWS account.

" } }, - "documentation":"

An AWS account that is the master of or a member of a behavior graph.

" + "documentation":"

An AWS account that is the administrator account of or a member of a behavior graph.

" }, "AccountId":{ "type":"string", @@ -239,6 +239,7 @@ "max":50, "min":1 }, + "Boolean":{"type":"boolean"}, "ConflictException":{ "type":"structure", "members":{ @@ -272,6 +273,10 @@ "shape":"EmailMessage", "documentation":"

Customized message text to include in the invitation email message to the invited member accounts.

" }, + "DisableEmailNotification":{ + "shape":"Boolean", + "documentation":"

If set to true, then the member accounts do not receive email notifications. By default, this is set to false, and the member accounts receive email notifications.

" + }, "Accounts":{ "shape":"AccountList", "documentation":"

The list of AWS accounts to invite to become member accounts in the behavior graph. For each invited account, the account list contains the account identifier and the AWS account root user email address.
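A minimal sketch of inviting a member without the email notification, using boto3; the graph ARN, account ID, and email address are placeholders:

```python
import boto3

detective = boto3.client("detective")

# Hypothetical graph ARN and member account; the invitation email is
# suppressed because the accounts are managed centrally.
detective.create_members(
    GraphArn="arn:aws:detective:us-east-1:111122223333:graph:123412341234",
    Message="Please join the security behavior graph.",
    DisableEmailNotification=True,
    Accounts=[
        {"AccountId": "444455556666", "EmailAddress": "security@example.com"}
    ],
)
```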

" @@ -432,7 +437,7 @@ "members":{ "GraphList":{ "shape":"GraphList", - "documentation":"

A list of behavior graphs that the account is a master for.

" + "documentation":"

A list of behavior graphs that the account is an administrator account for.

" }, "NextToken":{ "shape":"PaginationToken", @@ -514,7 +519,13 @@ }, "MasterId":{ "shape":"AccountId", - "documentation":"

The AWS account identifier of the master account for the behavior graph.

" + "documentation":"

Deprecated. Instead of MasterId, use AdministratorId.

The AWS account identifier of the administrator account for the behavior graph.

", + "deprecated":true, + "deprecatedMessage":"This property is deprecated, use AdministratorId instead." + }, + "AdministratorId":{ + "shape":"AccountId", + "documentation":"

The AWS account identifier of the administrator account for the behavior graph.

" }, "Status":{ "shape":"MemberStatus", @@ -621,7 +632,10 @@ } } }, - "Timestamp":{"type":"timestamp"}, + "Timestamp":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, "UnprocessedAccount":{ "type":"structure", "members":{ @@ -651,5 +665,5 @@ "exception":true } }, - "documentation":"

Detective uses machine learning and purpose-built visualizations to help you analyze and investigate security issues across your Amazon Web Services (AWS) workloads. Detective automatically extracts time-based events such as login attempts, API calls, and network traffic from AWS CloudTrail and Amazon Virtual Private Cloud (Amazon VPC) flow logs. It also extracts findings detected by Amazon GuardDuty.

The Detective API primarily supports the creation and management of behavior graphs. A behavior graph contains the extracted data from a set of member accounts, and is created and managed by a master account.

Every behavior graph is specific to a Region. You can only use the API to manage graphs that belong to the Region that is associated with the currently selected endpoint.

A Detective master account can use the Detective API to do the following:

A member account can use the Detective API to do the following:

All API actions are logged as CloudTrail events. See Logging Detective API Calls with CloudTrail.

" + "documentation":"

Detective uses machine learning and purpose-built visualizations to help you analyze and investigate security issues across your Amazon Web Services (AWS) workloads. Detective automatically extracts time-based events such as login attempts, API calls, and network traffic from AWS CloudTrail and Amazon Virtual Private Cloud (Amazon VPC) flow logs. It also extracts findings detected by Amazon GuardDuty.

The Detective API primarily supports the creation and management of behavior graphs. A behavior graph contains the extracted data from a set of member accounts, and is created and managed by an administrator account.

Every behavior graph is specific to a Region. You can only use the API to manage graphs that belong to the Region that is associated with the currently selected endpoint.

A Detective administrator account can use the Detective API to do the following:

A member account can use the Detective API to do the following:

All API actions are logged as CloudTrail events. See Logging Detective API Calls with CloudTrail.

We replaced the term \"master account\" with the term \"administrator account.\" An administrator account is used to centrally manage multiple accounts. In the case of Detective, the administrator account manages the accounts in their behavior graph.

" } diff --git a/botocore/data/devops-guru/2020-12-01/service-2.json b/botocore/data/devops-guru/2020-12-01/service-2.json index d4125f04..4110706d 100644 --- a/botocore/data/devops-guru/2020-12-01/service-2.json +++ b/botocore/data/devops-guru/2020-12-01/service-2.json @@ -84,6 +84,24 @@ ], "documentation":"

Returns details about an anomaly that you specify using its ID.

" }, + "DescribeFeedback":{ + "name":"DescribeFeedback", + "http":{ + "method":"POST", + "requestUri":"/feedback", + "responseCode":200 + }, + "input":{"shape":"DescribeFeedbackRequest"}, + "output":{"shape":"DescribeFeedbackResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns the most recent feedback submitted in the current AWS account and Region.

" + }, "DescribeInsight":{ "name":"DescribeInsight", "http":{ @@ -643,6 +661,21 @@ } } }, + "DescribeFeedbackRequest":{ + "type":"structure", + "members":{ + "InsightId":{ + "shape":"InsightId", + "documentation":"

The ID of the insight for which the feedback was provided.
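A minimal sketch of the new DescribeFeedback call via boto3; the insight ID is a placeholder:

```python
import boto3

devops_guru = boto3.client("devops-guru")

# Hypothetical insight ID; the response carries the most recent
# InsightFeedback recorded in the current account and Region.
response = devops_guru.describe_feedback(InsightId="AD1EXAMPLEINSIGHTID")
print(response.get("InsightFeedback"))
```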

" + } + } + }, + "DescribeFeedbackResponse":{ + "type":"structure", + "members":{ + "InsightFeedback":{"shape":"InsightFeedback"} + } + }, "DescribeInsightRequest":{ "type":"structure", "required":["Id"], diff --git a/botocore/data/directconnect/2012-10-25/service-2.json b/botocore/data/directconnect/2012-10-25/service-2.json index 2329cae8..fb8fcfa0 100644 --- a/botocore/data/directconnect/2012-10-25/service-2.json +++ b/botocore/data/directconnect/2012-10-25/service-2.json @@ -215,7 +215,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Creates a BGP peer on the specified virtual interface.

You must create a BGP peer for the corresponding address family (IPv4/IPv6) in order to access AWS resources that also use that address family.

If logical redundancy is not supported by the connection, interconnect, or LAG, the BGP peer cannot be in the same address family as an existing BGP peer on the virtual interface.

When creating a IPv6 BGP peer, omit the Amazon address and customer address. IPv6 addresses are automatically assigned from the Amazon pool of IPv6 addresses; you cannot specify custom IPv6 addresses.

For a public virtual interface, the Autonomous System Number (ASN) must be private or already whitelisted for the virtual interface.

" + "documentation":"

Creates a BGP peer on the specified virtual interface.

You must create a BGP peer for the corresponding address family (IPv4/IPv6) in order to access AWS resources that also use that address family.

If logical redundancy is not supported by the connection, interconnect, or LAG, the BGP peer cannot be in the same address family as an existing BGP peer on the virtual interface.

When creating a IPv6 BGP peer, omit the Amazon address and customer address. IPv6 addresses are automatically assigned from the Amazon pool of IPv6 addresses; you cannot specify custom IPv6 addresses.

For a public virtual interface, the Autonomous System Number (ASN) must be private or already on the allow list for the virtual interface.

" }, "CreateConnection":{ "name":"CreateConnection", @@ -537,7 +537,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Lists the associations between your Direct Connect gateways and virtual private gateways. You must specify a Direct Connect gateway, a virtual private gateway, or both. If you specify a Direct Connect gateway, the response contains all virtual private gateways associated with the Direct Connect gateway. If you specify a virtual private gateway, the response contains all Direct Connect gateways associated with the virtual private gateway. If you specify both, the response contains the association between the Direct Connect gateway and the virtual private gateway.

" + "documentation":"

Lists the associations between your Direct Connect gateways and virtual private gateways and transit gateways. You must specify one of the following:

" }, "DescribeDirectConnectGatewayAttachments":{ "name":"DescribeDirectConnectGatewayAttachments", @@ -1899,7 +1899,7 @@ }, "virtualGatewayId":{ "shape":"VirtualGatewayId", - "documentation":"

The ID of the virtual private gateway.

" + "documentation":"

The ID of the virtual private gateway or transit gateway.

" } } }, diff --git a/botocore/data/dlm/2018-01-12/service-2.json b/botocore/data/dlm/2018-01-12/service-2.json index b155015a..8a15571b 100644 --- a/botocore/data/dlm/2018-01-12/service-2.json +++ b/botocore/data/dlm/2018-01-12/service-2.json @@ -241,6 +241,10 @@ "CreateRule":{ "type":"structure", "members":{ + "Location":{ + "shape":"LocationValues", + "documentation":"

Specifies the destination for snapshots created by the policy. To create snapshots in the same Region as the source resource, specify CLOUD. To create snapshots on the same Outpost as the source resource, specify OUTPOST_LOCAL. If you omit this parameter, CLOUD is used by default.

If the policy targets resources in an AWS Region, then you must create snapshots in the same Region as the source resource.

If the policy targets resources on an Outpost, then you can create snapshots on the same Outpost as the source resource, or in the Region of that Outpost.

" + }, "Interval":{ "shape":"Interval", "documentation":"

The interval between snapshots. The supported values are 1, 2, 3, 4, 6, 8, 12, and 24.

" @@ -307,14 +311,15 @@ }, "CrossRegionCopyRule":{ "type":"structure", - "required":[ - "TargetRegion", - "Encrypted" - ], + "required":["Encrypted"], "members":{ "TargetRegion":{ "shape":"TargetRegion", - "documentation":"

The target Region.

" + "documentation":"

The target Region for the snapshot copies.

If you specify a target Region, you must omit Target. You cannot specify a target Region and a target Outpost in the same rule.

" + }, + "Target":{ + "shape":"Target", + "documentation":"

The Amazon Resource Name (ARN) of the target AWS Outpost for the snapshot copies.

If you specify an ARN, you must omit TargetRegion. You cannot specify a target Region and a target Outpost in the same rule.

" }, "Encrypted":{ "shape":"Encrypted", @@ -678,6 +683,13 @@ } } }, + "LocationValues":{ + "type":"string", + "enum":[ + "CLOUD", + "OUTPOST_LOCAL" + ] + }, "NoReboot":{"type":"boolean"}, "Parameter":{"type":"string"}, "ParameterList":{ @@ -721,6 +733,10 @@ "shape":"ResourceTypeValuesList", "documentation":"

The target resource type for snapshot and AMI lifecycle policies. Use VOLUME to create snapshots of individual volumes or use INSTANCE to create multi-volume snapshots from the volumes for an instance.

This parameter is required for snapshot and AMI policies only. If you are creating an event-based policy, omit this parameter.

" }, + "ResourceLocations":{ + "shape":"ResourceLocationList", + "documentation":"

The location of the resources to backup. If the source resources are located in an AWS Region, specify CLOUD. If the source resources are located on an AWS Outpost in your account, specify OUTPOST.

If you specify OUTPOST, Amazon Data Lifecycle Manager backs up all resources of the specified type with matching target tags across all of the Outposts in your account.
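A hedged sketch of a policy that snapshots Outpost volumes locally; the role ARN and tag are placeholders, and the overall PolicyDetails/Schedules layout is assumed from the existing CreateLifecyclePolicy request shape:

```python
import boto3

dlm = boto3.client("dlm")

# Hypothetical role ARN and tag; back up tagged volumes that live on
# Outposts and keep the snapshots on the same Outpost (OUTPOST_LOCAL).
dlm.create_lifecycle_policy(
    ExecutionRoleArn="arn:aws:iam::111122223333:role/AWSDataLifecycleManagerDefaultRole",
    Description="Local snapshots of Outpost volumes",
    State="ENABLED",
    PolicyDetails={
        "PolicyType": "EBS_SNAPSHOT_MANAGEMENT",
        "ResourceTypes": ["VOLUME"],
        "ResourceLocations": ["OUTPOST"],
        "TargetTags": [{"Key": "backup", "Value": "true"}],
        "Schedules": [
            {
                "Name": "DailyOutpostSnapshots",
                "CreateRule": {
                    "Location": "OUTPOST_LOCAL",
                    "Interval": 24,
                    "IntervalUnit": "HOURS",
                },
                "RetainRule": {"Count": 7},
            }
        ],
    },
)
```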

" + }, "TargetTags":{ "shape":"TargetTagList", "documentation":"

The single tag that identifies targeted resources for this policy.

This parameter is required for snapshot and AMI policies only. If you are creating an event-based policy, omit this parameter.

" @@ -762,6 +778,19 @@ "EVENT_BASED_POLICY" ] }, + "ResourceLocationList":{ + "type":"list", + "member":{"shape":"ResourceLocationValues"}, + "max":1, + "min":1 + }, + "ResourceLocationValues":{ + "type":"string", + "enum":[ + "CLOUD", + "OUTPOST" + ] + }, "ResourceNotFoundException":{ "type":"structure", "members":{ @@ -853,7 +882,7 @@ }, "CrossRegionCopyRules":{ "shape":"CrossRegionCopyRules", - "documentation":"

The rule for cross-Region snapshot copies.

" + "documentation":"

The rule for cross-Region snapshot copies.

You can only specify cross-Region copy rules for policies that create snapshots in a Region. If the policy creates snapshots on an Outpost, then you cannot copy the snapshots to a Region or to an Outpost. If the policy creates snapshots in a Region, then snapshots can be copied to up to three Regions or Outposts.

" }, "ShareRules":{ "shape":"ShareRules", @@ -1015,9 +1044,9 @@ }, "Target":{ "type":"string", - "max":16, + "max":2048, "min":0, - "pattern":"^[\\\\w:\\\\-\\\\/\\\\*]+$" + "pattern":"^[\\w:\\-\\/\\*]+$" }, "TargetRegion":{ "type":"string", diff --git a/botocore/data/ec2/2016-11-15/paginators-1.json b/botocore/data/ec2/2016-11-15/paginators-1.json index b07ca371..12fd5084 100644 --- a/botocore/data/ec2/2016-11-15/paginators-1.json +++ b/botocore/data/ec2/2016-11-15/paginators-1.json @@ -583,6 +583,12 @@ "limit_key": "MaxResults", "output_token": "NextToken", "result_key": "TransitGatewayConnects" + }, + "DescribeAddressesAttribute": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Addresses" } } } diff --git a/botocore/data/ec2/2016-11-15/service-2.json b/botocore/data/ec2/2016-11-15/service-2.json index 11c37da1..e77b23ea 100644 --- a/botocore/data/ec2/2016-11-15/service-2.json +++ b/botocore/data/ec2/2016-11-15/service-2.json @@ -424,7 +424,7 @@ }, "input":{"shape":"CopyImageRequest"}, "output":{"shape":"CopyImageResult"}, - "documentation":"

Initiates the copy of an AMI from the specified source Region to the current Region. You specify the destination Region by using its endpoint when making the request.

Copies of encrypted backing snapshots for the AMI are encrypted. Copies of unencrypted backing snapshots remain unencrypted, unless you set Encrypted during the copy operation. You cannot create an unencrypted copy of an encrypted backing snapshot.

For more information about the prerequisites and limits when copying an AMI, see Copying an AMI in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Initiates the copy of an AMI. You can copy an AMI from one Region to another, or from a Region to an AWS Outpost. You can't copy an AMI from an Outpost to a Region, from one Outpost to another, or within the same Outpost.

To copy an AMI from one Region to another, specify the source Region using the SourceRegion parameter, and specify the destination Region using its endpoint. Copies of encrypted backing snapshots for the AMI are encrypted. Copies of unencrypted backing snapshots remain unencrypted, unless you set Encrypted during the copy operation. You cannot create an unencrypted copy of an encrypted backing snapshot.

To copy an AMI from a Region to an Outpost, specify the source Region using the SourceRegion parameter, and specify the ARN of the destination Outpost using DestinationOutpostArn. Backing snapshots copied to an Outpost are encrypted by default using the default encryption key for the Region, or a different key that you specify in the request using KmsKeyId. Outposts do not support unencrypted snapshots. For more information, see Amazon EBS local snapshots on Outposts in the Amazon Elastic Compute Cloud User Guide.

For more information about the prerequisites and limits when copying an AMI, see Copying an AMI in the Amazon Elastic Compute Cloud User Guide.
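A minimal sketch of copying an AMI to an Outpost with boto3; the AMI ID and Outpost ARN are placeholders, and the client Region shown is assumed to be the Region associated with the destination Outpost:

```python
import boto3

# The request is made against the destination Region's endpoint.
ec2 = boto3.client("ec2", region_name="us-west-2")

# Hypothetical AMI ID and Outpost ARN; backing snapshots copied to the
# Outpost are encrypted by default.
ec2.copy_image(
    Name="my-ami-outpost-copy",
    SourceImageId="ami-0123456789abcdef0",
    SourceRegion="us-east-1",
    DestinationOutpostArn="arn:aws:outposts:us-west-2:111122223333:outpost/op-0123456789abcdef0",
)
```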

" }, "CopySnapshot":{ "name":"CopySnapshot", @@ -434,7 +434,7 @@ }, "input":{"shape":"CopySnapshotRequest"}, "output":{"shape":"CopySnapshotResult"}, - "documentation":"

Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. You can copy the snapshot within the same Region or from one Region to another. You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs).

Copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted snapshots remain unencrypted, unless you enable encryption for the snapshot copy operation. By default, encrypted snapshot copies use the default AWS Key Management Service (AWS KMS) customer master key (CMK); however, you can specify a different CMK.

To copy an encrypted snapshot that has been shared from another account, you must have permissions for the CMK used to encrypt the snapshot.

Snapshots created by copying another snapshot have an arbitrary volume ID that should not be used for any purpose.

For more information, see Copying an Amazon EBS snapshot in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. You can copy a snapshot within the same Region, from one Region to another, or from a Region to an Outpost. You can't copy a snapshot from an Outpost to a Region, from one Outpost to another, or within the same Outpost.

You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs).

When copying snapshots to a Region, copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted snapshots remain unencrypted, unless you enable encryption for the snapshot copy operation. By default, encrypted snapshot copies use the default AWS Key Management Service (AWS KMS) customer master key (CMK); however, you can specify a different CMK. To copy an encrypted snapshot that has been shared from another account, you must have permissions for the CMK used to encrypt the snapshot.

Snapshots copied to an Outpost are encrypted by default using the default encryption key for the Region, or a different key that you specify in the request using KmsKeyId. Outposts do not support unencrypted snapshots. For more information, see Amazon EBS local snapshots on Outposts in the Amazon Elastic Compute Cloud User Guide.

Snapshots created by copying another snapshot have an arbitrary volume ID that should not be used for any purpose.

For more information, see Copying an Amazon EBS snapshot in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateCapacityReservation":{ "name":"CreateCapacityReservation", @@ -763,7 +763,7 @@ }, "input":{"shape":"CreateSnapshotRequest"}, "output":{"shape":"Snapshot"}, - "documentation":"

Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance.

When a snapshot is created, any AWS Marketplace product codes that are associated with the source volume are propagated to the snapshot.

You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your EBS volume at the time the snapshot command is issued; this might exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending.

To create a snapshot for EBS volumes that serve as root devices, you should stop the instance before taking the snapshot.

Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected.

You can tag your snapshots during creation. For more information, see Tagging your Amazon EC2 resources in the Amazon Elastic Compute Cloud User Guide.

For more information, see Amazon Elastic Block Store and Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance.

You can create snapshots of volumes in a Region and volumes on an Outpost. If you create a snapshot of a volume in a Region, the snapshot must be stored in the same Region as the volume. If you create a snapshot of a volume on an Outpost, the snapshot can be stored on the same Outpost as the volume, or in the Region for that Outpost.

When a snapshot is created, any AWS Marketplace product codes that are associated with the source volume are propagated to the snapshot.

You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your EBS volume at the time the snapshot command is issued; this might exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending.

To create a snapshot for EBS volumes that serve as root devices, you should stop the instance before taking the snapshot.

Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected.

You can tag your snapshots during creation. For more information, see Tagging your Amazon EC2 resources in the Amazon Elastic Compute Cloud User Guide.

For more information, see Amazon Elastic Block Store and Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateSnapshots":{ "name":"CreateSnapshots", @@ -773,7 +773,7 @@ }, "input":{"shape":"CreateSnapshotsRequest"}, "output":{"shape":"CreateSnapshotsResult"}, - "documentation":"

Creates crash-consistent snapshots of multiple EBS volumes and stores the data in S3. Volumes are chosen by specifying an instance. Any attached volumes will produce one snapshot each that is crash-consistent across the instance. Boot volumes can be excluded by changing the parameters.

" + "documentation":"

Creates crash-consistent snapshots of multiple EBS volumes and stores the data in S3. Volumes are chosen by specifying an instance. Any attached volumes will produce one snapshot each that is crash-consistent across the instance. Boot volumes can be excluded by changing the parameters.

You can create multi-volume snapshots of instances in a Region and instances on an Outpost. If you create snapshots from an instance in a Region, the snapshots must be stored in the same Region as the instance. If you create snapshots from an instance on an Outpost, the snapshots can be stored on the same Outpost as the instance, or in the Region for that Outpost.

" }, "CreateSpotDatafeedSubscription":{ "name":"CreateSpotDatafeedSubscription", @@ -1504,7 +1504,7 @@ }, "input":{"shape":"DeleteVpcEndpointsRequest"}, "output":{"shape":"DeleteVpcEndpointsResult"}, - "documentation":"

Deletes one or more specified VPC endpoints. Deleting a gateway endpoint also deletes the endpoint routes in the route tables that were associated with the endpoint. Deleting an interface endpoint or a Gateway Load Balancer endpoint deletes the endpoint network interfaces. Gateway Load Balancer endpoints can only be deleted if the routes that are associated with the endpoint are deleted.

" + "documentation":"

Deletes one or more specified VPC endpoints. You can delete any of the following types of VPC endpoints: gateway endpoints, interface endpoints, and Gateway Load Balancer endpoints.

The following rules apply when you delete a VPC endpoint: deleting a gateway endpoint also deletes the endpoint routes in the route tables that were associated with the endpoint; deleting an interface endpoint or a Gateway Load Balancer endpoint deletes the endpoint network interfaces; and a Gateway Load Balancer endpoint can be deleted only if the routes that are associated with the endpoint are deleted.

" }, "DeleteVpcPeeringConnection":{ "name":"DeleteVpcPeeringConnection", @@ -1612,6 +1612,16 @@ "output":{"shape":"DescribeAddressesResult"}, "documentation":"

Describes the specified Elastic IP addresses or all of your Elastic IP addresses.

An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

" }, + "DescribeAddressesAttribute":{ + "name":"DescribeAddressesAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAddressesAttributeRequest"}, + "output":{"shape":"DescribeAddressesAttributeResult"}, + "documentation":"

Describes the attributes of the specified Elastic IP addresses. For requirements, see Using reverse DNS for email applications.

" + }, "DescribeAggregateIdFormat":{ "name":"DescribeAggregateIdFormat", "http":{ @@ -2730,7 +2740,7 @@ }, "input":{"shape":"DescribeVpcEndpointServicesRequest"}, "output":{"shape":"DescribeVpcEndpointServicesResult"}, - "documentation":"

Describes available services to which you can create a VPC endpoint.

When the service provider and the consumer have different accounts multiple Availability Zones, and the consumer views the VPC endpoint service information, the response only includes the common Availability Zones. For example, when the service provider account uses us-east-1a and us-east-1c and the consumer uses us-east-1a and us-east-1a and us-east-1b, the response includes the VPC endpoint services in the common Availability Zone, us-east-1a.

" + "documentation":"

Describes available services to which you can create a VPC endpoint.

When the service provider and the consumer have different accounts in multiple Availability Zones, and the consumer views the VPC endpoint service information, the response only includes the common Availability Zones. For example, when the service provider account uses us-east-1a and us-east-1c and the consumer uses us-east-1a and us-east-1b, the response includes the VPC endpoint services in the common Availability Zone, us-east-1a.

" }, "DescribeVpcEndpoints":{ "name":"DescribeVpcEndpoints", @@ -3354,6 +3364,16 @@ "output":{"shape":"ImportVolumeResult"}, "documentation":"

Creates an import volume task using metadata from the specified disk image. For more information, see Importing Disks to Amazon EBS.

For information about the import manifest referenced by this API action, see VM Import Manifest.

" }, + "ModifyAddressAttribute":{ + "name":"ModifyAddressAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyAddressAttributeRequest"}, + "output":{"shape":"ModifyAddressAttributeResult"}, + "documentation":"

Modifies an attribute of the specified Elastic IP address. For requirements, see Using reverse DNS for email applications.

" + }, "ModifyAvailabilityZoneGroup":{ "name":"ModifyAvailabilityZoneGroup", "http":{ @@ -3852,7 +3872,7 @@ }, "input":{"shape":"RegisterImageRequest"}, "output":{"shape":"RegisterImageResult"}, - "documentation":"

Registers an AMI. When you're creating an AMI, this is the final step you must complete before you can launch an instance from the AMI. For more information about creating AMIs, see Creating your own AMIs in the Amazon Elastic Compute Cloud User Guide.

For Amazon EBS-backed instances, CreateImage creates and registers the AMI in a single request, so you don't have to register the AMI yourself.

You can also use RegisterImage to create an Amazon EBS-backed Linux AMI from a snapshot of a root device volume. You specify the snapshot using the block device mapping. For more information, see Launching a Linux instance from a backup in the Amazon Elastic Compute Cloud User Guide.

If any snapshots have AWS Marketplace product codes, they are copied to the new AMI.

Windows and some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE Linux Enterprise Server (SLES), use the EC2 billing product code associated with an AMI to verify the subscription status for package updates. To create a new AMI for operating systems that require a billing product code, instead of registering the AMI, do the following to preserve the billing product code association:

  1. Launch an instance from an existing AMI with that billing product code.

  2. Customize the instance.

  3. Create an AMI from the instance using CreateImage.

If you purchase a Reserved Instance to apply to an On-Demand Instance that was launched from an AMI with a billing product code, make sure that the Reserved Instance has the matching billing product code. If you purchase a Reserved Instance without the matching billing product code, the Reserved Instance will not be applied to the On-Demand Instance. For information about how to obtain the platform details and billing information of an AMI, see Obtaining billing information in the Amazon Elastic Compute Cloud User Guide.

If needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. If you make changes to an image, deregister the previous image and register the new image.

" + "documentation":"

Registers an AMI. When you're creating an AMI, this is the final step you must complete before you can launch an instance from the AMI. For more information about creating AMIs, see Creating your own AMIs in the Amazon Elastic Compute Cloud User Guide.

For Amazon EBS-backed instances, CreateImage creates and registers the AMI in a single request, so you don't have to register the AMI yourself.

If needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. If you make changes to an image, deregister the previous image and register the new image.

Register a snapshot of a root device volume

You can use RegisterImage to create an Amazon EBS-backed Linux AMI from a snapshot of a root device volume. You specify the snapshot using a block device mapping. You can't set the encryption state of the volume using the block device mapping. If the snapshot is encrypted, or encryption by default is enabled, the root volume of an instance launched from the AMI is encrypted.

For more information, see Create a Linux AMI from a snapshot and Use encryption with EBS-backed AMIs in the Amazon Elastic Compute Cloud User Guide.

AWS Marketplace product codes

If any snapshots have AWS Marketplace product codes, they are copied to the new AMI.

Windows and some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE Linux Enterprise Server (SLES), use the EC2 billing product code associated with an AMI to verify the subscription status for package updates. To create a new AMI for operating systems that require a billing product code, instead of registering the AMI, do the following to preserve the billing product code association:

  1. Launch an instance from an existing AMI with that billing product code.

  2. Customize the instance.

  3. Create an AMI from the instance using CreateImage.

If you purchase a Reserved Instance to apply to an On-Demand Instance that was launched from an AMI with a billing product code, make sure that the Reserved Instance has the matching billing product code. If you purchase a Reserved Instance without the matching billing product code, the Reserved Instance will not be applied to the On-Demand Instance. For information about how to obtain the platform details and billing information of an AMI, see Obtaining billing information in the Amazon Elastic Compute Cloud User Guide.

" }, "RegisterInstanceEventNotificationAttributes":{ "name":"RegisterInstanceEventNotificationAttributes", @@ -4040,6 +4060,16 @@ "output":{"shape":"RequestSpotInstancesResult"}, "documentation":"

Creates a Spot Instance request.

For more information, see Spot Instance requests in the Amazon EC2 User Guide for Linux Instances.

" }, + "ResetAddressAttribute":{ + "name":"ResetAddressAttribute", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetAddressAttributeRequest"}, + "output":{"shape":"ResetAddressAttributeResult"}, + "documentation":"

Resets the attribute of the specified IP address. For requirements, see Using reverse DNS for email applications.

" + }, "ResetEbsDefaultKmsKeyId":{ "name":"ResetEbsDefaultKmsKeyId", "http":{ @@ -4687,6 +4717,36 @@ }, "documentation":"

Describes an Elastic IP address, or a carrier IP address.

" }, + "AddressAttribute":{ + "type":"structure", + "members":{ + "PublicIp":{ + "shape":"PublicIpAddress", + "documentation":"

The public IP address.

", + "locationName":"publicIp" + }, + "AllocationId":{ + "shape":"AllocationId", + "documentation":"

[EC2-VPC] The allocation ID.

", + "locationName":"allocationId" + }, + "PtrRecord":{ + "shape":"String", + "documentation":"

The pointer (PTR) record for the IP address.

", + "locationName":"ptrRecord" + }, + "PtrRecordUpdate":{ + "shape":"PtrUpdateStatus", + "documentation":"

The updated PTR record for the IP address.

", + "locationName":"ptrRecordUpdate" + } + }, + "documentation":"

The attributes associated with an Elastic IP address.

" + }, + "AddressAttributeName":{ + "type":"string", + "enum":["domain-name"] + }, "AddressList":{ "type":"list", "member":{ @@ -4694,6 +4754,18 @@ "locationName":"item" } }, + "AddressMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, + "AddressSet":{ + "type":"list", + "member":{ + "shape":"AddressAttribute", + "locationName":"item" + } + }, "AdvertiseByoipCidrRequest":{ "type":"structure", "required":["Cidr"], @@ -4871,6 +4943,13 @@ "locationName":"AllocationId" } }, + "AllocationIds":{ + "type":"list", + "member":{ + "shape":"AllocationId", + "locationName":"item" + } + }, "AllocationState":{ "type":"string", "enum":[ @@ -4887,7 +4966,8 @@ "enum":[ "lowestPrice", "diversified", - "capacityOptimized" + "capacityOptimized", + "capacityOptimizedPrioritized" ] }, "AllowedPrincipal":{ @@ -5255,7 +5335,7 @@ "members":{ "Ipv6AddressCount":{ "shape":"Integer", - "documentation":"

The number of IPv6 addresses to assign to the network interface. Amazon EC2 automatically selects the IPv6 addresses from the subnet range. You can't use this option if specifying specific IPv6 addresses.

", + "documentation":"

The number of additional IPv6 addresses to assign to the network interface. The specified number of IPv6 addresses are assigned in addition to the existing IPv6 addresses that are already assigned to the network interface. Amazon EC2 automatically selects the IPv6 addresses from the subnet range. You can't use this option if specifying specific IPv6 addresses.

", "locationName":"ipv6AddressCount" }, "Ipv6Addresses":{ @@ -5275,7 +5355,7 @@ "members":{ "AssignedIpv6Addresses":{ "shape":"Ipv6AddressList", - "documentation":"

The IPv6 addresses assigned to the network interface.

", + "documentation":"

The new IPv6 addresses assigned to the network interface. Existing IPv6 addresses that were assigned to the network interface before the request are not included.

", "locationName":"assignedIpv6Addresses" }, "NetworkInterfaceId":{ @@ -5354,11 +5434,11 @@ }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

The ID of the instance. This is required for EC2-Classic. For EC2-VPC, you can specify either the instance ID or the network interface ID, but not both. The operation fails if you specify an instance ID unless exactly one network interface is attached.

" + "documentation":"

The ID of the instance. The instance must have exactly one attached network interface. For EC2-VPC, you can specify either the instance ID or the network interface ID, but not both. For EC2-Classic, you must specify an instance ID and the instance must be in the running state.

" }, "PublicIp":{ "shape":"String", - "documentation":"

The Elastic IP address to associate with the instance. This is required for EC2-Classic.

" + "documentation":"

[EC2-Classic] The Elastic IP address to associate with the instance. This is required for EC2-Classic.

" }, "AllowReassociation":{ "shape":"Boolean", @@ -5482,7 +5562,7 @@ }, "CertificateS3ObjectKey":{ "shape":"String", - "documentation":"

The Amazon S3 object key where the certificate, certificate chain, and encrypted private key bundle are stored. The object key is formatted as follows: certificate_arn/role_arn.

", + "documentation":"

The Amazon S3 object key where the certificate, certificate chain, and encrypted private key bundle are stored. The object key is formatted as follows: role_arn/certificate_arn.

", "locationName":"certificateS3ObjectKey" }, "EncryptionKmsKeyId":{ @@ -5562,19 +5642,19 @@ "AssociateSubnetCidrBlockRequest":{ "type":"structure", "required":[ - "Ipv6CidrBlock", - "SubnetId" + "SubnetId", + "Ipv6CidrBlock" ], "members":{ - "Ipv6CidrBlock":{ - "shape":"String", - "documentation":"

The IPv6 CIDR block for your subnet. The subnet must have a /64 prefix length.

", - "locationName":"ipv6CidrBlock" - }, "SubnetId":{ "shape":"SubnetId", "documentation":"

The ID of your subnet.

", "locationName":"subnetId" + }, + "Ipv6CidrBlock":{ + "shape":"String", + "documentation":"

The IPv6 CIDR block for your subnet. The subnet must have a /64 prefix length.

", + "locationName":"ipv6CidrBlock" } } }, @@ -5726,7 +5806,7 @@ }, "CertificateS3ObjectKey":{ "shape":"String", - "documentation":"

The key of the Amazon S3 object ey where the certificate, certificate chain, and encrypted private key bundle is stored. The object key is formated as follows: certificate_arn/role_arn.

", + "documentation":"

The key of the Amazon S3 object where the certificate, certificate chain, and encrypted private key bundle are stored. The object key is formatted as follows: role_arn/certificate_arn.

", "locationName":"certificateS3ObjectKey" }, "EncryptionKmsKeyId":{ @@ -6431,6 +6511,27 @@ } }, "Boolean":{"type":"boolean"}, + "BootModeType":{ + "type":"string", + "enum":[ + "legacy-bios", + "uefi" + ] + }, + "BootModeTypeList":{ + "type":"list", + "member":{ + "shape":"BootModeType", + "locationName":"item" + } + }, + "BootModeValues":{ + "type":"string", + "enum":[ + "legacy-bios", + "uefi" + ] + }, "BundleId":{"type":"string"}, "BundleIdStringList":{ "type":"list", @@ -8252,7 +8353,7 @@ "members":{ "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference.

" }, "Description":{ "shape":"String", @@ -8280,6 +8381,10 @@ "shape":"String", "documentation":"

The name of the Region that contains the AMI to copy.

" }, + "DestinationOutpostArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the Outpost to which to copy the AMI. Only specify this parameter when copying an AMI from an AWS Region to an Outpost. The AMI must be in the Region of the destination Outpost. You cannot copy an AMI from an Outpost to a Region, from one Outpost to another, or within the same Outpost.

For more information, see Copying AMIs from an AWS Region to an Outpost in the Amazon Elastic Compute Cloud User Guide.

" + }, "DryRun":{ "shape":"Boolean", "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", @@ -8310,6 +8415,10 @@ "shape":"String", "documentation":"

A description for the EBS snapshot.

" }, + "DestinationOutpostArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the Outpost to which to copy the snapshot. Only specify this parameter when copying a snapshot from an AWS Region to an Outpost. The snapshot must be in the Region for the destination Outpost. You cannot copy a snapshot from an Outpost to a Region, from one Outpost to another, or within the same Outpost.

For more information, see Copying snapshots from an AWS Region to an Outpost in the Amazon Elastic Compute Cloud User Guide.

" + }, "DestinationRegion":{ "shape":"String", "documentation":"

The destination Region to use in the PresignedUrl parameter of a snapshot copy operation. This parameter is only valid for specifying the destination Region in a PresignedUrl parameter, where it is required.

The snapshot copy is sent to the regional endpoint that you sent the HTTP request to (for example, ec2.us-east-1.amazonaws.com). With the AWS CLI, this is specified using the --region parameter or the default Region in your AWS configuration file.

", @@ -8949,7 +9058,7 @@ }, "ReplaceUnhealthyInstances":{ "shape":"Boolean", - "documentation":"

Indicates whether EC2 Fleet should replace unhealthy instances.

" + "documentation":"

Indicates whether EC2 Fleet should replace unhealthy Spot Instances. Supported only for fleets of type maintain. For more information, see EC2 Fleet health checks in the Amazon EC2 User Guide.

" }, "TagSpecifications":{ "shape":"TagSpecificationList", @@ -9177,7 +9286,7 @@ }, "ExportToS3Task":{ "shape":"ExportToS3TaskSpecification", - "documentation":"

The format and location for an instance export task.

", + "documentation":"

The format and location for an export instance task.

", "locationName":"exportToS3" }, "InstanceId":{ @@ -9192,7 +9301,7 @@ }, "TagSpecifications":{ "shape":"TagSpecificationList", - "documentation":"

The tags to apply to the instance export task during creation.

", + "documentation":"

The tags to apply to the export instance task during creation.

", "locationName":"TagSpecification" } } @@ -9202,7 +9311,7 @@ "members":{ "ExportTask":{ "shape":"ExportTask", - "documentation":"

Information about the instance export task.

", + "documentation":"

Information about the export instance task.

", "locationName":"exportTask" } } @@ -9366,13 +9475,13 @@ "shape":"LocalGatewayRoutetableId", "documentation":"

The ID of the local gateway route table.

" }, - "LocalGatewayVirtualInterfaceGroupId":{ - "shape":"LocalGatewayVirtualInterfaceGroupId", - "documentation":"

The ID of the virtual interface group.

" - }, "DryRun":{ "shape":"Boolean", "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "LocalGatewayVirtualInterfaceGroupId":{ + "shape":"LocalGatewayVirtualInterfaceGroupId", + "documentation":"

The ID of the virtual interface group.

" } } }, @@ -9476,14 +9585,10 @@ "CreateNatGatewayRequest":{ "type":"structure", "required":[ - "AllocationId", - "SubnetId" + "SubnetId", + "AllocationId" ], "members":{ - "AllocationId":{ - "shape":"AllocationId", - "documentation":"

The allocation ID of an Elastic IP address to associate with the NAT gateway. If the Elastic IP address is associated with another resource, you must first disassociate it.

" - }, "ClientToken":{ "shape":"String", "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

Constraint: Maximum 64 ASCII characters.

", @@ -9501,6 +9606,10 @@ "shape":"TagSpecificationList", "documentation":"

The tags to assign to the NAT gateway.

", "locationName":"TagSpecification" + }, + "AllocationId":{ + "shape":"AllocationId", + "documentation":"

The allocation ID of an Elastic IP address to associate with the NAT gateway. If the Elastic IP address is associated with another resource, you must first disassociate it.

" } } }, @@ -10037,6 +10146,10 @@ "shape":"String", "documentation":"

A description for the snapshot.

" }, + "OutpostArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Outpost on which to create a local snapshot.

For more information, see Creating local snapshots from volumes on an Outpost in the Amazon Elastic Compute Cloud User Guide.

" + }, "VolumeId":{ "shape":"VolumeId", "documentation":"

The ID of the EBS volume.

" @@ -10065,6 +10178,10 @@ "shape":"InstanceSpecification", "documentation":"

The instance to specify which volumes should be included in the snapshots.

" }, + "OutpostArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Outpost on which to create the local snapshots.

For more information, see Creating multi-volume local snapshots from instances on an Outpost in the Amazon Elastic Compute Cloud User Guide.

" + }, "TagSpecifications":{ "shape":"TagSpecificationList", "documentation":"

Tags to apply to every snapshot specified by the instance.

", @@ -10126,8 +10243,8 @@ "CreateSubnetRequest":{ "type":"structure", "required":[ - "CidrBlock", - "VpcId" + "VpcId", + "CidrBlock" ], "members":{ "TagSpecifications":{ @@ -10143,10 +10260,6 @@ "shape":"String", "documentation":"

The AZ ID or the Local Zone ID of the subnet.

" }, - "CidrBlock":{ - "shape":"String", - "documentation":"

The IPv4 network range for the subnet, in CIDR notation. For example, 10.0.0.0/24. We modify the specified CIDR block to its canonical form; for example, if you specify 100.68.0.18/18, we modify it to 100.68.0.0/18.

" - }, "Ipv6CidrBlock":{ "shape":"String", "documentation":"

The IPv6 network range for the subnet, in CIDR notation. The subnet size must use a /64 prefix length.

" @@ -10163,6 +10276,10 @@ "shape":"Boolean", "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "locationName":"dryRun" + }, + "CidrBlock":{ + "shape":"String", + "documentation":"

The IPv4 network range for the subnet, in CIDR notation. For example, 10.0.0.0/24. We modify the specified CIDR block to its canonical form; for example, if you specify 100.68.0.18/18, we modify it to 100.68.0.0/18.

" } } }, @@ -13009,6 +13126,47 @@ } } }, + "DescribeAddressesAttributeRequest":{ + "type":"structure", + "members":{ + "AllocationIds":{ + "shape":"AllocationIds", + "documentation":"

[EC2-VPC] The allocation IDs.

", + "locationName":"AllocationId" + }, + "Attribute":{ + "shape":"AddressAttributeName", + "documentation":"

The attribute of the IP address.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next page of results.

" + }, + "MaxResults":{ + "shape":"AddressMaxResults", + "documentation":"

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "DescribeAddressesAttributeResult":{ + "type":"structure", + "members":{ + "Addresses":{ + "shape":"AddressSet", + "documentation":"

Information about the IP addresses.

", + "locationName":"addressSet" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", + "locationName":"nextToken" + } + } + }, "DescribeAddressesRequest":{ "type":"structure", "members":{ @@ -14557,7 +14715,7 @@ "members":{ "Attribute":{ "shape":"ImageAttributeName", - "documentation":"

The AMI attribute.

Note: Depending on your account privileges, the blockDeviceMapping attribute may return a Client.AuthFailure error. If this happens, use DescribeImages to get information about the block device mapping for the AMI.

" + "documentation":"

The AMI attribute.

Note: The blockDeviceMapping attribute is deprecated. Using this attribute returns the Client.AuthFailure error. To get information about the block device mappings for an AMI, use the DescribeImages action.

" }, "ImageId":{ "shape":"ImageId", @@ -14581,7 +14739,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

The filters.

", + "documentation":"

The filters.

", "locationName":"Filter" }, "ImageIds":{ @@ -14886,7 +15044,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters. Filter names and values are case-sensitive.

", + "documentation":"

One or more filters. Filter names and values are case-sensitive.

", "locationName":"Filter" }, "MaxResults":{ @@ -14919,7 +15077,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

The filters.

", + "documentation":"

The filters.

", "locationName":"Filter" }, "InstanceIds":{ @@ -15836,7 +15994,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"filter" }, "DryRun":{ @@ -16461,7 +16619,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

The filters. If using multiple filters for rules, the results include security groups for which any combination of rules - not necessarily a single rule - match all filters.

", + "documentation":"

The filters. If using multiple filters for rules, the results include security groups for which any combination of rules - not necessarily a single rule - match all filters.

", "locationName":"Filter" }, "GroupIds":{ @@ -17957,7 +18115,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "MaxResults":{ @@ -19178,6 +19336,11 @@ "documentation":"

The throughput that the volume supports, in MiB/s.

This parameter is valid only for gp3 volumes.

Valid Range: Minimum value of 125. Maximum value of 1000.

", "locationName":"throughput" }, + "OutpostArn":{ + "shape":"String", + "documentation":"

The ARN of the Outpost on which the snapshot is stored.

", + "locationName":"outpostArn" + }, "Encrypted":{ "shape":"Boolean", "documentation":"

Indicates whether the encryption state of an EBS volume is changed while being restored from a backing snapshot. The effect of setting the encryption state to true depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether encryption by default is enabled. For more information, see Amazon EBS encryption in the Amazon EC2 User Guide.

In no case can you remove encryption from an encrypted volume.

Encrypted volumes can only be attached to instances that support Amazon EBS encryption. For more information, see Supported instance types.

This parameter is not returned by .

", @@ -20315,7 +20478,7 @@ }, "TagSpecifications":{ "shape":"TagSpecificationList", - "documentation":"

The tags to apply to the image being exported.

", + "documentation":"

The tags to apply to the export image task during creation.

", "locationName":"TagSpecification" } } @@ -20370,7 +20533,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

Any tags assigned to the image being exported.

", + "documentation":"

Any tags assigned to the export image task.

", "locationName":"tagSet" } } @@ -20415,7 +20578,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

Any tags assigned to the image being exported.

", + "documentation":"

Any tags assigned to the export image task.

", "locationName":"tagSet" } }, @@ -20475,7 +20638,7 @@ "locationName":"tagSet" } }, - "documentation":"

Describes an instance export task.

" + "documentation":"

Describes an export instance task.

" }, "ExportTaskId":{"type":"string"}, "ExportTaskIdStringList":{ @@ -20556,7 +20719,7 @@ "locationName":"s3Key" } }, - "documentation":"

Describes the format and location for an instance export task.

" + "documentation":"

Describes the format and location for the export task.

" }, "ExportToS3TaskSpecification":{ "type":"structure", @@ -20582,7 +20745,7 @@ "locationName":"s3Prefix" } }, - "documentation":"

Describes an instance export task.

" + "documentation":"

Describes an export instance task.

" }, "ExportTransitGatewayRoutesRequest":{ "type":"structure", @@ -20697,7 +20860,7 @@ "locationName":"Value" } }, - "documentation":"

A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:

" + "documentation":"

A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs.

" }, "FilterList":{ "type":"list", @@ -20794,7 +20957,7 @@ }, "ReplaceUnhealthyInstances":{ "shape":"Boolean", - "documentation":"

Indicates whether EC2 Fleet should replace unhealthy instances.

", + "documentation":"

Indicates whether EC2 Fleet should replace unhealthy Spot Instances. Supported only for fleets of type maintain. For more information, see EC2 Fleet health checks in the Amazon EC2 User Guide.

", "locationName":"replaceUnhealthyInstances" }, "SpotOptions":{ @@ -20886,7 +21049,7 @@ }, "Overrides":{ "shape":"FleetLaunchTemplateOverridesListRequest", - "documentation":"

Any parameters that you specify override the same parameters in the launch template.

" + "documentation":"

Any parameters that you specify override the same parameters in the launch template.

For fleets of type request and maintain, a maximum of 300 items is allowed across all launch templates.

" } }, "documentation":"

Describes a launch template and overrides.

" @@ -20921,7 +21084,7 @@ }, "Priority":{ "shape":"Double", - "documentation":"

The priority for the launch template override. If AllocationStrategy is set to prioritized, EC2 Fleet uses priority to determine which launch template override to use first in fulfilling On-Demand capacity. The highest priority is launched first. Valid values are whole numbers starting at 0. The lower the number, the higher the priority. If no number is set, the override has the lowest priority.

", + "documentation":"

The priority for the launch template override. The highest priority is launched first.

If the On-Demand AllocationStrategy is set to prioritized, EC2 Fleet uses priority to determine which launch template override to use first in fulfilling On-Demand capacity.

If the Spot AllocationStrategy is set to capacity-optimized-prioritized, EC2 Fleet uses priority on a best-effort basis to determine which launch template override to use first in fulfilling Spot capacity, but optimizes for capacity first.

Valid values are whole numbers starting at 0. The lower the number, the higher the priority. If no number is set, the override has the lowest priority. You can set the same priority for different launch template overrides.

", "locationName":"priority" }, "Placement":{ @@ -20973,7 +21136,7 @@ }, "Priority":{ "shape":"Double", - "documentation":"

The priority for the launch template override. If AllocationStrategy is set to prioritized, EC2 Fleet uses priority to determine which launch template override to use first in fulfilling On-Demand capacity. The highest priority is launched first. Valid values are whole numbers starting at 0. The lower the number, the higher the priority. If no number is set, the launch template override has the lowest priority.

" + "documentation":"

The priority for the launch template override. The highest priority is launched first.

If the On-Demand AllocationStrategy is set to prioritized, EC2 Fleet uses priority to determine which launch template override to use first in fulfilling On-Demand capacity.

If the Spot AllocationStrategy is set to capacity-optimized-prioritized, EC2 Fleet uses priority on a best-effort basis to determine which launch template override to use first in fulfilling Spot capacity, but optimizes for capacity first.

Valid values are whole numbers starting at 0. The lower the number, the higher the priority. If no number is set, the launch template override has the lowest priority. You can set the same priority for different launch template overrides.

" }, "Placement":{ "shape":"Placement", @@ -23068,6 +23231,11 @@ "shape":"VirtualizationType", "documentation":"

The type of virtualization of the AMI.

", "locationName":"virtualizationType" + }, + "BootMode":{ + "shape":"BootModeValues", + "documentation":"

The boot mode of the image. For more information, see Boot modes in the Amazon Elastic Compute Cloud User Guide.

", + "locationName":"bootMode" } }, "documentation":"

Describes an image.

" @@ -23114,6 +23282,10 @@ "shape":"AttributeValue", "documentation":"

Indicates whether enhanced networking with the Intel 82599 Virtual Function interface is enabled.

", "locationName":"sriovNetSupport" + }, + "BootMode":{ + "shape":"AttributeValue", + "locationName":"bootMode" } }, "documentation":"

Describes an image attribute.

" @@ -23127,7 +23299,8 @@ "launchPermission", "productCodes", "blockDeviceMapping", - "sriovNetSupport" + "sriovNetSupport", + "bootMode" ] }, "ImageDiskContainer":{ @@ -23143,7 +23316,7 @@ }, "Format":{ "shape":"String", - "documentation":"

The format of the disk image being imported.

Valid values: OVA | VHD | VHDX |VMDK

" + "documentation":"

The format of the disk image being imported.

Valid values: OVA | VHD | VHDX | VMDK | RAW

" }, "SnapshotId":{ "shape":"SnapshotId", @@ -23326,7 +23499,7 @@ }, "TagSpecifications":{ "shape":"TagSpecificationList", - "documentation":"

The tags to apply to the image being imported.

", + "documentation":"

The tags to apply to the import image task during creation.

", "locationName":"TagSpecification" } } @@ -23406,7 +23579,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

Any tags assigned to the image being imported.

", + "documentation":"

Any tags assigned to the import image task.

", "locationName":"tagSet" } } @@ -23767,7 +23940,7 @@ }, "TagSpecifications":{ "shape":"TagSpecificationList", - "documentation":"

The tags to apply to the snapshot being imported.

", + "documentation":"

The tags to apply to the import snapshot task during creation.

", "locationName":"TagSpecification" } } @@ -23792,7 +23965,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

Any tags assigned to the snapshot being imported.

", + "documentation":"

Any tags assigned to the import snapshot task.

", "locationName":"tagSet" } } @@ -24149,7 +24322,7 @@ }, "SourceDestCheck":{ "shape":"Boolean", - "documentation":"

Specifies whether to enable an instance launched in a VPC to perform NAT. This controls whether source/destination checking is enabled on the instance. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform NAT. For more information, see NAT instances in the Amazon VPC User Guide.

", + "documentation":"

Indicates whether source/destination checking is enabled.

", "locationName":"sourceDestCheck" }, "SpotInstanceRequestId":{ @@ -24211,6 +24384,11 @@ "shape":"EnclaveOptions", "documentation":"

Indicates whether the instance is enabled for AWS Nitro Enclaves.

", "locationName":"enclaveOptions" + }, + "BootMode":{ + "shape":"BootModeValues", + "documentation":"

The boot mode of the instance. For more information, see Boot modes in the Amazon EC2 User Guide.

", + "locationName":"bootMode" } }, "documentation":"

Describes an instance.

" @@ -24285,7 +24463,7 @@ }, "SourceDestCheck":{ "shape":"AttributeBooleanValue", - "documentation":"

Indicates whether source/destination checking is enabled. A value of true means that checking is enabled, and false means that checking is disabled. This value must be false for a NAT instance to perform NAT.

", + "documentation":"

Enable or disable source/destination checks, which ensure that the instance is either the source or the destination of any traffic that it receives. If the value is true, source/destination checks are enabled; otherwise, they are disabled. The default value is true. You must disable source/destination checks if the instance runs services such as network address translation, routing, or firewalls.

", "locationName":"sourceDestCheck" }, "SriovNetSupport":{ @@ -25608,7 +25786,16 @@ "m6gd.8xlarge", "m6gd.12xlarge", "m6gd.16xlarge", - "mac1.metal" + "mac1.metal", + "x2gd.medium", + "x2gd.large", + "x2gd.xlarge", + "x2gd.2xlarge", + "x2gd.4xlarge", + "x2gd.8xlarge", + "x2gd.12xlarge", + "x2gd.16xlarge", + "x2gd.metal" ] }, "InstanceTypeHypervisor":{ @@ -25735,6 +25922,11 @@ "shape":"AutoRecoveryFlag", "documentation":"

Indicates whether auto recovery is supported.

", "locationName":"autoRecoverySupported" + }, + "SupportedBootModes":{ + "shape":"BootModeTypeList", + "documentation":"

The supported boot modes. For more information, see Boot modes in the Amazon EC2 User Guide.

", + "locationName":"supportedBootModes" } }, "documentation":"

Describes the instance type.

" @@ -26199,7 +26391,7 @@ }, "UserId":{ "shape":"String", - "documentation":"

The AWS account ID.

", + "documentation":"

The AWS account ID.

Constraints: Up to 10,000 account IDs can be specified in a single request.

", "locationName":"userId" } }, @@ -26390,7 +26582,7 @@ }, "NoDevice":{ "shape":"String", - "documentation":"

Suppresses the specified device included in the block device mapping of the AMI.

", + "documentation":"

To omit the device from the block device mapping, specify an empty string.

", "locationName":"noDevice" } }, @@ -26420,7 +26612,7 @@ }, "NoDevice":{ "shape":"String", - "documentation":"

Suppresses the specified device included in the block device mapping of the AMI.

" + "documentation":"

To omit the device from the block device mapping, specify an empty string.

" } }, "documentation":"

Describes a block device mapping.

" @@ -26574,7 +26766,7 @@ }, "Iops":{ "shape":"Integer", - "documentation":"

The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.

The following are the supported values for each volume type:

For io1 and io2 volumes, we guarantee 64,000 IOPS only for Instances built on the Nitro System. Other instance families guarantee performance up to 32,000 IOPS.

This parameter is required for io1 and io2 volumes. The default for gp3 volumes is 3,000 IOPS. This parameter is not supported for gp2, st1, sc1, or standard volumes.

" + "documentation":"

The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.

The following are the supported values for each volume type:

For io1 and io2 volumes, we guarantee 64,000 IOPS only for Instances built on the Nitro System. Other instance families guarantee performance up to 32,000 IOPS.

This parameter is supported for io1, io2, and gp3 volumes only. This parameter is not supported for gp2, st1, sc1, or standard volumes.

" }, "KmsKeyId":{ "shape":"KmsKeyId", @@ -26586,11 +26778,11 @@ }, "VolumeSize":{ "shape":"Integer", - "documentation":"

The size of the volume, in GiBs. You must specify either a snapshot ID or a volume size. If you specify a snapshot, the default is the snapshot size. You can specify a volume size that is equal to or larger than the snapshot size.

The following are the supported volumes sizes for each volume type:

" + "documentation":"

The size of the volume, in GiBs. You must specify either a snapshot ID or a volume size. The following are the supported volume sizes for each volume type:

" }, "VolumeType":{ "shape":"VolumeType", - "documentation":"

The volume type. The default is gp2. For more information, see Amazon EBS volume types in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The volume type. For more information, see Amazon EBS volume types in the Amazon Elastic Compute Cloud User Guide.

" }, "Throughput":{ "shape":"Integer", @@ -27074,7 +27266,7 @@ }, "Priority":{ "shape":"Double", - "documentation":"

The priority for the launch template override. If OnDemandAllocationStrategy is set to prioritized, Spot Fleet uses priority to determine which launch template override to use first in fulfilling On-Demand capacity. The highest priority is launched first. Valid values are whole numbers starting at 0. The lower the number, the higher the priority. If no number is set, the launch template override has the lowest priority.

", + "documentation":"

The priority for the launch template override. The highest priority is launched first.

If OnDemandAllocationStrategy is set to prioritized, Spot Fleet uses priority to determine which launch template override to use first in fulfilling On-Demand capacity.

If the Spot AllocationStrategy is set to capacityOptimizedPrioritized, Spot Fleet uses priority on a best-effort basis to determine which launch template override to use first in fulfilling Spot capacity, but optimizes for capacity first.

Valid values are whole numbers starting at 0. The lower the number, the higher the priority. If no number is set, the launch template override has the lowest priority. You can set the same priority for different launch template overrides.

", "locationName":"priority" } }, @@ -28017,6 +28209,34 @@ }, "MemorySize":{"type":"long"}, "MillisecondDateTime":{"type":"timestamp"}, + "ModifyAddressAttributeRequest":{ + "type":"structure", + "required":["AllocationId"], + "members":{ + "AllocationId":{ + "shape":"AllocationId", + "documentation":"

[EC2-VPC] The allocation ID.

" + }, + "DomainName":{ + "shape":"String", + "documentation":"

The domain name to modify for the IP address.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "ModifyAddressAttributeResult":{ + "type":"structure", + "members":{ + "Address":{ + "shape":"AddressAttribute", + "documentation":"

Information about the Elastic IP address.

", + "locationName":"address" + } + } + }, "ModifyAvailabilityZoneGroupRequest":{ "type":"structure", "required":[ @@ -28453,7 +28673,7 @@ "members":{ "SourceDestCheck":{ "shape":"AttributeBooleanValue", - "documentation":"

Specifies whether source/destination checking is enabled. A value of true means that checking is enabled, and false means that checking is disabled. This value must be false for a NAT instance to perform NAT.

" + "documentation":"

Enable or disable source/destination checks, which ensure that the instance is either the source or the destination of any traffic that it receives. If the value is true, source/destination checks are enabled; otherwise, they are disabled. The default value is true. You must disable source/destination checks if the instance runs services such as network address translation, routing, or firewalls.

" }, "Attribute":{ "shape":"InstanceAttributeName", @@ -30550,7 +30770,7 @@ }, "RequesterId":{ "shape":"String", - "documentation":"

The ID of the entity that launched the instance on your behalf (for example, AWS Management Console or Auto Scaling).

", + "documentation":"

The alias or AWS account ID of the principal or service that created the network interface.

", "locationName":"requesterId" }, "RequesterManaged":{ @@ -32093,6 +32313,27 @@ }, "documentation":"

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

" }, + "PtrUpdateStatus":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"String", + "documentation":"

The value for the PTR record update.

", + "locationName":"value" + }, + "Status":{ + "shape":"String", + "documentation":"

The status of the PTR record update.

", + "locationName":"status" + }, + "Reason":{ + "shape":"String", + "documentation":"

The reason for the PTR record update.

", + "locationName":"reason" + } + }, + "documentation":"

The status of an updated pointer (PTR) record for an Elastic IP address.

" + }, "PublicIpAddress":{"type":"string"}, "PublicIpStringList":{ "type":"list", @@ -32524,7 +32765,7 @@ }, "BlockDeviceMappings":{ "shape":"BlockDeviceMappingRequestList", - "documentation":"

The block device mapping entries.

", + "documentation":"

The block device mapping entries.

If you specify an EBS volume using the ID of an EBS snapshot, you can't specify the encryption state of the volume.

If you create an AMI on an Outpost, then all backing snapshots must be on the same Outpost or in the Region of that Outpost. AMIs on an Outpost that include local snapshots can be used to launch instances on the same Outpost only. For more information, see Amazon EBS local snapshots on Outposts in the Amazon Elastic Compute Cloud User Guide.

", "locationName":"BlockDeviceMapping" }, "Description":{ @@ -32576,6 +32817,10 @@ "shape":"String", "documentation":"

The type of virtualization (hvm | paravirtual).

Default: paravirtual

", "locationName":"virtualizationType" + }, + "BootMode":{ + "shape":"BootModeValues", + "documentation":"

The boot mode of the AMI. For more information, see Boot modes in the Amazon Elastic Compute Cloud User Guide.

" } }, "documentation":"

Contains the parameters for RegisterImage.

" @@ -33277,7 +33522,7 @@ }, "IamInstanceProfile":{ "shape":"LaunchTemplateIamInstanceProfileSpecificationRequest", - "documentation":"

The IAM instance profile.

" + "documentation":"

The name or Amazon Resource Name (ARN) of an IAM instance profile.

" }, "BlockDeviceMappings":{ "shape":"LaunchTemplateBlockDeviceMappingRequestList", @@ -34132,6 +34377,37 @@ "locationName":"item" } }, + "ResetAddressAttributeRequest":{ + "type":"structure", + "required":[ + "AllocationId", + "Attribute" + ], + "members":{ + "AllocationId":{ + "shape":"AllocationId", + "documentation":"

[EC2-VPC] The allocation ID.

" + }, + "Attribute":{ + "shape":"AddressAttributeName", + "documentation":"

The attribute of the IP address.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "ResetAddressAttributeResult":{ + "type":"structure", + "members":{ + "Address":{ + "shape":"AddressAttribute", + "documentation":"

Information about the IP address.

", + "locationName":"address" + } + } + }, "ResetEbsDefaultKmsKeyIdRequest":{ "type":"structure", "members":{ @@ -35120,7 +35396,7 @@ }, "IamInstanceProfile":{ "shape":"IamInstanceProfileSpecification", - "documentation":"

The IAM instance profile.

", + "documentation":"

The name or Amazon Resource Name (ARN) of an IAM instance profile.

", "locationName":"iamInstanceProfile" }, "InstanceInitiatedShutdownBehavior":{ @@ -36324,6 +36600,11 @@ "documentation":"

The AWS owner alias, from an Amazon-maintained list (amazon). This is not the user-configured AWS account alias set using the IAM console.

", "locationName":"ownerAlias" }, + "OutpostArn":{ + "shape":"String", + "documentation":"

The ARN of the AWS Outpost on which the snapshot is stored. For more information, see EBS Local Snapshot on Outposts in the Amazon Elastic Compute Cloud User Guide.

", + "locationName":"outpostArn" + }, "Tags":{ "shape":"TagList", "documentation":"

Any tags assigned to the snapshot.

", @@ -36411,7 +36692,7 @@ }, "Format":{ "shape":"String", - "documentation":"

The format of the disk image being imported.

Valid values: VHD | VMDK

" + "documentation":"

The format of the disk image being imported.

Valid values: VHD | VMDK | RAW

" }, "Url":{ "shape":"String", @@ -36484,6 +36765,11 @@ "shape":"String", "documentation":"

Snapshot ID that can be used to describe this snapshot.

", "locationName":"snapshotId" + }, + "OutpostArn":{ + "shape":"String", + "documentation":"

The ARN of the AWS Outpost on which the snapshot is stored. For more information, see EBS Local Snapshot on Outposts in the Amazon Elastic Compute Cloud User Guide.

", + "locationName":"outpostArn" } }, "documentation":"

Information about a snapshot.

" @@ -36576,7 +36862,8 @@ "enum":[ "lowest-price", "diversified", - "capacity-optimized" + "capacity-optimized", + "capacity-optimized-prioritized" ] }, "SpotCapacityRebalance":{ @@ -36773,7 +37060,7 @@ "members":{ "AllocationStrategy":{ "shape":"AllocationStrategy", - "documentation":"

Indicates how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the Spot Fleet request.

If the allocation strategy is lowestPrice, Spot Fleet launches instances from the Spot Instance pools with the lowest price. This is the default allocation strategy.

If the allocation strategy is diversified, Spot Fleet launches instances from all the Spot Instance pools that you specify.

If the allocation strategy is capacityOptimized, Spot Fleet launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching.

", + "documentation":"

Indicates how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the Spot Fleet request.

If the allocation strategy is lowestPrice, Spot Fleet launches instances from the Spot Instance pools with the lowest price. This is the default allocation strategy.

If the allocation strategy is diversified, Spot Fleet launches instances from all the Spot Instance pools that you specify.

If the allocation strategy is capacityOptimized (recommended), Spot Fleet launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching. To give certain instance types a higher chance of launching first, use capacityOptimizedPrioritized. Set a priority for each instance type by using the Priority parameter for LaunchTemplateOverrides. You can assign the same priority to different LaunchTemplateOverrides. EC2 implements the priorities on a best-effort basis, but optimizes for capacity first. capacityOptimizedPrioritized is supported only if your Spot Fleet uses a launch template. Note that if the OnDemandAllocationStrategy is set to prioritized, the same priority is applied when fulfilling On-Demand capacity.

", "locationName":"allocationStrategy" }, "OnDemandAllocationStrategy":{ @@ -37152,7 +37439,7 @@ "members":{ "AllocationStrategy":{ "shape":"SpotAllocationStrategy", - "documentation":"

Indicates how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the EC2 Fleet.

If the allocation strategy is lowest-price, EC2 Fleet launches instances from the Spot Instance pools with the lowest price. This is the default allocation strategy.

If the allocation strategy is diversified, EC2 Fleet launches instances from all of the Spot Instance pools that you specify.

If the allocation strategy is capacity-optimized, EC2 Fleet launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching.

", + "documentation":"

Indicates how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the EC2 Fleet.

If the allocation strategy is lowest-price, EC2 Fleet launches instances from the Spot Instance pools with the lowest price. This is the default allocation strategy.

If the allocation strategy is diversified, EC2 Fleet launches instances from all of the Spot Instance pools that you specify.

If the allocation strategy is capacity-optimized (recommended), EC2 Fleet launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching. To give certain instance types a higher chance of launching first, use capacity-optimized-prioritized. Set a priority for each instance type by using the Priority parameter for LaunchTemplateOverrides. You can assign the same priority to different LaunchTemplateOverrides. EC2 implements the priorities on a best-effort basis, but optimizes for capacity first. capacity-optimized-prioritized is supported only if your fleet uses a launch template. Note that if the On-Demand AllocationStrategy is set to prioritized, the same priority is applied when fulfilling On-Demand capacity.

", "locationName":"allocationStrategy" }, "MaintenanceStrategies":{ @@ -37198,7 +37485,7 @@ "members":{ "AllocationStrategy":{ "shape":"SpotAllocationStrategy", - "documentation":"

Indicates how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the EC2 Fleet.

If the allocation strategy is lowest-price, EC2 Fleet launches instances from the Spot Instance pools with the lowest price. This is the default allocation strategy.

If the allocation strategy is diversified, EC2 Fleet launches instances from all of the Spot Instance pools that you specify.

If the allocation strategy is capacity-optimized, EC2 Fleet launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching.

" + "documentation":"

Indicates how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the EC2 Fleet.

If the allocation strategy is lowest-price, EC2 Fleet launches instances from the Spot Instance pools with the lowest price. This is the default allocation strategy.

If the allocation strategy is diversified, EC2 Fleet launches instances from all of the Spot Instance pools that you specify.

If the allocation strategy is capacity-optimized (recommended), EC2 Fleet launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching. To give certain instance types a higher chance of launching first, use capacity-optimized-prioritized. Set a priority for each instance type by using the Priority parameter for LaunchTemplateOverrides. You can assign the same priority to different LaunchTemplateOverrides. EC2 implements the priorities on a best-effort basis, but optimizes for capacity first. capacity-optimized-prioritized is supported only if your fleet uses a launch template. Note that if the On-Demand AllocationStrategy is set to prioritized, the same priority is applied when fulfilling On-Demand capacity.

" }, "MaintenanceStrategies":{ "shape":"FleetSpotMaintenanceStrategiesRequest", @@ -37751,7 +38038,7 @@ "type":"structure", "members":{ "AssociationId":{ - "shape":"String", + "shape":"SubnetCidrAssociationId", "documentation":"

The association ID for the CIDR block.

", "locationName":"associationId" }, @@ -42034,5 +42321,5 @@ "totalFpgaMemory":{"type":"integer"}, "totalGpuMemory":{"type":"integer"} }, - "documentation":"Amazon Elastic Compute Cloud

Amazon Elastic Compute Cloud (Amazon EC2) provides secure and resizable computing capacity in the AWS cloud. Using Amazon EC2 eliminates the need to invest in hardware up front, so you can develop and deploy applications faster.

To learn more, see the following resources:

" + "documentation":"Amazon Elastic Compute Cloud

Amazon Elastic Compute Cloud (Amazon EC2) provides secure and resizable computing capacity in the AWS Cloud. Using Amazon EC2 eliminates the need to invest in hardware up front, so you can develop and deploy applications faster. Amazon Virtual Private Cloud (Amazon VPC) enables you to provision a logically isolated section of the AWS Cloud where you can launch AWS resources in a virtual network that you've defined. Amazon Elastic Block Store (Amazon EBS) provides block level storage volumes for use with EC2 instances. EBS volumes are highly available and reliable storage volumes that can be attached to any running instance and used like a hard drive.

To learn more, see the following resources:

" } diff --git a/botocore/data/ecr-public/2020-10-30/service-2.json b/botocore/data/ecr-public/2020-10-30/service-2.json index 6901381f..4a4280aa 100644 --- a/botocore/data/ecr-public/2020-10-30/service-2.json +++ b/botocore/data/ecr-public/2020-10-30/service-2.json @@ -78,6 +78,8 @@ "errors":[ {"shape":"ServerException"}, {"shape":"InvalidParameterException"}, + {"shape":"InvalidTagParameterException"}, + {"shape":"TooManyTagsException"}, {"shape":"RepositoryAlreadyExistsException"}, {"shape":"LimitExceededException"} ], @@ -252,6 +254,21 @@ ], "documentation":"

Notifies Amazon ECR that you intend to upload an image layer.

When an image is pushed, the InitiateLayerUpload API is called once per image layer that has not already been uploaded. Whether or not an image layer has been uploaded is determined by the BatchCheckLayerAvailability API action.

This operation is used by the Amazon ECR proxy and is not generally used by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"ServerException"} + ], + "documentation":"

List the tags for an Amazon ECR Public resource.

" + }, "PutImage":{ "name":"PutImage", "http":{ @@ -320,6 +337,40 @@ ], "documentation":"

Applies a repository policy to the specified public repository to control access permissions. For more information, see Amazon ECR Repository Policies in the Amazon Elastic Container Registry User Guide.

" }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InvalidTagParameterException"}, + {"shape":"TooManyTagsException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"ServerException"} + ], + "documentation":"

Associates the specified tags to a resource with the specified resourceArn. If existing tags on a resource are not specified in the request parameters, they are not changed. When a resource is deleted, the tags associated with that resource are deleted as well.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InvalidTagParameterException"}, + {"shape":"TooManyTagsException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"ServerException"} + ], + "documentation":"

Deletes specified tags from a resource.
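A minimal boto3 sketch of the new ECR Public tagging operations added here (TagResource, ListTagsForResource, UntagResource), assuming an existing public repository; the account ID and repository ARN are hypothetical.

import boto3

client = boto3.client("ecr-public", region_name="us-east-1")
repo_arn = "arn:aws:ecr-public::123456789012:repository/my-repo"  # placeholder

# TagResource: add or overwrite tags on the repository.
client.tag_resource(
    resourceArn=repo_arn,
    tags=[{"Key": "team", "Value": "platform"}],
)

# ListTagsForResource: read the tags back.
print(client.list_tags_for_resource(resourceArn=repo_arn)["tags"])

# UntagResource: remove tags by key.
client.untag_resource(resourceArn=repo_arn, tagKeys=["team"])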

" + }, "UploadLayerPart":{ "name":"UploadLayerPart", "http":{ @@ -512,6 +563,10 @@ "catalogData":{ "shape":"RepositoryCatalogDataInput", "documentation":"

The details about the repository that are publicly visible in the Amazon ECR Public Gallery.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

The metadata that you apply to the repository to help you categorize and organize it. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" } } }, @@ -1074,6 +1129,14 @@ "documentation":"

The specified parameter is invalid. Review the available parameters for the API request.

", "exception":true }, + "InvalidTagParameterException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

An invalid parameter has been specified. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

", + "exception":true + }, "Layer":{ "type":"structure", "members":{ @@ -1185,6 +1248,25 @@ "documentation":"

The operation did not succeed because it would have exceeded a service limit for your account. For more information, see Amazon ECR Service Quotas in the Amazon Elastic Container Registry User Guide.

", "exception":true }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the supported resource is an Amazon ECR Public repository.

" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagList", + "documentation":"

The tags for the resource.

" + } + } + }, "LogoImageBlob":{ "type":"blob", "max":512000, @@ -1657,6 +1739,72 @@ } } }, + "Tag":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

One part of a key-value pair that makes up a tag. A key is a general label that acts like a category for more specific tag values.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

The optional part of a key-value pair that makes up a tag. A value acts as a descriptor within a tag category (key).

" + } + }, + "documentation":"

The metadata that you apply to a resource to help you categorize and organize it. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the resource to which to add tags. Currently, the supported resource is an Amazon ECR Public repository.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

The tags to add to the resource. A tag is an array of key-value pairs. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "TooManyTagsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The list of tags on the repository is over the limit. The maximum number of tags that can be applied to a repository is 50.

", + "exception":true + }, "UnsupportedCommandException":{ "type":"structure", "members":{ @@ -1665,6 +1813,28 @@ "documentation":"

The action is not supported in this Region.

", "exception":true }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the resource from which to delete tags. Currently, the supported resource is an Amazon ECR Public repository.

" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

The keys of the tags to be removed.

" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "UploadId":{ "type":"string", "pattern":"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}" diff --git a/botocore/data/ecs/2014-11-13/service-2.json b/botocore/data/ecs/2014-11-13/service-2.json index 9d13d80c..8f4d11d9 100644 --- a/botocore/data/ecs/2014-11-13/service-2.json +++ b/botocore/data/ecs/2014-11-13/service-2.json @@ -348,6 +348,24 @@ ], "documentation":"

This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.

Returns an endpoint for the Amazon ECS agent to poll for updates.

" }, + "ExecuteCommand":{ + "name":"ExecuteCommand", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExecuteCommandRequest"}, + "output":{"shape":"ExecuteCommandResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ClusterNotFoundException"}, + {"shape":"TargetNotConnectedException"} + ], + "documentation":"

Runs a command remotely on a container within a task.

" + }, "ListAccountSettings":{ "name":"ListAccountSettings", "http":{ @@ -730,6 +748,22 @@ ], "documentation":"

Modifies the parameters for a capacity provider.

" }, + "UpdateCluster":{ + "name":"UpdateCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateClusterRequest"}, + "output":{"shape":"UpdateClusterResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"ClusterNotFoundException"}, + {"shape":"InvalidParameterException"} + ], + "documentation":"

Updates the cluster.

" + }, "UpdateClusterSettings":{ "name":"UpdateClusterSettings", "http":{ @@ -1085,14 +1119,14 @@ }, "weight":{ "shape":"CapacityProviderStrategyItemWeight", - "documentation":"

The weight value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider.

For example, if you have a strategy that contains two capacity providers and both have a weight of 1, then when the base is satisfied, the tasks will be split evenly across the two capacity providers. Using that same logic, if you specify a weight of 1 for capacityProviderA and a weight of 4 for capacityProviderB, then for every one task that is run using capacityProviderA, four tasks would use capacityProviderB.

" + "documentation":"

The weight value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The weight value is taken into consideration after the base value, if defined, is satisfied.

If no weight value is specified, the default value of 0 is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of 0 will not be used to place tasks. If you specify multiple capacity providers in a strategy that all have a weight of 0, any RunTask or CreateService actions using the capacity provider strategy will fail.

An example scenario for using weights is a strategy that contains two capacity providers, both with a weight of 1. When the base is satisfied, the tasks are split evenly across the two capacity providers. Using that same logic, if you specify a weight of 1 for capacityProviderA and a weight of 4 for capacityProviderB, then for every one task that is run using capacityProviderA, four tasks would use capacityProviderB.
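For instance, that weighted split could be expressed with boto3 roughly as follows; the cluster, task definition, and capacity provider names are hypothetical, and both providers must already be associated with the cluster.

import boto3

ecs = boto3.client("ecs")

# Run 10 tasks: at least 2 on capacityProviderA (base=2); the remainder is
# split 1:4 between capacityProviderA and capacityProviderB.
ecs.run_task(
    cluster="my-cluster",
    taskDefinition="my-task-def",
    count=10,
    capacityProviderStrategy=[
        {"capacityProvider": "capacityProviderA", "weight": 1, "base": 2},
        {"capacityProvider": "capacityProviderB", "weight": 4},
    ],
)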

" }, "base":{ "shape":"CapacityProviderStrategyItemBase", - "documentation":"

The base value designates how many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a base defined.

" + "documentation":"

The base value designates how many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a base defined. If no value is specified, the default value of 0 is used.

" } }, - "documentation":"

The details of a capacity provider strategy.

" + "documentation":"

The details of a capacity provider strategy. A capacity provider strategy can be set when using the RunTask or CreateService APIs or as the default capacity provider strategy for a cluster with the CreateCluster API.

Only capacity providers that are already associated with a cluster and have an ACTIVE or UPDATING status can be used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider with a cluster.

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created. New Auto Scaling group capacity providers can be created with the CreateCapacityProvider API operation.

To use an AWS Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used in a capacity provider strategy.

" }, "CapacityProviderStrategyItemBase":{ "type":"integer", @@ -1138,6 +1172,10 @@ "shape":"String", "documentation":"

A user-generated string that you use to identify your cluster.

" }, + "configuration":{ + "shape":"ClusterConfiguration", + "documentation":"

The execute command configuration for the cluster.

" + }, "status":{ "shape":"String", "documentation":"

The status of the cluster. The following are the possible states that will be returned.

ACTIVE

The cluster is ready to accept tasks and if applicable you can register container instances with the cluster.

PROVISIONING

The cluster has capacity providers associated with it and the resources needed for the capacity provider are being created.

DEPROVISIONING

The cluster has capacity providers associated with it and the resources needed for the capacity provider are being deleted.

FAILED

The cluster has capacity providers associated with it and the resources needed for the capacity provider have failed to create.

INACTIVE

The cluster has been deleted. Clusters with an INACTIVE status may remain discoverable in your account for a period of time. However, this behavior is subject to change in the future, so you should not rely on INACTIVE clusters persisting.

" @@ -1189,6 +1227,16 @@ }, "documentation":"

A regional grouping of one or more container instances on which you can run task requests. Each account receives a default cluster the first time you use the Amazon ECS service, but you may also create other clusters. Clusters may contain more than one instance type simultaneously.

" }, + "ClusterConfiguration":{ + "type":"structure", + "members":{ + "executeCommandConfiguration":{ + "shape":"ExecuteCommandConfiguration", + "documentation":"

The details of the execute command configuration.

" + } + }, + "documentation":"

The execute command configuration for the cluster.

" + }, "ClusterContainsContainerInstancesException":{ "type":"structure", "members":{ @@ -1214,6 +1262,7 @@ "type":"string", "enum":[ "ATTACHMENTS", + "CONFIGURATIONS", "SETTINGS", "STATISTICS", "TAGS" @@ -1325,6 +1374,10 @@ "shape":"HealthStatus", "documentation":"

The health status of the container. If health checks are not configured for this container in its task definition, then it reports the health status as UNKNOWN.

" }, + "managedAgents":{ + "shape":"ManagedAgents", + "documentation":"

The details of any Amazon ECS managed agents associated with the container.

" + }, "cpu":{ "shape":"String", "documentation":"

The number of CPU units set for the container. The value will be 0 if no value was specified in the container definition when the task definition was registered.

" @@ -1454,7 +1507,7 @@ }, "privileged":{ "shape":"BoxedBoolean", - "documentation":"

When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root user). This parameter maps to Privileged in the Create a container section of the Docker Remote API and the --privileged option to docker run.

This parameter is not supported for Windows containers or tasks using the Fargate launch type.

" + "documentation":"

When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root user). This parameter maps to Privileged in the Create a container section of the Docker Remote API and the --privileged option to docker run.

This parameter is not supported for Windows containers or tasks run on AWS Fargate.

" }, "readonlyRootFilesystem":{ "shape":"BoxedBoolean", @@ -1766,13 +1819,17 @@ "shape":"ClusterSettings", "documentation":"

The setting to use when creating a cluster. This parameter is used to enable CloudWatch Container Insights for a cluster. If this value is specified, it will override the containerInsights value set with PutAccountSetting or PutAccountSettingDefault.

" }, + "configuration":{ + "shape":"ClusterConfiguration", + "documentation":"

The execute command configuration for the cluster.

" + }, "capacityProviders":{ "shape":"StringList", - "documentation":"

The short name of one or more capacity providers to associate with the cluster.

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created and not already associated with another cluster. New capacity providers can be created with the CreateCapacityProvider API operation.

To use a AWS Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.

The PutClusterCapacityProviders API operation is used to update the list of available capacity providers for a cluster after the cluster is created.

" + "documentation":"

The short name of one or more capacity providers to associate with the cluster. A capacity provider must be associated with a cluster before it can be included as part of the default capacity provider strategy of the cluster or used in a capacity provider strategy when calling the CreateService or RunTask actions.

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created and not already associated with another cluster. New Auto Scaling group capacity providers can be created with the CreateCapacityProvider API operation.

To use an AWS Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.

The PutClusterCapacityProviders API operation is used to update the list of available capacity providers for a cluster after the cluster is created.

" }, "defaultCapacityProviderStrategy":{ "shape":"CapacityProviderStrategy", - "documentation":"

The capacity provider strategy to use by default for the cluster.

When creating a service or running a task on a cluster, if no capacity provider or launch type is specified then the default capacity provider strategy for the cluster is used.

A capacity provider strategy consists of one or more capacity providers along with the base and weight to assign to them. A capacity provider must be associated with the cluster to be used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider with a cluster. Only capacity providers with an ACTIVE or UPDATING status can be used.

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.

To use a AWS Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.

If a default capacity provider strategy is not defined for a cluster during creation, it can be defined later with the PutClusterCapacityProviders API operation.

" + "documentation":"

The capacity provider strategy to set as the default for the cluster. After a default capacity provider strategy is set for a cluster, when you call the RunTask or CreateService APIs with no capacity provider strategy or launch type specified, the default capacity provider strategy for the cluster is used.

If a default capacity provider strategy is not defined for a cluster during creation, it can be defined later with the PutClusterCapacityProviders API operation.

" } } }, @@ -1803,11 +1860,11 @@ }, "loadBalancers":{ "shape":"LoadBalancers", - "documentation":"

A load balancer object representing the load balancers to use with your service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.

If the service is using the rolling update (ECS) deployment controller and using either an Application Load Balancer or Network Load Balancer, you must specify one or more target group ARNs to attach to the service. The service-linked role is required for services that make use of multiple target groups. For more information, see Using Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

If the service is using the CODE_DEPLOY deployment controller, the service is required to use either an Application Load Balancer or Network Load Balancer. When creating an AWS CodeDeploy deployment group, you specify two target groups (referred to as a targetGroupPair). During a deployment, AWS CodeDeploy determines which task set in your service has the status PRIMARY and associates one target group with it, and then associates the other target group with the replacement task set. The load balancer can also have up to two listeners: a required listener for production traffic and an optional listener that allows you perform validation tests with Lambda functions before routing production traffic to it.

After you create a service using the ECS deployment controller, the load balancer name or target group ARN, container name, and container port specified in the service definition are immutable. If you are using the CODE_DEPLOY deployment controller, these values can be changed when updating the service.

For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN, the container name (as it appears in a container definition), and the container port to access from the load balancer. The load balancer name parameter must be omitted. When a task from this service is placed on a container instance, the container instance and port combination is registered as a target in the target group specified here.

For Classic Load Balancers, this object must contain the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer. The target group ARN parameter must be omitted. When a task from this service is placed on a container instance, the container instance is registered with the load balancer specified here.

Services with tasks that use the awsvpc network mode (for example, those with the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not supported. Also, when you create any target groups for these services, you must choose ip as the target type, not instance, because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

" + "documentation":"

A load balancer object representing the load balancers to use with your service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.

If the service is using the rolling update (ECS) deployment controller and using either an Application Load Balancer or Network Load Balancer, you must specify one or more target group ARNs to attach to the service. The service-linked role is required for services that make use of multiple target groups. For more information, see Using service-linked roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

If the service is using the CODE_DEPLOY deployment controller, the service is required to use either an Application Load Balancer or Network Load Balancer. When creating an AWS CodeDeploy deployment group, you specify two target groups (referred to as a targetGroupPair). During a deployment, AWS CodeDeploy determines which task set in your service has the status PRIMARY and associates one target group with it, and then associates the other target group with the replacement task set. The load balancer can also have up to two listeners: a required listener for production traffic and an optional listener that allows you to perform validation tests with Lambda functions before routing production traffic to it.

After you create a service using the ECS deployment controller, the load balancer name or target group ARN, container name, and container port specified in the service definition are immutable. If you are using the CODE_DEPLOY deployment controller, these values can be changed when updating the service.

For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN, the container name (as it appears in a container definition), and the container port to access from the load balancer. The load balancer name parameter must be omitted. When a task from this service is placed on a container instance, the container instance and port combination is registered as a target in the target group specified here.

For Classic Load Balancers, this object must contain the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer. The target group ARN parameter must be omitted. When a task from this service is placed on a container instance, the container instance is registered with the load balancer specified here.

Services with tasks that use the awsvpc network mode (for example, those with the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not supported. Also, when you create any target groups for these services, you must choose ip as the target type, not instance, because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

" }, "serviceRegistries":{ "shape":"ServiceRegistries", - "documentation":"

The details of the service discovery registries to assign to this service. For more information, see Service Discovery.

Service discovery is supported for Fargate tasks if you are using platform version v1.1.0 or later. For more information, see AWS Fargate Platform Versions.

" + "documentation":"

The details of the service discovery registries to assign to this service. For more information, see Service discovery.

Service discovery is supported for Fargate tasks if you are using platform version v1.1.0 or later. For more information, see AWS Fargate platform versions.

" }, "desiredCount":{ "shape":"BoxedInteger", @@ -1819,19 +1876,19 @@ }, "launchType":{ "shape":"LaunchType", - "documentation":"

The launch type on which to run your service. For more information, see Amazon ECS Launch Types in the Amazon Elastic Container Service Developer Guide.

If a launchType is specified, the capacityProviderStrategy parameter must be omitted.

" + "documentation":"

The launch type on which to run your service. The accepted values are FARGATE and EC2. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide.

When a value of FARGATE is specified, your tasks are launched on AWS Fargate On-Demand infrastructure. To use Fargate Spot, you must use a capacity provider strategy with the FARGATE_SPOT capacity provider.

When a value of EC2 is specified, your tasks are launched on Amazon EC2 instances registered to your cluster.

If a launchType is specified, the capacityProviderStrategy parameter must be omitted.

" }, "capacityProviderStrategy":{ "shape":"CapacityProviderStrategy", - "documentation":"

The capacity provider strategy to use for the service.

A capacity provider strategy consists of one or more capacity providers along with the base and weight to assign to them. A capacity provider must be associated with the cluster to be used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider with a cluster. Only capacity providers with an ACTIVE or UPDATING status can be used.

If a capacityProviderStrategy is specified, the launchType parameter must be omitted. If no capacityProviderStrategy or launchType is specified, the defaultCapacityProviderStrategy for the cluster is used.

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.

To use a AWS Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.

The PutClusterCapacityProviders API operation is used to update the list of available capacity providers for a cluster after the cluster is created.

" + "documentation":"

The capacity provider strategy to use for the service.

If a capacityProviderStrategy is specified, the launchType parameter must be omitted. If no capacityProviderStrategy or launchType is specified, the defaultCapacityProviderStrategy for the cluster is used.

" }, "platformVersion":{ "shape":"String", - "documentation":"

The platform version that your tasks in the service are running on. A platform version is specified only for tasks using the Fargate launch type. If one isn't specified, the LATEST platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The platform version that your tasks in the service are running on. A platform version is specified only for tasks using the Fargate launch type. If one isn't specified, the LATEST platform version is used by default. For more information, see AWS Fargate platform versions in the Amazon Elastic Container Service Developer Guide.

" }, "role":{ "shape":"String", - "documentation":"

The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your load balancer on your behalf. This parameter is only permitted if you are using a load balancer with your service and your task definition does not use the awsvpc network mode. If you specify the role parameter, you must also specify a load balancer object with the loadBalancers parameter.

If your account has already created the Amazon ECS service-linked role, that role is used by default for your service unless you specify a role here. The service-linked role is required if your task definition uses the awsvpc network mode or if the service is configured to use service discovery, an external deployment controller, multiple target groups, or Elastic Inference accelerators in which case you should not specify a role here. For more information, see Using Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

If your specified role has a path other than /, then you must either specify the full role ARN (this is recommended) or prefix the role name with the path. For example, if a role with the name bar has a path of /foo/ then you would specify /foo/bar as the role name. For more information, see Friendly Names and Paths in the IAM User Guide.

" + "documentation":"

The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your load balancer on your behalf. This parameter is only permitted if you are using a load balancer with your service and your task definition does not use the awsvpc network mode. If you specify the role parameter, you must also specify a load balancer object with the loadBalancers parameter.

If your account has already created the Amazon ECS service-linked role, that role is used by default for your service unless you specify a role here. The service-linked role is required if your task definition uses the awsvpc network mode or if the service is configured to use service discovery, an external deployment controller, multiple target groups, or Elastic Inference accelerators in which case you should not specify a role here. For more information, see Using service-linked roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

If your specified role has a path other than /, then you must either specify the full role ARN (this is recommended) or prefix the role name with the path. For example, if a role with the name bar has a path of /foo/ then you would specify /foo/bar as the role name. For more information, see Friendly names and paths in the IAM User Guide.

" }, "deploymentConfiguration":{ "shape":"DeploymentConfiguration", @@ -1847,7 +1904,7 @@ }, "networkConfiguration":{ "shape":"NetworkConfiguration", - "documentation":"

The network configuration for the service. This parameter is required for task definitions that use the awsvpc network mode to receive their own elastic network interface, and it is not supported for other network modes. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The network configuration for the service. This parameter is required for task definitions that use the awsvpc network mode to receive their own elastic network interface, and it is not supported for other network modes. For more information, see Task networking in the Amazon Elastic Container Service Developer Guide.

" }, "healthCheckGracePeriodSeconds":{ "shape":"BoxedInteger", @@ -1872,6 +1929,10 @@ "propagateTags":{ "shape":"PropagateTags", "documentation":"

Specifies whether to propagate the tags from the task definition or the service to the tasks in the service. If no value is specified, the tags are not propagated. Tags can only be propagated to the tasks within the service during service creation. To add tags to a task after service creation, use the TagResource API action.

" + }, + "enableExecuteCommand":{ + "shape":"Boolean", + "documentation":"

Whether or not the execute command functionality is enabled for the service. If true, this enables execute command functionality on all containers in the service tasks.
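Pulling several of the CreateService parameters above together, a hedged boto3 sketch of creating a service on Fargate Spot with execute command enabled might look like the following; the cluster, task definition, and subnet ID are placeholders.

import boto3

ecs = boto3.client("ecs")

ecs.create_service(
    cluster="my-cluster",
    serviceName="web",
    taskDefinition="web:1",
    desiredCount=2,
    # Fargate Spot is requested through a capacity provider strategy,
    # not through launchType.
    capacityProviderStrategy=[{"capacityProvider": "FARGATE_SPOT", "weight": 1}],
    networkConfiguration={
        "awsvpcConfiguration": {
            "subnets": ["subnet-0123456789abcdef0"],  # placeholder
            "assignPublicIp": "ENABLED",
        }
    },
    enableExecuteCommand=True,
)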

" } } }, @@ -2659,7 +2720,7 @@ "documentation":"

The file type to use. The only supported value is s3.

" } }, - "documentation":"

A list of files containing the environment variables to pass to a container. You can specify up to ten environment files. The file must have a .env file extension. Each line in an environment file should contain an environment variable in VARIABLE=VALUE format. Lines beginning with # are treated as comments and are ignored. For more information on the environment variable file syntax, see Declare default environment variables in file.

If there are environment variables specified using the environment parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they are processed from the top down. It is recommended to use unique variable names. For more information, see Specifying Environment Variables in the Amazon Elastic Container Service Developer Guide.

This field is not valid for containers in tasks using the Fargate launch type.

" + "documentation":"

A list of files containing the environment variables to pass to a container. You can specify up to ten environment files. The file must have a .env file extension. Each line in an environment file should contain an environment variable in VARIABLE=VALUE format. Lines beginning with # are treated as comments and are ignored. For more information on the environment variable file syntax, see Declare default environment variables in file.

If there are environment variables specified using the environment parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they are processed from the top down. It is recommended to use unique variable names. For more information, see Specifying environment variables in the Amazon Elastic Container Service Developer Guide.

This field is only valid for containers in Fargate tasks that use platform version 1.4.0 or later.
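As an illustration of the environmentFiles parameter described above, a container definition fragment might reference a .env object in S3 like the sketch below; the image, bucket, and object key are hypothetical, and the task execution role would typically need permission to read the object.

# Fragment of a containerDefinitions entry for register_task_definition.
container_definition = {
    "name": "app",
    "image": "123456789012.dkr.ecr.us-east-1.amazonaws.com/app:latest",  # placeholder
    "environmentFiles": [
        {"value": "arn:aws:s3:::my-config-bucket/app.env", "type": "s3"}
    ],
}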

" }, "EnvironmentFileType":{ "type":"string", @@ -2673,6 +2734,117 @@ "type":"list", "member":{"shape":"KeyValuePair"} }, + "ExecuteCommandConfiguration":{ + "type":"structure", + "members":{ + "kmsKeyId":{ + "shape":"String", + "documentation":"

Specify an AWS Key Management Service key ID to encrypt the data between the local client and the container.

" + }, + "logging":{ + "shape":"ExecuteCommandLogging", + "documentation":"

The log setting to use for redirecting logs for your execute command results. The available log settings are NONE, DEFAULT, and OVERRIDE.

" + }, + "logConfiguration":{ + "shape":"ExecuteCommandLogConfiguration", + "documentation":"

The log configuration for the results of the execute command actions. The logs can be sent to CloudWatch Logs or an Amazon S3 bucket. When logging=OVERRIDE is specified, a logConfiguration must be provided.

" + } + }, + "documentation":"

The details of the execute command configuration.

" + }, + "ExecuteCommandLogConfiguration":{ + "type":"structure", + "members":{ + "cloudWatchLogGroupName":{ + "shape":"String", + "documentation":"

The name of the CloudWatch log group to send logs to.

The CloudWatch log group must already be created.

" + }, + "cloudWatchEncryptionEnabled":{ + "shape":"Boolean", + "documentation":"

Whether or not to enable encryption on the CloudWatch logs. If not specified, encryption will be disabled.

" + }, + "s3BucketName":{ + "shape":"String", + "documentation":"

The name of the S3 bucket to send logs to.

The S3 bucket must already be created.

" + }, + "s3EncryptionEnabled":{ + "shape":"Boolean", + "documentation":"

Whether or not to enable encryption on the S3 logs. If not specified, encryption will be disabled.

" + }, + "s3KeyPrefix":{ + "shape":"String", + "documentation":"

An optional folder in the S3 bucket to place logs in.

" + } + }, + "documentation":"

The log configuration for the results of the execute command actions. The logs can be sent to CloudWatch Logs or an Amazon S3 bucket.
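A minimal boto3 sketch of wiring execute command output to CloudWatch Logs through the configuration and executeCommandConfiguration shapes above; the cluster name, KMS key alias, and log group are hypothetical.

import boto3

ecs = boto3.client("ecs")

ecs.create_cluster(
    clusterName="my-cluster",
    configuration={
        "executeCommandConfiguration": {
            "kmsKeyId": "alias/my-ecs-exec-key",  # placeholder
            "logging": "OVERRIDE",
            "logConfiguration": {
                "cloudWatchLogGroupName": "/ecs/exec-output",  # must already exist
                "cloudWatchEncryptionEnabled": True,
            },
        }
    },
)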

" + }, + "ExecuteCommandLogging":{ + "type":"string", + "enum":[ + "NONE", + "DEFAULT", + "OVERRIDE" + ] + }, + "ExecuteCommandRequest":{ + "type":"structure", + "required":[ + "command", + "interactive", + "task" + ], + "members":{ + "cluster":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) or short name of the cluster the task is running in. If you do not specify a cluster, the default cluster is assumed.

" + }, + "container":{ + "shape":"String", + "documentation":"

The name of the container to execute the command on. A container name only needs to be specified for tasks containing multiple containers.

" + }, + "command":{ + "shape":"String", + "documentation":"

The command to run on the container.

" + }, + "interactive":{ + "shape":"Boolean", + "documentation":"

Use this flag to run your command in interactive mode.

" + }, + "task":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) or ID of the task the container is part of.
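Put together, an ExecuteCommand call from boto3 might look roughly like the following sketch; the cluster, task, and container names are hypothetical, and the task must have been started with execute command enabled. The API call only opens the session; the streamUrl and tokenValue in the response are what an SSM client uses to attach to the container.

import boto3

ecs = boto3.client("ecs")

resp = ecs.execute_command(
    cluster="my-cluster",
    task="0123456789abcdef0",   # task ID or ARN (placeholder)
    container="app",            # only needed for multi-container tasks
    command="/bin/sh",
    interactive=True,
)
print(resp["session"]["sessionId"])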

" + } + } + }, + "ExecuteCommandResponse":{ + "type":"structure", + "members":{ + "clusterArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the cluster.

" + }, + "containerArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the container.

" + }, + "containerName":{ + "shape":"String", + "documentation":"

The name of the container.

" + }, + "interactive":{ + "shape":"Boolean", + "documentation":"

Whether or not the execute command session is running in interactive mode.

" + }, + "session":{ + "shape":"Session", + "documentation":"

The details of the SSM session that was created for this instance of execute-command.

" + }, + "taskArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the task.

" + } + } + }, "FSxWindowsFileServerAuthorizationConfig":{ "type":"structure", "required":[ @@ -2746,7 +2918,7 @@ }, "options":{ "shape":"FirelensConfigurationOptionsMap", - "documentation":"

The options to use when configuring the log router. This field is optional and can be used to specify a custom configuration file or to add additional metadata, such as the task, task definition, cluster, and container instance details to the log event. If specified, the syntax to use is \"options\":{\"enable-ecs-log-metadata\":\"true|false\",\"config-file-type:\"s3|file\",\"config-file-value\":\"arn:aws:s3:::mybucket/fluent.conf|filepath\"}. For more information, see Creating a Task Definition that Uses a FireLens Configuration in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The options to use when configuring the log router. This field is optional and can be used to specify a custom configuration file or to add additional metadata, such as the task, task definition, cluster, and container instance details to the log event. If specified, the syntax to use is \"options\":{\"enable-ecs-log-metadata\":\"true|false\",\"config-file-type:\"s3|file\",\"config-file-value\":\"arn:aws:s3:::mybucket/fluent.conf|filepath\"}. For more information, see Creating a Task Definition that Uses a FireLens Configuration in the Amazon Elastic Container Service Developer Guide.

Tasks hosted on AWS Fargate only support the file configuration file type.
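As a concrete reading of the options syntax quoted above, a log-router container definition might look like this sketch; the image and the S3 config file ARN are placeholders, and per the note above a Fargate task would need config-file-type set to file instead of s3.

# Fragment of a containerDefinitions entry that configures FireLens.
log_router = {
    "name": "log_router",
    "image": "public.ecr.aws/aws-observability/aws-for-fluent-bit:stable",  # placeholder
    "essential": True,
    "firelensConfiguration": {
        "type": "fluentbit",
        "options": {
            "enable-ecs-log-metadata": "true",
            "config-file-type": "s3",
            "config-file-value": "arn:aws:s3:::mybucket/fluent.conf",
        },
    },
}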

" } }, "documentation":"

The FireLens configuration for the container. This is used to specify and configure a log router for container logs. For more information, see Custom Log Routing in the Amazon Elastic Container Service Developer Guide.

" @@ -3362,6 +3534,67 @@ ] }, "Long":{"type":"long"}, + "ManagedAgent":{ + "type":"structure", + "members":{ + "lastStartedAt":{ + "shape":"Timestamp", + "documentation":"

The Unix timestamp for when the managed agent was last started.

" + }, + "name":{ + "shape":"ManagedAgentName", + "documentation":"

The name of the managed agent. When the execute command feature is enabled, the managed agent name is ExecuteCommandAgent.

" + }, + "reason":{ + "shape":"String", + "documentation":"

The reason why the managed agent is in its current state.

" + }, + "lastStatus":{ + "shape":"String", + "documentation":"

The last known status of the managed agent.

" + } + }, + "documentation":"

Details about the managed agent status for the container.

" + }, + "ManagedAgentName":{ + "type":"string", + "enum":["ExecuteCommandAgent"] + }, + "ManagedAgentStateChange":{ + "type":"structure", + "required":[ + "containerName", + "managedAgentName", + "status" + ], + "members":{ + "containerName":{ + "shape":"String", + "documentation":"

The name of the container associated with the managed agent.

" + }, + "managedAgentName":{ + "shape":"ManagedAgentName", + "documentation":"

The name of the managed agent.

" + }, + "status":{ + "shape":"String", + "documentation":"

The status of the managed agent.

" + }, + "reason":{ + "shape":"String", + "documentation":"

The reason for the status of the managed agent.

" + } + }, + "documentation":"

An object representing a change in state for a managed agent.

" + }, + "ManagedAgentStateChanges":{ + "type":"list", + "member":{"shape":"ManagedAgentStateChange"} + }, + "ManagedAgents":{ + "type":"list", + "member":{"shape":"ManagedAgent"} + }, "ManagedScaling":{ "type":"structure", "members":{ @@ -3865,7 +4098,7 @@ }, "requiresCompatibilities":{ "shape":"CompatibilityList", - "documentation":"

The task launch type that Amazon ECS should validate the task definition against. This ensures that the task definition parameters are compatible with the specified launch type. If no value is specified, it defaults to EC2.

" + "documentation":"

The task launch type that Amazon ECS should validate the task definition against. A client exception is returned if the task definition doesn't validate against the compatibilities specified. If no value is specified, the parameter is omitted from the response.

" }, "cpu":{ "shape":"String", @@ -3881,11 +4114,11 @@ }, "pidMode":{ "shape":"PidMode", - "documentation":"

The process namespace to use for the containers in the task. The valid values are host or task. If host is specified, then all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. If no value is specified, the default is a private namespace. For more information, see PID settings in the Docker run reference.

If the host PID mode is used, be aware that there is a heightened risk of undesired process namespace expose. For more information, see Docker security.

This parameter is not supported for Windows containers or tasks using the Fargate launch type.

" + "documentation":"

The process namespace to use for the containers in the task. The valid values are host or task. If host is specified, then all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. If no value is specified, the default is a private namespace. For more information, see PID settings in the Docker run reference.

If the host PID mode is used, be aware that there is a heightened risk of undesired process namespace exposure. For more information, see Docker security.

This parameter is not supported for Windows containers or tasks run on AWS Fargate.

" }, "ipcMode":{ "shape":"IpcMode", - "documentation":"

The IPC resource namespace to use for the containers in the task. The valid values are host, task, or none. If host is specified, then all containers within the tasks that specified the host IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same IPC resources. If none is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see IPC settings in the Docker run reference.

If the host IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose. For more information, see Docker security.

If you are setting namespaced kernel parameters using systemControls for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide.

This parameter is not supported for Windows containers or tasks using the Fargate launch type.

" + "documentation":"

The IPC resource namespace to use for the containers in the task. The valid values are host, task, or none. If host is specified, then all containers within the tasks that specified the host IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same IPC resources. If none is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see IPC settings in the Docker run reference.

If the host IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace exposure. For more information, see Docker security.

If you are setting namespaced kernel parameters using systemControls for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide.

This parameter is not supported for Windows containers or tasks run on AWS Fargate.

" }, "proxyConfiguration":{"shape":"ProxyConfiguration"}, "inferenceAccelerators":{ @@ -4005,7 +4238,7 @@ "members":{ "capacityProviderStrategy":{ "shape":"CapacityProviderStrategy", - "documentation":"

The capacity provider strategy to use for the task.

A capacity provider strategy consists of one or more capacity providers along with the base and weight to assign to them. A capacity provider must be associated with the cluster to be used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider with a cluster. Only capacity providers with an ACTIVE or UPDATING status can be used.

If a capacityProviderStrategy is specified, the launchType parameter must be omitted. If no capacityProviderStrategy or launchType is specified, the defaultCapacityProviderStrategy for the cluster is used.

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.

To use a AWS Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.

The PutClusterCapacityProviders API operation is used to update the list of available capacity providers for a cluster after the cluster is created.

" + "documentation":"

The capacity provider strategy to use for the task.

If a capacityProviderStrategy is specified, the launchType parameter must be omitted. If no capacityProviderStrategy or launchType is specified, the defaultCapacityProviderStrategy for the cluster is used.

" }, "cluster":{ "shape":"String", @@ -4019,13 +4252,17 @@ "shape":"Boolean", "documentation":"

Specifies whether to enable Amazon ECS managed tags for the task. For more information, see Tagging Your Amazon ECS Resources in the Amazon Elastic Container Service Developer Guide.

" }, + "enableExecuteCommand":{ + "shape":"Boolean", + "documentation":"

Whether or not to enable the execute command functionality for the containers in this task. If true, this enables execute command functionality on all containers in the task.

" + }, "group":{ "shape":"String", "documentation":"

The name of the task group to associate with the task. The default value is the family name of the task definition (for example, family:my-family-name).

" }, "launchType":{ "shape":"LaunchType", - "documentation":"

The launch type on which to run your task. For more information, see Amazon ECS Launch Types in the Amazon Elastic Container Service Developer Guide.

If a launchType is specified, the capacityProviderStrategy parameter must be omitted.

" + "documentation":"

The launch type on which to run your task. The accepted values are FARGATE and EC2. For more information, see Amazon ECS Launch Types in the Amazon Elastic Container Service Developer Guide.

When a value of FARGATE is specified, your tasks are launched on AWS Fargate On-Demand infrastructure. To use Fargate Spot, you must use a capacity provider strategy with the FARGATE_SPOT capacity provider.

When a value of EC2 is specified, your tasks are launched on Amazon EC2 instances registered to your cluster.

If a launchType is specified, the capacityProviderStrategy parameter must be omitted.

" }, "networkConfiguration":{ "shape":"NetworkConfiguration", @@ -4136,6 +4373,10 @@ "type":"list", "member":{"shape":"Secret"} }, + "SensitiveString":{ + "type":"string", + "sensitive":true + }, "ServerException":{ "type":"structure", "members":{ @@ -4263,6 +4504,10 @@ "propagateTags":{ "shape":"PropagateTags", "documentation":"

Specifies whether to propagate the tags from the task definition or the service to the task. If no value is specified, the tags are not propagated.

" + }, + "enableExecuteCommand":{ + "shape":"Boolean", + "documentation":"

Whether or not the execute command functionality is enabled for the service. If true, the execute command functionality is enabled for all containers in tasks as part of the service.

" } }, "documentation":"

Details on a service within a cluster.

" @@ -4341,6 +4586,24 @@ "type":"list", "member":{"shape":"Service"} }, + "Session":{ + "type":"structure", + "members":{ + "sessionId":{ + "shape":"String", + "documentation":"

The ID of the execute command session.

" + }, + "streamUrl":{ + "shape":"String", + "documentation":"

A URL back to the managed agent on the container that the SSM Session Manager client uses to send commands and receive output from the container.

" + }, + "tokenValue":{ + "shape":"SensitiveString", + "documentation":"

An encrypted token value containing session and caller information. Used to authenticate the connection to the container.

" + } + }, + "documentation":"

The details of the execute command session.

" + }, "Setting":{ "type":"structure", "members":{ @@ -4406,6 +4669,10 @@ "shape":"Boolean", "documentation":"

Specifies whether to enable Amazon ECS managed tags for the task. For more information, see Tagging Your Amazon ECS Resources in the Amazon Elastic Container Service Developer Guide.

" }, + "enableExecuteCommand":{ + "shape":"Boolean", + "documentation":"

Whether or not the execute command functionality is enabled for the task. If true, this enables execute command functionality on all containers in the task.

" + }, "group":{ "shape":"String", "documentation":"

The name of the task group to associate with the task. The default value is the family name of the task definition (for example, family:my-family-name).

" @@ -4590,6 +4857,10 @@ "shape":"AttachmentStateChanges", "documentation":"

Any attachments associated with the state change request.

" }, + "managedAgents":{ + "shape":"ManagedAgentStateChanges", + "documentation":"

The details for the managed agent associated with the task.

" + }, "pullStartedAt":{ "shape":"Timestamp", "documentation":"

The Unix timestamp for when the container image pull began.

" @@ -4689,6 +4960,13 @@ "max":50, "min":0 }, + "TargetNotConnectedException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The target container is not properly configured with the execute command agent or the container is no longer active or running.

", + "exception":true + }, "TargetNotFoundException":{ "type":"structure", "members":{ @@ -4751,6 +5029,10 @@ "shape":"String", "documentation":"

The desired status of the task. For more information, see Task Lifecycle.

" }, + "enableExecuteCommand":{ + "shape":"Boolean", + "documentation":"

Whether or not execute command functionality is enabled for this task. If true, this enables execute command functionality on all containers in the task.

" + }, "executionStoppedAt":{ "shape":"Timestamp", "documentation":"

The Unix timestamp for when the task execution stopped.

" @@ -4855,7 +5137,7 @@ }, "taskRoleArn":{ "shape":"String", - "documentation":"

The short name or full Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants containers in the task permission to call AWS APIs on your behalf. For more information, see Amazon ECS Task Role in the Amazon Elastic Container Service Developer Guide.

IAM roles for tasks on Windows require that the -EnableTaskIAMRole option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code in order to take advantage of the feature. For more information, see Windows IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The short name or full Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants containers in the task permission to call AWS APIs on your behalf. For more information, see Amazon ECS Task Role in the Amazon Elastic Container Service Developer Guide.

IAM roles for tasks on Windows require that the -EnableTaskIAMRole option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code in order to take advantage of the feature. For more information, see Windows IAM roles for tasks in the Amazon Elastic Container Service Developer Guide.

" }, "executionRoleArn":{ "shape":"String", @@ -4871,7 +5153,7 @@ }, "volumes":{ "shape":"VolumeList", - "documentation":"

The list of volume definitions for the task.

If your tasks are using the Fargate launch type, the host and sourcePath parameters are not supported.

For more information about volume definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The list of data volume definitions for the task. For more information, see Using data volumes in tasks in the Amazon Elastic Container Service Developer Guide.

The host and sourcePath parameters are not supported for tasks run on AWS Fargate.

" }, "status":{ "shape":"TaskDefinitionStatus", @@ -4879,19 +5161,19 @@ }, "requiresAttributes":{ "shape":"RequiresAttributes", - "documentation":"

The container instance attributes required by your task. This field is not valid if you are using the Fargate launch type for your task.

" + "documentation":"

The container instance attributes required by your task. When an Amazon EC2 instance is registered to your cluster, the Amazon ECS container agent assigns some standard attributes to the instance. You can apply custom attributes, specified as key-value pairs using the Amazon ECS console or the PutAttributes API. These attributes are used when considering task placement for tasks hosted on Amazon EC2 instances. For more information, see Attributes in the Amazon Elastic Container Service Developer Guide.

This parameter is not supported for tasks run on AWS Fargate.

" }, "placementConstraints":{ "shape":"TaskDefinitionPlacementConstraints", - "documentation":"

An array of placement constraint objects to use for tasks. This field is not valid if you are using the Fargate launch type for your task.

" + "documentation":"

An array of placement constraint objects to use for tasks.

This parameter is not supported for tasks run on AWS Fargate.

" }, "compatibilities":{ "shape":"CompatibilityList", - "documentation":"

The launch type to use with your task. For more information, see Amazon ECS Launch Types in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The task launch types the task definition validated against during task definition registration. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide.

" }, "requiresCompatibilities":{ "shape":"CompatibilityList", - "documentation":"

The launch type the task requires. If no value is specified, it will default to EC2. Valid values include EC2 and FARGATE.

" + "documentation":"

The task launch types the task definition was validated against. To determine which task launch types the task definition is validated for, see the TaskDefinition$compatibilities parameter.

" }, "cpu":{ "shape":"String", @@ -4899,7 +5181,7 @@ }, "memory":{ "shape":"String", - "documentation":"

The amount (in MiB) of memory used by the task.

If using the EC2 launch type, you must specify either a task-level memory value or a container-level memory value. This field is optional and any value can be used. If a task-level memory value is specified then the container-level memory value is optional. For more information regarding container-level memory and memory reservation, see ContainerDefinition.

If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the cpu parameter:

" + "documentation":"

The amount (in MiB) of memory used by the task.

If your tasks will be run on Amazon EC2 instances, you must specify either a task-level memory value or a container-level memory value. This field is optional and any value can be used. If a task-level memory value is specified then the container-level memory value is optional. For more information regarding container-level memory and memory reservation, see ContainerDefinition.

If your tasks will be run on AWS Fargate, this field is required and you must use one of the following values, which determines your range of valid values for the cpu parameter:

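For example (a sketch with placeholder names and image), registering a Fargate-compatible definition pairs one of the documented cpu/memory combinations:

    import boto3

    ecs = boto3.client('ecs')

    ecs.register_task_definition(
        family='web',                          # hypothetical family name
        requiresCompatibilities=['FARGATE'],
        networkMode='awsvpc',
        cpu='256',     # CPU units; this choice constrains the valid memory values
        memory='512',  # MiB; must be one of the values allowed for cpu='256'
        containerDefinitions=[{
            'name': 'web',
            'image': 'nginx:latest',           # hypothetical image
            'essential': True,
        }],
    )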
" }, "inferenceAccelerators":{ "shape":"InferenceAccelerators", @@ -4907,11 +5189,11 @@ }, "pidMode":{ "shape":"PidMode", - "documentation":"

The process namespace to use for the containers in the task. The valid values are host or task. If host is specified, then all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. If no value is specified, the default is a private namespace. For more information, see PID settings in the Docker run reference.

If the host PID mode is used, be aware that there is a heightened risk of undesired process namespace exposure. For more information, see Docker security.

This parameter is not supported for Windows containers or tasks using the Fargate launch type.

" + "documentation":"

The process namespace to use for the containers in the task. The valid values are host or task. If host is specified, then all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. If no value is specified, the default is a private namespace. For more information, see PID settings in the Docker run reference.

If the host PID mode is used, be aware that there is a heightened risk of undesired process namespace exposure. For more information, see Docker security.

This parameter is not supported for Windows containers or tasks run on AWS Fargate.

" }, "ipcMode":{ "shape":"IpcMode", - "documentation":"

The IPC resource namespace to use for the containers in the task. The valid values are host, task, or none. If host is specified, then all containers within the tasks that specified the host IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same IPC resources. If none is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see IPC settings in the Docker run reference.

If the host IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace exposure. For more information, see Docker security.

If you are setting namespaced kernel parameters using systemControls for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide.

This parameter is not supported for Windows containers or tasks using the Fargate launch type.

" + "documentation":"

The IPC resource namespace to use for the containers in the task. The valid values are host, task, or none. If host is specified, then all containers within the tasks that specified the host IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same IPC resources. If none is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see IPC settings in the Docker run reference.

If the host IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace exposure. For more information, see Docker security.

If you are setting namespaced kernel parameters using systemControls for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide.

This parameter is not supported for Windows containers or tasks run on AWS Fargate.

" }, "proxyConfiguration":{ "shape":"ProxyConfiguration", @@ -4957,10 +5239,10 @@ }, "expression":{ "shape":"String", - "documentation":"

A cluster query language expression to apply to the constraint. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

A cluster query language expression to apply to the constraint. For more information, see Cluster query language in the Amazon Elastic Container Service Developer Guide.

" } }, - "documentation":"

An object representing a constraint on task placement in the task definition. For more information, see Task Placement Constraints in the Amazon Elastic Container Service Developer Guide.

If you are using the Fargate launch type, task placement constraints are not supported.

" + "documentation":"

An object representing a constraint on task placement in the task definition. For more information, see Task placement constraints in the Amazon Elastic Container Service Developer Guide.

Task placement constraints are not supported for tasks run on AWS Fargate.

" }, "TaskDefinitionPlacementConstraintType":{ "type":"string", @@ -5072,7 +5354,7 @@ }, "launchType":{ "shape":"LaunchType", - "documentation":"

The launch type the tasks in the task set are using. For more information, see Amazon ECS Launch Types in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The launch type the tasks in the task set are using. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide.

" }, "capacityProviderStrategy":{ "shape":"CapacityProviderStrategy", @@ -5080,7 +5362,7 @@ }, "platformVersion":{ "shape":"String", - "documentation":"

The platform version on which the tasks in the task set are running. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The AWS Fargate platform version on which the tasks in the task set are running. A platform version is only specified for tasks run on AWS Fargate. For more information, see AWS Fargate platform versions in the Amazon Elastic Container Service Developer Guide.

" }, "networkConfiguration":{ "shape":"NetworkConfiguration", @@ -5092,7 +5374,7 @@ }, "serviceRegistries":{ "shape":"ServiceRegistries", - "documentation":"

The details of the service discovery registries to assign to this task set. For more information, see Service Discovery.

" + "documentation":"

The details of the service discovery registries to assign to this task set. For more information, see Service discovery.

" }, "scale":{ "shape":"Scale", @@ -5277,6 +5559,30 @@ "capacityProvider":{"shape":"CapacityProvider"} } }, + "UpdateClusterRequest":{ + "type":"structure", + "required":["cluster"], + "members":{ + "cluster":{ + "shape":"String", + "documentation":"

The name of the cluster to modify the settings for.

" + }, + "settings":{ + "shape":"ClusterSettings", + "documentation":"

The cluster settings for your cluster.

" + }, + "configuration":{ + "shape":"ClusterConfiguration", + "documentation":"

The execute command configuration for the cluster.

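A minimal sketch of the new UpdateCluster operation, assuming a cluster named demo; the containerInsights setting is shown, and the optional configuration argument is only referenced in a comment:

    import boto3

    ecs = boto3.client('ecs')

    ecs.update_cluster(
        cluster='demo',   # hypothetical cluster name
        settings=[
            {'name': 'containerInsights', 'value': 'enabled'},
        ],
        # The optional configuration argument carries the execute command
        # configuration described by ClusterConfiguration in this model.
    )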
" + } + } + }, + "UpdateClusterResponse":{ + "type":"structure", + "members":{ + "cluster":{"shape":"Cluster"} + } + }, "UpdateClusterSettingsRequest":{ "type":"structure", "required":[ @@ -5440,6 +5746,10 @@ "healthCheckGracePeriodSeconds":{ "shape":"BoxedInteger", "documentation":"

The period of time, in seconds, that the Amazon ECS service scheduler should ignore unhealthy Elastic Load Balancing target health checks after a task has first started. This is only valid if your service is configured to use a load balancer. If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can specify a health check grace period of up to 2,147,483,647 seconds. During that time, the Amazon ECS service scheduler ignores the Elastic Load Balancing health check status. This grace period can prevent the ECS service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.

" + }, + "enableExecuteCommand":{ + "shape":"BoxedBoolean", + "documentation":"

If true, this enables execute command functionality on all task containers.

If you do not want to override the value that was set when the service was created, you can set this to null when performing this action.

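A hedged sketch for an existing service (names are placeholders):

    import boto3

    ecs = boto3.client('ecs')

    ecs.update_service(
        cluster='demo',               # hypothetical cluster name
        service='web-svc',            # hypothetical service name
        enableExecuteCommand=True,    # omit this argument to keep the value set at creation
        forceNewDeployment=True,      # running tasks only pick the flag up when they are replaced
    )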
" } } }, @@ -5513,7 +5823,7 @@ }, "dockerVolumeConfiguration":{ "shape":"DockerVolumeConfiguration", - "documentation":"

This parameter is specified when you are using Docker volumes. Docker volumes are only supported when you are using the EC2 launch type. Windows containers only support the use of the local driver. To use bind mounts, specify the host parameter instead.

" + "documentation":"

This parameter is specified when you are using Docker volumes.

Windows containers only support the use of the local driver. To use bind mounts, specify the host parameter instead.

Docker volumes are not supported by tasks run on AWS Fargate.

" }, "efsVolumeConfiguration":{ "shape":"EFSVolumeConfiguration", @@ -5549,5 +5859,5 @@ "member":{"shape":"Volume"} } }, - "documentation":"Amazon Elastic Container Service

Amazon Elastic Container Service (Amazon ECS) is a highly scalable, fast, container management service that makes it easy to run, stop, and manage Docker containers on a cluster. You can host your cluster on a serverless infrastructure that is managed by Amazon ECS by launching your services or tasks using the Fargate launch type. For more control, you can host your tasks on a cluster of Amazon Elastic Compute Cloud (Amazon EC2) instances that you manage by using the EC2 launch type. For more information about launch types, see Amazon ECS Launch Types.

Amazon ECS lets you launch and stop container-based applications with simple API calls, allows you to get the state of your cluster from a centralized service, and gives you access to many familiar Amazon EC2 features.

You can use Amazon ECS to schedule the placement of containers across your cluster based on your resource needs, isolation policies, and availability requirements. Amazon ECS eliminates the need for you to operate your own cluster management and configuration management systems or worry about scaling your management infrastructure.

" + "documentation":"Amazon Elastic Container Service

Amazon Elastic Container Service (Amazon ECS) is a highly scalable, fast, container management service that makes it easy to run, stop, and manage Docker containers on a cluster. You can host your cluster on a serverless infrastructure that is managed by Amazon ECS by launching your services or tasks on AWS Fargate. For more control, you can host your tasks on a cluster of Amazon Elastic Compute Cloud (Amazon EC2) instances that you manage.

Amazon ECS makes it easy to launch and stop container-based applications with simple API calls, allows you to get the state of your cluster from a centralized service, and gives you access to many familiar Amazon EC2 features.

You can use Amazon ECS to schedule the placement of containers across your cluster based on your resource needs, isolation policies, and availability requirements. Amazon ECS eliminates the need for you to operate your own cluster management and configuration management systems or worry about scaling your management infrastructure.

" } diff --git a/botocore/data/efs/2015-02-01/service-2.json b/botocore/data/efs/2015-02-01/service-2.json index 6fa2a241..d682593e 100644 --- a/botocore/data/efs/2015-02-01/service-2.json +++ b/botocore/data/efs/2015-02-01/service-2.json @@ -28,7 +28,7 @@ {"shape":"FileSystemNotFound"}, {"shape":"AccessPointLimitExceeded"} ], - "documentation":"

Creates an EFS access point. An access point is an application-specific view into an EFS file system that applies an operating system user and group, and a file system path, to any file system request made through the access point. The operating system user and group override any identity information provided by the NFS client. The file system path is exposed as the access point's root directory. Applications using the access point can only access data in their own directory and below. To learn more, see Mounting a File System Using EFS Access Points.

This operation requires permissions for the elasticfilesystem:CreateAccessPoint action.

" + "documentation":"

Creates an EFS access point. An access point is an application-specific view into an EFS file system that applies an operating system user and group, and a file system path, to any file system request made through the access point. The operating system user and group override any identity information provided by the NFS client. The file system path is exposed as the access point's root directory. Applications using the access point can only access data in their own directory and below. To learn more, see Mounting a file system using EFS access points.

This operation requires permissions for the elasticfilesystem:CreateAccessPoint action.

" }, "CreateFileSystem":{ "name":"CreateFileSystem", @@ -45,9 +45,10 @@ {"shape":"FileSystemAlreadyExists"}, {"shape":"FileSystemLimitExceeded"}, {"shape":"InsufficientThroughputCapacity"}, - {"shape":"ThroughputLimitExceeded"} + {"shape":"ThroughputLimitExceeded"}, + {"shape":"UnsupportedAvailabilityZone"} ], - "documentation":"

Creates a new, empty file system. The operation requires a creation token in the request that Amazon EFS uses to ensure idempotent creation (calling the operation with same creation token has no effect). If a file system does not currently exist that is owned by the caller's AWS account with the specified creation token, this operation does the following:

Otherwise, this operation returns a FileSystemAlreadyExists error with the ID of the existing file system.

For basic use cases, you can use a randomly generated UUID for the creation token.

The idempotent operation allows you to retry a CreateFileSystem call without risk of creating an extra file system. This can happen when an initial call fails in a way that leaves it uncertain whether or not a file system was actually created. An example might be that a transport level timeout occurred or your connection was reset. As long as you use the same creation token, if the initial call had succeeded in creating a file system, the client can learn of its existence from the FileSystemAlreadyExists error.

The CreateFileSystem call returns while the file system's lifecycle state is still creating. You can check the file system creation status by calling the DescribeFileSystems operation, which among other things returns the file system state.

This operation also takes an optional PerformanceMode parameter that you choose for your file system. We recommend generalPurpose performance mode for most file systems. File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created. For more information, see Amazon EFS: Performance Modes.

After the file system is fully created, Amazon EFS sets its lifecycle state to available, at which point you can create one or more mount targets for the file system in your VPC. For more information, see CreateMountTarget. You mount your Amazon EFS file system on EC2 instances in your VPC by using the mount target. For more information, see Amazon EFS: How it Works.

This operation requires permissions for the elasticfilesystem:CreateFileSystem action.

" + "documentation":"

Creates a new, empty file system. The operation requires a creation token in the request that Amazon EFS uses to ensure idempotent creation (calling the operation with same creation token has no effect). If a file system does not currently exist that is owned by the caller's AWS account with the specified creation token, this operation does the following:

Otherwise, this operation returns a FileSystemAlreadyExists error with the ID of the existing file system.

For basic use cases, you can use a randomly generated UUID for the creation token.

The idempotent operation allows you to retry a CreateFileSystem call without risk of creating an extra file system. This can happen when an initial call fails in a way that leaves it uncertain whether or not a file system was actually created. An example might be that a transport level timeout occurred or your connection was reset. As long as you use the same creation token, if the initial call had succeeded in creating a file system, the client can learn of its existence from the FileSystemAlreadyExists error.

For more information, see Creating a file system in the Amazon EFS User Guide.

The CreateFileSystem call returns while the file system's lifecycle state is still creating. You can check the file system creation status by calling the DescribeFileSystems operation, which among other things returns the file system state.

This operation accepts an optional PerformanceMode parameter that you choose for your file system. We recommend generalPurpose performance mode for most file systems. File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created. For more information, see Amazon EFS performance modes.

You can set the throughput mode for the file system using the ThroughputMode parameter.

After the file system is fully created, Amazon EFS sets its lifecycle state to available, at which point you can create one or more mount targets for the file system in your VPC. For more information, see CreateMountTarget. You mount your Amazon EFS file system on EC2 instances in your VPC by using the mount target. For more information, see Amazon EFS: How it Works.

This operation requires permissions for the elasticfilesystem:CreateFileSystem action.

" }, "CreateMountTarget":{ "name":"CreateMountTarget", @@ -70,9 +71,10 @@ {"shape":"NetworkInterfaceLimitExceeded"}, {"shape":"SecurityGroupLimitExceeded"}, {"shape":"SecurityGroupNotFound"}, - {"shape":"UnsupportedAvailabilityZone"} + {"shape":"UnsupportedAvailabilityZone"}, + {"shape":"AvailabilityZonesMismatch"} ], - "documentation":"

Creates a mount target for a file system. You can then mount the file system on EC2 instances by using the mount target.

You can create one mount target in each Availability Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone share a single mount target for a given file system. If you have multiple subnets in an Availability Zone, you create a mount target in one of the subnets. EC2 instances do not need to be in the same subnet as the mount target in order to access their file system. For more information, see Amazon EFS: How it Works.

In the request, you also specify a file system ID for which you are creating the mount target and the file system's lifecycle state must be available. For more information, see DescribeFileSystems.

In the request, you also provide a subnet ID, which determines the following:

After creating the mount target, Amazon EFS returns a response that includes a MountTargetId and an IpAddress. You use this IP address when mounting the file system in an EC2 instance. You can also use the mount target's DNS name when mounting the file system. The EC2 instance on which you mount the file system by using the mount target can resolve the mount target's DNS name to its IP address. For more information, see How it Works: Implementation Overview.

Note that you can create mount targets for a file system in only one VPC, and there can be only one mount target per Availability Zone. That is, if the file system already has one or more mount targets created for it, the subnet specified in the request to add another mount target must meet the following requirements:

If the request satisfies the requirements, Amazon EFS does the following:

The CreateMountTarget call returns only after creating the network interface, but while the mount target state is still creating, you can check the mount target creation status by calling the DescribeMountTargets operation, which among other things returns the mount target state.

We recommend that you create a mount target in each of the Availability Zones. There are cost considerations for using a file system in an Availability Zone through a mount target created in another Availability Zone. For more information, see Amazon EFS. In addition, by always using a mount target local to the instance's Availability Zone, you eliminate a partial failure scenario. If the Availability Zone in which your mount target is created goes down, then you can't access your file system through that mount target.

This operation requires permissions for the following action on the file system:

This operation also requires permissions for the following Amazon EC2 actions:

" + "documentation":"

Creates a mount target for a file system. You can then mount the file system on EC2 instances by using the mount target.

You can create one mount target in each Availability Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone share a single mount target for a given file system. If you have multiple subnets in an Availability Zone, you create a mount target in one of the subnets. EC2 instances do not need to be in the same subnet as the mount target in order to access their file system.

You can create only one mount target for an EFS file system using One Zone storage classes. You must create that mount target in the same Availability Zone in which the file system is located. Use the AvailabilityZoneName and AvailabilityZoneId properties in the DescribeFileSystems response object to get this information. Use the subnetId associated with the file system's Availability Zone when creating the mount target.

For more information, see Amazon EFS: How it Works.

To create a mount target for a file system, the file system's lifecycle state must be available. For more information, see DescribeFileSystems.

In the request, provide the following:

After creating the mount target, Amazon EFS returns a response that includes a MountTargetId and an IpAddress. You use this IP address when mounting the file system in an EC2 instance. You can also use the mount target's DNS name when mounting the file system. The EC2 instance on which you mount the file system by using the mount target can resolve the mount target's DNS name to its IP address. For more information, see How it Works: Implementation Overview.

Note that you can create mount targets for a file system in only one VPC, and there can be only one mount target per Availability Zone. That is, if the file system already has one or more mount targets created for it, the subnet specified in the request to add another mount target must meet the following requirements:

If the request satisfies the requirements, Amazon EFS does the following:

The CreateMountTarget call returns only after creating the network interface, but while the mount target state is still creating, you can check the mount target creation status by calling the DescribeMountTargets operation, which among other things returns the mount target state.

We recommend that you create a mount target in each of the Availability Zones. There are cost considerations for using a file system in an Availability Zone through a mount target created in another Availability Zone. For more information, see Amazon EFS. In addition, by always using a mount target local to the instance's Availability Zone, you eliminate a partial failure scenario. If the Availability Zone in which your mount target is created goes down, then you can't access your file system through that mount target.

This operation requires permissions for the following action on the file system:

This operation also requires permissions for the following Amazon EC2 actions:

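A rough sketch under the assumptions above (all IDs are placeholders):

    import boto3

    efs = boto3.client('efs')

    fs_id = 'fs-0123456789abcdef0'   # hypothetical file system ID

    # For One Zone file systems, AvailabilityZoneName/AvailabilityZoneId in the
    # DescribeFileSystems response indicate which Availability Zone to use.
    fs = efs.describe_file_systems(FileSystemId=fs_id)['FileSystems'][0]
    print(fs.get('AvailabilityZoneName'))

    efs.create_mount_target(
        FileSystemId=fs_id,
        SubnetId='subnet-0123456789abcdef0',        # hypothetical; must be in that Availability Zone
        SecurityGroups=['sg-0123456789abcdef0'],    # hypothetical security group
    )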
" }, "CreateTags":{ "name":"CreateTags", @@ -374,7 +376,7 @@ {"shape":"InvalidPolicyException"}, {"shape":"IncorrectFileSystemLifeCycleState"} ], - "documentation":"

Applies an Amazon EFS FileSystemPolicy to an Amazon EFS file system. A file system policy is an IAM resource-based policy and can contain multiple policy statements. A file system always has exactly one file system policy, which can be the default policy or an explicit policy set or updated using this API operation. When an explicit policy is set, it overrides the default policy. For more information about the default file system policy, see Default EFS File System Policy.

This operation requires permissions for the elasticfilesystem:PutFileSystemPolicy action.

" + "documentation":"

Applies an Amazon EFS FileSystemPolicy to an Amazon EFS file system. A file system policy is an IAM resource-based policy and can contain multiple policy statements. A file system always has exactly one file system policy, which can be the default policy or an explicit policy set or updated using this API operation. EFS file system policies have a 20,000 character limit. When an explicit policy is set, it overrides the default policy. For more information about the default file system policy, see Default EFS File System Policy.

EFS file system policies have a 20,000 character limit.

This operation requires permissions for the elasticfilesystem:PutFileSystemPolicy action.

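A hedged sketch of attaching a policy; the statement below is illustrative rather than a recommended policy, and the file system ID is a placeholder:

    import json

    import boto3

    efs = boto3.client('efs')

    policy = {
        'Version': '2012-10-17',
        'Statement': [{
            'Sid': 'DenyUnencryptedTransport',   # illustrative statement only
            'Effect': 'Deny',
            'Principal': {'AWS': '*'},
            'Action': '*',
            'Resource': 'arn:aws:elasticfilesystem:us-east-1:111122223333:file-system/fs-0123456789abcdef0',
            'Condition': {'Bool': {'aws:SecureTransport': 'false'}},
        }],
    }

    efs.put_file_system_policy(
        FileSystemId='fs-0123456789abcdef0',   # hypothetical file system ID
        Policy=json.dumps(policy),             # JSON string, subject to the 20,000 character limit
    )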
" }, "PutLifecycleConfiguration":{ "name":"PutLifecycleConfiguration", @@ -462,7 +464,11 @@ "error":{"httpStatusCode":409}, "exception":true }, - "AccessPointArn":{"type":"string"}, + "AccessPointArn":{ + "type":"string", + "max":128, + "pattern":"^arn:aws[-a-z]*:elasticfilesystem:[0-9a-z-:]+:access-point/fsap-[0-9a-f]{8,40}$" + }, "AccessPointDescription":{ "type":"structure", "members":{ @@ -513,7 +519,11 @@ "type":"list", "member":{"shape":"AccessPointDescription"} }, - "AccessPointId":{"type":"string"}, + "AccessPointId":{ + "type":"string", + "max":128, + "pattern":"^(arn:aws[-a-z]*:elasticfilesystem:[0-9a-z-:]+:access-point/fsap-[0-9a-f]{8,40}|fsap-[0-9a-f]{8,40})$" + }, "AccessPointLimitExceeded":{ "type":"structure", "required":["ErrorCode"], @@ -537,22 +547,38 @@ "exception":true }, "AvailabilityZoneId":{"type":"string"}, - "AvailabilityZoneName":{"type":"string"}, + "AvailabilityZoneName":{ + "type":"string", + "max":64, + "min":1, + "pattern":".+" + }, + "AvailabilityZonesMismatch":{ + "type":"structure", + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Returned if the Availability Zone that was specified for a mount target is different from the Availability Zone that was specified for One Zone storage classes. For more information, see Regional and One Zone storage redundancy.

", + "error":{"httpStatusCode":400}, + "exception":true + }, "AwsAccountId":{ "type":"string", "max":14, "pattern":"^(\\d{12})|(\\d{4}-\\d{4}-\\d{4})$" }, + "Backup":{"type":"boolean"}, "BackupPolicy":{ "type":"structure", "required":["Status"], "members":{ "Status":{ "shape":"Status", - "documentation":"

Describes the status of the file system's backup policy.

" + "documentation":"

Describes the status of the file system's backup policy.

" } }, - "documentation":"

The backup policy for the file system, showing the current status. If ENABLED, the file system is being backed up.

" + "documentation":"

The backup policy for the file system used to create automatic daily backups. If status has a value of ENABLED, the file system is being automatically backed up. For more information, see Automatic backups.

" }, "BackupPolicyDescription":{ "type":"structure", @@ -578,7 +604,8 @@ "ClientToken":{ "type":"string", "max":64, - "min":1 + "min":1, + "pattern":".+" }, "CreateAccessPointRequest":{ "type":"structure", @@ -606,7 +633,7 @@ }, "RootDirectory":{ "shape":"RootDirectory", - "documentation":"

Specifies the directory on the Amazon EFS file system that the access point exposes as the root directory of your file system to NFS clients using the access point. The clients using the access point can only access the root directory and below. If the RootDirectory > Path specified does not exist, EFS creates it and applies the CreationInfo settings when a client connects to an access point. When specifying a RootDirectory, you need to provide the Path, and the CreationInfo is optional.

" + "documentation":"

Specifies the directory on the Amazon EFS file system that the access point exposes as the root directory of your file system to NFS clients using the access point. The clients using the access point can only access the root directory and below. If the RootDirectory > Path specified does not exist, EFS creates it and applies the CreationInfo settings when a client connects to an access point. When specifying a RootDirectory, you need to provide the Path, and the CreationInfo.

Amazon EFS creates a root directory only if you have provided the CreationInfo: OwnerUid, OwnerGid, and permissions for the directory. If you do not provide this information, Amazon EFS does not create the root directory. If the root directory does not exist, attempts to mount using the access point will fail.

" } } }, @@ -621,7 +648,7 @@ }, "PerformanceMode":{ "shape":"PerformanceMode", - "documentation":"

The performance mode of the file system. We recommend generalPurpose performance mode for most file systems. File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created.

" + "documentation":"

The performance mode of the file system. We recommend generalPurpose performance mode for most file systems. File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created.

The maxIO mode is not supported on file systems using One Zone storage classes.

" }, "Encrypted":{ "shape":"Encrypted", @@ -629,15 +656,23 @@ }, "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The ID of the AWS KMS CMK to be used to protect the encrypted file system. This parameter is only required if you want to use a nondefault CMK. If this parameter is not specified, the default CMK for Amazon EFS is used. This ID can be in one of the following formats:

If KmsKeyId is specified, the CreateFileSystemRequest$Encrypted parameter must be set to true.

EFS accepts only symmetric CMKs. You cannot use asymmetric CMKs with EFS file systems.

" + "documentation":"

The ID of the AWS KMS CMK to be used to protect the encrypted file system. This parameter is only required if you want to use a non-default CMK. If this parameter is not specified, the default CMK for Amazon EFS is used. This ID can be in one of the following formats:

If KmsKeyId is specified, the CreateFileSystemRequest$Encrypted parameter must be set to true.

EFS accepts only symmetric CMKs. You cannot use asymmetric CMKs with EFS file systems.

" }, "ThroughputMode":{ "shape":"ThroughputMode", - "documentation":"

The throughput mode for the file system to be created. There are two throughput modes to choose from for your file system: bursting and provisioned. If you set ThroughputMode to provisioned, you must also set a value for ProvisionedThroughPutInMibps. You can decrease your file system's throughput in Provisioned Throughput mode or change between the throughput modes as long as it’s been more than 24 hours since the last decrease or throughput mode change. For more, see Specifying Throughput with Provisioned Mode in the Amazon EFS User Guide.

" + "documentation":"

Specifies the throughput mode for the file system, either bursting or provisioned. If you set ThroughputMode to provisioned, you must also set a value for ProvisionedThroughputInMibps. After you create the file system, you can decrease your file system's throughput in Provisioned Throughput mode or change between the throughput modes, as long as it’s been more than 24 hours since the last decrease or throughput mode change. For more information, see Specifying throughput with provisioned mode in the Amazon EFS User Guide.

Default is bursting.

" }, "ProvisionedThroughputInMibps":{ "shape":"ProvisionedThroughputInMibps", - "documentation":"

The throughput, measured in MiB/s, that you want to provision for a file system that you're creating. Valid values are 1-1024. Required if ThroughputMode is set to provisioned. The upper limit for throughput is 1024 MiB/s. You can get this limit increased by contacting AWS Support. For more information, see Amazon EFS Limits That You Can Increase in the Amazon EFS User Guide.

" + "documentation":"

The throughput, measured in MiB/s, that you want to provision for a file system that you're creating. Valid values are 1-1024. Required if ThroughputMode is set to provisioned. The upper limit for throughput is 1024 MiB/s. To increase this limit, contact AWS Support. For more information, see Amazon EFS quotas that you can increase in the Amazon EFS User Guide.

" + }, + "AvailabilityZoneName":{ + "shape":"AvailabilityZoneName", + "documentation":"

Used to create a file system that uses One Zone storage classes. It specifies the AWS Availability Zone in which to create the file system. Use the format us-east-1a to specify the Availability Zone. For more information about One Zone storage classes, see Using EFS storage classes in the Amazon EFS User Guide.

One Zone storage classes are not available in all Availability Zones in AWS Regions where Amazon EFS is available.

" + }, + "Backup":{ + "shape":"Backup", + "documentation":"

Specifies whether automatic backups are enabled on the file system that you are creating. Set the value to true to enable automatic backups. If you are creating a file system that uses One Zone storage classes, automatic backups are enabled by default. For more information, see Automatic backups in the Amazon EFS User Guide.

Default is false. However, if you specify an AvailabilityZoneName, the default is true.

AWS Backup is not available in all AWS Regions where Amazon EFS is available.

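A sketch of creating a One Zone file system; the Availability Zone name, tag, and other choices are examples, not requirements:

    import uuid

    import boto3

    efs = boto3.client('efs')

    efs.create_file_system(
        CreationToken=str(uuid.uuid4()),    # random token keeps retries idempotent
        PerformanceMode='generalPurpose',   # maxIO is not supported with One Zone storage
        AvailabilityZoneName='us-east-1a',  # example Availability Zone; selects One Zone storage
        Backup=True,                        # already the default when AvailabilityZoneName is set
        Encrypted=True,
        Tags=[{'Key': 'Name', 'Value': 'one-zone-demo'}],
    )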
" }, "Tags":{ "shape":"Tags", @@ -658,7 +693,7 @@ }, "SubnetId":{ "shape":"SubnetId", - "documentation":"

The ID of the subnet to add the mount target in.

" + "documentation":"

The ID of the subnet to add the mount target in. For file systems that use One Zone storage classes, use the subnet that is associated with the file system's Availability Zone.

" }, "IpAddress":{ "shape":"IpAddress", @@ -712,7 +747,7 @@ "documentation":"

Specifies the POSIX permissions to apply to the RootDirectory, in the format of an octal number representing the file's mode bits.

" } }, - "documentation":"

Required if the RootDirectory > Path specified does not exist. Specifies the POSIX IDs and permissions to apply to the access point's RootDirectory > Path. If the access point root directory does not exist, EFS creates it with these settings when a client connects to the access point. When specifying CreationInfo, you must include values for all properties.

If you do not provide CreationInfo and the specified RootDirectory does not exist, attempts to mount the file system using the access point will fail.

" + "documentation":"

Required if the RootDirectory > Path specified does not exist. Specifies the POSIX IDs and permissions to apply to the access point's RootDirectory > Path. If the access point root directory does not exist, EFS creates it with these settings when a client connects to the access point. When specifying CreationInfo, you must include values for all properties.

Amazon EFS creates a root directory only if you have provided the CreationInfo: OwnerUid, OwnerGid, and permissions for the directory. If you do not provide this information, Amazon EFS does not create the root directory. If the root directory does not exist, attempts to mount using the access point will fail.

If you do not provide CreationInfo and the specified RootDirectory does not exist, attempts to mount the file system using the access point will fail.

" }, "CreationToken":{ "type":"string", @@ -1133,11 +1168,19 @@ }, "ThroughputMode":{ "shape":"ThroughputMode", - "documentation":"

The throughput mode for a file system. There are two throughput modes to choose from for your file system: bursting and provisioned. If you set ThroughputMode to provisioned, you must also set a value for ProvisionedThroughPutInMibps. You can decrease your file system's throughput in Provisioned Throughput mode or change between the throughput modes as long as it’s been more than 24 hours since the last decrease or throughput mode change.

" + "documentation":"

Displays the file system's throughput mode. For more information, see Throughput modes in the Amazon EFS User Guide.

" }, "ProvisionedThroughputInMibps":{ "shape":"ProvisionedThroughputInMibps", - "documentation":"

The throughput, measured in MiB/s, that you want to provision for a file system. Valid values are 1-1024. Required if ThroughputMode is set to provisioned. The limit on throughput is 1024 MiB/s. You can get these limits increased by contacting AWS Support. For more information, see Amazon EFS Limits That You Can Increase in the Amazon EFS User Guide.

" + "documentation":"

The amount of provisioned throughput, measured in MiB/s, for the file system. Valid for file systems using ThroughputMode set to provisioned.

" + }, + "AvailabilityZoneName":{ + "shape":"AvailabilityZoneName", + "documentation":"

Describes the AWS Availability Zone in which the file system is located, and is valid only for file systems using One Zone storage classes. For more information, see Using EFS storage classes in the Amazon EFS User Guide.

" + }, + "AvailabilityZoneId":{ + "shape":"AvailabilityZoneId", + "documentation":"

The unique and consistent identifier of the Availability Zone in which the file system's One Zone storage classes exist. For example, use1-az1 is an Availability Zone ID for the us-east-1 AWS Region, and it has the same location in every AWS account.

" }, "Tags":{ "shape":"Tags", @@ -1266,7 +1309,7 @@ "ErrorCode":{"shape":"ErrorCode"}, "Message":{"shape":"ErrorMessage"} }, - "documentation":"

Returned if there's not enough capacity to provision additional throughput. This value might be returned when you try to create a file system in provisioned throughput mode, when you attempt to increase the provisioned throughput of an existing file system, or when you attempt to change an existing file system from bursting to provisioned throughput mode.

", + "documentation":"

Returned if there's not enough capacity to provision additional throughput. This value might be returned when you try to create a file system in provisioned throughput mode, when you attempt to increase the provisioned throughput of an existing file system, or when you attempt to change an existing file system from bursting to provisioned throughput mode. Try again later.

", "error":{"httpStatusCode":503}, "exception":true }, @@ -1320,7 +1363,8 @@ "available", "updating", "deleting", - "deleted" + "deleted", + "error" ] }, "LifecycleConfigurationDescription":{ @@ -1395,6 +1439,7 @@ }, "MaxResults":{ "type":"integer", + "documentation":"Max results used for pagination.", "min":1 }, "ModifyMountTargetSecurityGroupsRequest":{ @@ -1468,15 +1513,15 @@ }, "AvailabilityZoneId":{ "shape":"AvailabilityZoneId", - "documentation":"

The unique and consistent identifier of the Availability Zone (AZ) that the mount target resides in. For example, use1-az1 is an AZ ID for the us-east-1 Region and it has the same location in every AWS account.

" + "documentation":"

The unique and consistent identifier of the Availability Zone that the mount target resides in. For example, use1-az1 is an AZ ID for the us-east-1 Region and it has the same location in every AWS account.

" }, "AvailabilityZoneName":{ "shape":"AvailabilityZoneName", - "documentation":"

The name of the Availability Zone (AZ) that the mount target resides in. AZs are independently mapped to names for each AWS account. For example, the Availability Zone us-east-1a for your AWS account might not be the same location as us-east-1a for another AWS account.

" + "documentation":"

The name of the Availability Zone in which the mount target is located. Availability Zones are independently mapped to names for each AWS account. For example, the Availability Zone us-east-1a for your AWS account might not be the same location as us-east-1a for another AWS account.

" }, "VpcId":{ "shape":"VpcId", - "documentation":"

The Virtual Private Cloud (VPC) ID that the mount target is configured in.

" + "documentation":"

The virtual private cloud (VPC) ID that the mount target is configured in.

" } }, "documentation":"

Provides a description of a mount target.

" @@ -1539,7 +1584,8 @@ "Path":{ "type":"string", "max":100, - "min":1 + "min":1, + "pattern":"^(\\/|(\\/(?!\\.)+[^$#<>;`|&?{}^*/\\n]+){1,4})$" }, "PerformanceMode":{ "type":"string", @@ -1550,9 +1596,16 @@ }, "Permissions":{ "type":"string", + "max":4, + "min":3, "pattern":"^[0-7]{3,4}$" }, - "Policy":{"type":"string"}, + "Policy":{ + "type":"string", + "max":20000, + "min":1, + "pattern":"[\\s\\S]+" + }, "PolicyNotFound":{ "type":"structure", "members":{ @@ -1623,7 +1676,7 @@ }, "Policy":{ "shape":"Policy", - "documentation":"

The FileSystemPolicy that you're creating. Accepts a JSON formatted policy definition. To find out more about the elements that make up a file system policy, see EFS Resource-based Policies.

" + "documentation":"

The FileSystemPolicy that you're creating. Accepts a JSON formatted policy definition. EFS file system policies have a 20,000 character limit. To find out more about the elements that make up a file system policy, see EFS Resource-based Policies.

" }, "BypassPolicyLockoutSafetyCheck":{ "shape":"BypassPolicyLockoutSafetyCheck", @@ -1650,7 +1703,11 @@ } } }, - "ResourceId":{"type":"string"}, + "ResourceId":{ + "type":"string", + "max":128, + "pattern":"^(arn:aws[-a-z]*:elasticfilesystem:[0-9a-z-:]+:(access-point/fsap|file-system/fs)-[0-9a-f]{8,40}|fs(ap)?-[0-9a-f]{8,40})$" + }, "RootDirectory":{ "type":"structure", "members":{ @@ -1807,7 +1864,13 @@ ] }, "Timestamp":{"type":"timestamp"}, - "Token":{"type":"string"}, + "Token":{ + "type":"string", + "documentation":"Token used for pagination.", + "max":128, + "min":1, + "pattern":".+" + }, "TooManyRequests":{ "type":"structure", "required":["ErrorCode"], @@ -1841,7 +1904,7 @@ "ErrorCode":{"shape":"ErrorCode"}, "Message":{"shape":"ErrorMessage"} }, - "documentation":"

", + "documentation":"

Returned if the requested Amazon EFS functionality is not available in the specified Availability Zone.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -1860,7 +1923,7 @@ }, "TagKeys":{ "shape":"TagKeys", - "documentation":"

The keys of the key:value tag pairs that you want to remove from the specified EFS resource.

", + "documentation":"

The keys of the key-value tag pairs that you want to remove from the specified EFS resource.

", "location":"querystring", "locationName":"tagKeys" } @@ -1878,11 +1941,11 @@ }, "ThroughputMode":{ "shape":"ThroughputMode", - "documentation":"

(Optional) The throughput mode that you want your file system to use. If you're not updating your throughput mode, you don't need to provide this value in your request. If you are changing the ThroughputMode to provisioned, you must also set a value for ProvisionedThroughputInMibps.

" + "documentation":"

(Optional) Updates the file system's throughput mode. If you're not updating your throughput mode, you don't need to provide this value in your request. If you are changing the ThroughputMode to provisioned, you must also set a value for ProvisionedThroughputInMibps.

" }, "ProvisionedThroughputInMibps":{ "shape":"ProvisionedThroughputInMibps", - "documentation":"

(Optional) The amount of throughput, in MiB/s, that you want to provision for your file system. Valid values are 1-1024. Required if ThroughputMode is changed to provisioned on update. If you're not updating the amount of provisioned throughput for your file system, you don't need to provide this value in your request.

" + "documentation":"

(Optional) Sets the amount of provisioned throughput, in MiB/s, for the file system. Valid values are 1-1024. If you are changing the throughput mode to provisioned, you must also provide the amount of provisioned throughput. Required if ThroughputMode is changed to provisioned on update.

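For instance (placeholder ID), switching an existing file system to provisioned throughput:

    import boto3

    efs = boto3.client('efs')

    efs.update_file_system(
        FileSystemId='fs-0123456789abcdef0',   # hypothetical file system ID
        ThroughputMode='provisioned',
        ProvisionedThroughputInMibps=128,      # required when switching to provisioned mode
    )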
" } } }, @@ -1893,7 +1956,7 @@ "ErrorCode":{"shape":"ErrorCode"}, "Message":{"shape":"ErrorMessage"} }, - "documentation":"

Returned if the AWS Backup service is not available in the region that the request was made.

", + "documentation":"

Returned if the AWS Backup service is not available in the Region in which the request was made.

", "error":{"httpStatusCode":400}, "exception":true }, diff --git a/botocore/data/eks/2017-11-01/paginators-1.json b/botocore/data/eks/2017-11-01/paginators-1.json index f9e6d18a..fae5270d 100644 --- a/botocore/data/eks/2017-11-01/paginators-1.json +++ b/botocore/data/eks/2017-11-01/paginators-1.json @@ -35,6 +35,12 @@ "limit_key": "maxResults", "output_token": "nextToken", "result_key": "addons" + }, + "ListIdentityProviderConfigs": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "identityProviderConfigs" } } } diff --git a/botocore/data/eks/2017-11-01/service-2.json b/botocore/data/eks/2017-11-01/service-2.json index bfedc15d..e0498691 100644 --- a/botocore/data/eks/2017-11-01/service-2.json +++ b/botocore/data/eks/2017-11-01/service-2.json @@ -13,6 +13,42 @@ "uid":"eks-2017-11-01" }, "operations":{ + "AssociateEncryptionConfig":{ + "name":"AssociateEncryptionConfig", + "http":{ + "method":"POST", + "requestUri":"/clusters/{name}/encryption-config/associate" + }, + "input":{"shape":"AssociateEncryptionConfigRequest"}, + "output":{"shape":"AssociateEncryptionConfigResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServerException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Associates an encryption configuration with an existing cluster.

You can use this API to enable encryption on existing clusters that do not already have encryption enabled. This allows you to implement a defense-in-depth security strategy without migrating applications to new EKS clusters.

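A minimal sketch, assuming an existing cluster named demo and a placeholder KMS key ARN; secrets is the resource type the encryption applies to:

    import boto3

    eks = boto3.client('eks')

    eks.associate_encryption_config(
        clusterName='demo',   # hypothetical cluster name
        encryptionConfig=[{
            'resources': ['secrets'],
            'provider': {
                # hypothetical CMK ARN
                'keyArn': 'arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab',
            },
        }],
    )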
" + }, + "AssociateIdentityProviderConfig":{ + "name":"AssociateIdentityProviderConfig", + "http":{ + "method":"POST", + "requestUri":"/clusters/{name}/identity-provider-configs/associate" + }, + "input":{"shape":"AssociateIdentityProviderConfigRequest"}, + "output":{"shape":"AssociateIdentityProviderConfigResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServerException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Associates an identity provider configuration with a cluster.

If you want to authenticate identities using an identity provider, you can create an identity provider configuration and associate it with your cluster. After configuring authentication to your cluster, you can create Kubernetes roles and clusterroles to assign permissions to the roles, and then bind the roles to the identities using Kubernetes rolebindings and clusterrolebindings. For more information, see Using RBAC Authorization in the Kubernetes documentation.

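A hedged sketch of associating an OIDC provider; the issuer URL, client ID, and config name are placeholders, and the response is assumed to surface the cluster update that tracks the association:

    import boto3

    eks = boto3.client('eks')

    resp = eks.associate_identity_provider_config(
        clusterName='demo',   # hypothetical cluster name
        oidc={
            'identityProviderConfigName': 'my-oidc',   # hypothetical config name
            'issuerUrl': 'https://oidc.example.com',   # hypothetical issuer
            'clientId': 'kubernetes',                  # hypothetical client ID
            'usernameClaim': 'email',
            'groupsClaim': 'groups',
        },
    )

    # The association runs as a cluster update; poll it with describe_update.
    print(resp['update']['id'])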
" + }, "CreateAddon":{ "name":"CreateAddon", "http":{ @@ -48,7 +84,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"UnsupportedAvailabilityZoneException"} ], - "documentation":"

Creates an Amazon EKS control plane.

The Amazon EKS control plane consists of control plane instances that run the Kubernetes software, such as etcd and the API server. The control plane runs in an account managed by AWS, and the Kubernetes API is exposed via the Amazon EKS API server endpoint. Each Amazon EKS cluster control plane is single-tenant and unique and runs on its own set of Amazon EC2 instances.

The cluster control plane is provisioned across multiple Availability Zones and fronted by an Elastic Load Balancing Network Load Balancer. Amazon EKS also provisions elastic network interfaces in your VPC subnets to provide connectivity from the control plane instances to the worker nodes (for example, to support kubectl exec, logs, and proxy data flows).

Amazon EKS worker nodes run in your AWS account and connect to your cluster's control plane via the Kubernetes API server endpoint and a certificate file that is created for your cluster.

You can use the endpointPublicAccess and endpointPrivateAccess parameters to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled, and private access is disabled. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .

You can use the logging parameter to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster Control Plane Logs in the Amazon EKS User Guide .

CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see Amazon CloudWatch Pricing.

Cluster creation typically takes between 10 and 15 minutes. After you create an Amazon EKS cluster, you must configure your Kubernetes tooling to communicate with the API server and launch worker nodes into your cluster. For more information, see Managing Cluster Authentication and Launching Amazon EKS Worker Nodes in the Amazon EKS User Guide.

" + "documentation":"

Creates an Amazon EKS control plane.

The Amazon EKS control plane consists of control plane instances that run the Kubernetes software, such as etcd and the API server. The control plane runs in an account managed by AWS, and the Kubernetes API is exposed via the Amazon EKS API server endpoint. Each Amazon EKS cluster control plane is single-tenant and unique and runs on its own set of Amazon EC2 instances.

The cluster control plane is provisioned across multiple Availability Zones and fronted by an Elastic Load Balancing Network Load Balancer. Amazon EKS also provisions elastic network interfaces in your VPC subnets to provide connectivity from the control plane instances to the nodes (for example, to support kubectl exec, logs, and proxy data flows).

Amazon EKS nodes run in your AWS account and connect to your cluster's control plane via the Kubernetes API server endpoint and a certificate file that is created for your cluster.

Cluster creation typically takes several minutes. After you create an Amazon EKS cluster, you must configure your Kubernetes tooling to communicate with the API server and launch nodes into your cluster. For more information, see Managing Cluster Authentication and Launching Amazon EKS nodes in the Amazon EKS User Guide.

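By way of illustration only, a minimal CreateCluster call using the eks client from the sketch above; the role ARN, subnet IDs, and security group ID are placeholders:

# Create a control plane; its status starts as CREATING until provisioning finishes.
cluster = eks.create_cluster(
    name='my-cluster',
    roleArn='arn:aws:iam::123456789012:role/eks-cluster-role',
    resourcesVpcConfig={
        'subnetIds': ['subnet-0123456789abcdef0', 'subnet-0fedcba9876543210'],
        'securityGroupIds': ['sg-0123456789abcdef0'],
        'endpointPublicAccess': True,
        'endpointPrivateAccess': False,
    },
)
print(cluster['cluster']['status'])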
" }, "CreateFargateProfile":{ "name":"CreateFargateProfile", @@ -85,7 +121,7 @@ {"shape":"ServerException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Creates a managed worker node group for an Amazon EKS cluster. You can only create a node group for your cluster that is equal to the current Kubernetes version for the cluster. All node groups are created with the latest AMI release version for the respective minor Kubernetes version of the cluster, unless you deploy a custom AMI using a launch template. For more information about using launch templates, see Launch template support.

An Amazon EKS managed node group is an Amazon EC2 Auto Scaling group and associated Amazon EC2 instances that are managed by AWS for an Amazon EKS cluster. Each node group uses a version of the Amazon EKS optimized Amazon Linux 2 AMI. For more information, see Managed Node Groups in the Amazon EKS User Guide.

" + "documentation":"

Creates a managed node group for an Amazon EKS cluster. You can only create a node group for your cluster that is equal to the current Kubernetes version for the cluster. All node groups are created with the latest AMI release version for the respective minor Kubernetes version of the cluster, unless you deploy a custom AMI using a launch template. For more information about using launch templates, see Launch template support.

An Amazon EKS managed node group is an Amazon EC2 Auto Scaling group and associated Amazon EC2 instances that are managed by AWS for an Amazon EKS cluster. Each node group uses a version of the Amazon EKS optimized Amazon Linux 2 AMI. For more information, see Managed Node Groups in the Amazon EKS User Guide.

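A corresponding CreateNodegroup sketch (illustrative only, reusing the eks client from the earlier example); ARNs, IDs, and sizes are placeholders:

# Create a managed node group backed by an EKS-managed Auto Scaling group.
nodegroup = eks.create_nodegroup(
    clusterName='my-cluster',
    nodegroupName='workers',
    subnets=['subnet-0123456789abcdef0'],
    nodeRole='arn:aws:iam::123456789012:role/eks-node-role',
    scalingConfig={'minSize': 1, 'maxSize': 3, 'desiredSize': 2},
    instanceTypes=['t3.medium'],
    capacityType='ON_DEMAND',
)
print(nodegroup['nodegroup']['status'])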
" }, "DeleteAddon":{ "name":"DeleteAddon", @@ -219,6 +255,23 @@ ], "documentation":"

Returns descriptive information about an AWS Fargate profile.

" }, + "DescribeIdentityProviderConfig":{ + "name":"DescribeIdentityProviderConfig", + "http":{ + "method":"POST", + "requestUri":"/clusters/{name}/identity-provider-configs/describe" + }, + "input":{"shape":"DescribeIdentityProviderConfigRequest"}, + "output":{"shape":"DescribeIdentityProviderConfigResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ClientException"}, + {"shape":"ServerException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Returns descriptive information about an identity provider configuration.

" + }, "DescribeNodegroup":{ "name":"DescribeNodegroup", "http":{ @@ -252,6 +305,24 @@ ], "documentation":"

Returns descriptive information about an update against your Amazon EKS cluster or associated managed node group.

When the status of the update is Succeeded, the update is complete. If an update fails, the status is Failed, and an error detail explains the reason for the failure.

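A small polling sketch (illustrative only) built on the status behavior described above, using the eks client from the earlier example; the update ID is a placeholder:

import time

# Poll an update until it leaves the InProgress state, then report any errors.
while True:
    update = eks.describe_update(
        name='my-cluster',
        updateId='11111111-2222-3333-4444-555555555555',
    )['update']
    if update['status'] != 'InProgress':
        break
    time.sleep(30)
print(update['status'], update.get('errors', []))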
" }, + "DisassociateIdentityProviderConfig":{ + "name":"DisassociateIdentityProviderConfig", + "http":{ + "method":"POST", + "requestUri":"/clusters/{name}/identity-provider-configs/disassociate" + }, + "input":{"shape":"DisassociateIdentityProviderConfigRequest"}, + "output":{"shape":"DisassociateIdentityProviderConfigResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServerException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Disassociates an identity provider configuration from a cluster. If you disassociate an identity provider from your cluster, users included in the provider can no longer access the cluster. However, you can still access the cluster with AWS IAM users.

" + }, "ListAddons":{ "name":"ListAddons", "http":{ @@ -301,6 +372,23 @@ ], "documentation":"

Lists the AWS Fargate profiles associated with the specified cluster in your AWS account in the specified Region.

" }, + "ListIdentityProviderConfigs":{ + "name":"ListIdentityProviderConfigs", + "http":{ + "method":"GET", + "requestUri":"/clusters/{name}/identity-provider-configs" + }, + "input":{"shape":"ListIdentityProviderConfigsRequest"}, + "output":{"shape":"ListIdentityProviderConfigsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Lists the identity provider configurations for your cluster.

" + }, "ListNodegroups":{ "name":"ListNodegroups", "http":{ @@ -360,7 +448,7 @@ {"shape":"BadRequestException"}, {"shape":"NotFoundException"} ], - "documentation":"

Associates the specified tags to a resource with the specified resourceArn. If existing tags on a resource are not specified in the request parameters, they are not changed. When a resource is deleted, the tags associated with that resource are deleted as well. Tags that you create for Amazon EKS resources do not propagate to any other resources associated with the cluster. For example, if you tag a cluster with this operation, that tag does not automatically propagate to the subnets and worker nodes associated with the cluster.

" + "documentation":"

Associates the specified tags to a resource with the specified resourceArn. If existing tags on a resource are not specified in the request parameters, they are not changed. When a resource is deleted, the tags associated with that resource are deleted as well. Tags that you create for Amazon EKS resources do not propagate to any other resources associated with the cluster. For example, if you tag a cluster with this operation, that tag does not automatically propagate to the subnets and nodes associated with the cluster.

" }, "UntagResource":{ "name":"UntagResource", @@ -575,7 +663,8 @@ "InternalFailure", "ClusterUnreachable", "InsufficientNumberOfReplicas", - "ConfigurationConflict" + "ConfigurationConflict", + "AdmissionRequestDenied" ] }, "AddonIssueList":{ @@ -620,6 +709,74 @@ "type":"list", "member":{"shape":"AddonInfo"} }, + "AssociateEncryptionConfigRequest":{ + "type":"structure", + "required":[ + "clusterName", + "encryptionConfig" + ], + "members":{ + "clusterName":{ + "shape":"String", + "documentation":"

The name of the cluster that you are associating with encryption configuration.

", + "location":"uri", + "locationName":"name" + }, + "encryptionConfig":{ + "shape":"EncryptionConfigList", + "documentation":"

The configuration you are using for encryption.

" + }, + "clientRequestToken":{ + "shape":"String", + "documentation":"

The client request token you are using with the encryption configuration.

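For illustration, a sketch of calling the corresponding operation with the eks client from the earlier examples; the layout of each encryptionConfig item (resources plus a provider keyArn) follows the EKS EncryptionConfig shape, and the KMS key ARN is a placeholder:

# Enable envelope encryption of Kubernetes secrets on an existing cluster.
response = eks.associate_encryption_config(
    clusterName='my-cluster',
    encryptionConfig=[{
        'resources': ['secrets'],
        'provider': {'keyArn': 'arn:aws:kms:us-west-2:123456789012:key/EXAMPLE'},
    }],
)
print(response['update']['status'])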
", + "idempotencyToken":true + } + } + }, + "AssociateEncryptionConfigResponse":{ + "type":"structure", + "members":{ + "update":{"shape":"Update"} + } + }, + "AssociateIdentityProviderConfigRequest":{ + "type":"structure", + "required":[ + "clusterName", + "oidc" + ], + "members":{ + "clusterName":{ + "shape":"String", + "documentation":"

The name of the cluster to associate the configuration to.

", + "location":"uri", + "locationName":"name" + }, + "oidc":{ + "shape":"OidcIdentityProviderConfigRequest", + "documentation":"

An object that represents an OpenID Connect (OIDC) identity provider configuration.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The metadata to apply to the configuration to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define.

" + }, + "clientRequestToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", + "idempotencyToken":true + } + } + }, + "AssociateIdentityProviderConfigResponse":{ + "type":"structure", + "members":{ + "update":{"shape":"Update"}, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags for the resource.

" + } + } + }, "AutoScalingGroup":{ "type":"structure", "members":{ @@ -985,7 +1142,7 @@ }, "instanceTypes":{ "shape":"StringList", - "documentation":"

Specify the instance types for a node group. If you specify a GPU instance type, be sure to specify AL2_x86_64_GPU with the amiType parameter. If you specify launchTemplate, then you can specify zero or one instance type in your launch template or you can specify 0-20 instance types for instanceTypes. If however, you specify an instance type in your launch template and specify any instanceTypes, the node group deployment will fail. If you don't specify an instance type in a launch template or for instanceTypes, then t3.medium is used, by default. If you specify Spot for capacityType, then we recommend specifying multiple values for instanceTypes. For more information, see Managed node group capacity types and Launch template support in the Amazon EKS User Guide.

" + "documentation":"

Specify the instance types for a node group. If you specify a GPU instance type, be sure to specify AL2_x86_64_GPU with the amiType parameter. If you specify launchTemplate, then you can specify zero or one instance type in your launch template or you can specify 0-20 instance types for instanceTypes. If, however, you specify an instance type in your launch template and specify any instanceTypes, the node group deployment will fail. If you don't specify an instance type in a launch template or for instanceTypes, then t3.medium is used by default. If you specify Spot for capacityType, then we recommend specifying multiple values for instanceTypes. For more information, see Managed node group capacity types and Launch template support in the Amazon EKS User Guide.

" }, "amiType":{ "shape":"AMITypes", @@ -997,7 +1154,7 @@ }, "nodeRole":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role to associate with your node group. The Amazon EKS worker node kubelet daemon makes calls to AWS APIs on your behalf. Worker nodes receive permissions for these API calls through an IAM instance profile and associated policies. Before you can launch worker nodes and register them into a cluster, you must create an IAM role for those worker nodes to use when they are launched. For more information, see Amazon EKS Worker Node IAM Role in the Amazon EKS User Guide . If you specify launchTemplate, then don't specify IamInstanceProfile in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to associate with your node group. The Amazon EKS worker node kubelet daemon makes calls to AWS APIs on your behalf. Nodes receive permissions for these API calls through an IAM instance profile and associated policies. Before you can launch nodes and register them into a cluster, you must create an IAM role for those nodes to use when they are launched. For more information, see Amazon EKS node IAM role in the Amazon EKS User Guide . If you specify launchTemplate, then don't specify IamInstanceProfile in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

" }, "labels":{ "shape":"labelsMap", @@ -1273,6 +1430,34 @@ } } }, + "DescribeIdentityProviderConfigRequest":{ + "type":"structure", + "required":[ + "clusterName", + "identityProviderConfig" + ], + "members":{ + "clusterName":{ + "shape":"String", + "documentation":"

The cluster name that the identity provider configuration is associated to.

", + "location":"uri", + "locationName":"name" + }, + "identityProviderConfig":{ + "shape":"IdentityProviderConfig", + "documentation":"

An object that represents an identity provider configuration.

" + } + } + }, + "DescribeIdentityProviderConfigResponse":{ + "type":"structure", + "members":{ + "identityProviderConfig":{ + "shape":"IdentityProviderConfigResponse", + "documentation":"

The object that represents an OpenID Connect (OIDC) identity provider configuration.

" + } + } + }, "DescribeNodegroupRequest":{ "type":"structure", "required":[ @@ -1345,6 +1530,36 @@ } } }, + "DisassociateIdentityProviderConfigRequest":{ + "type":"structure", + "required":[ + "clusterName", + "identityProviderConfig" + ], + "members":{ + "clusterName":{ + "shape":"String", + "documentation":"

The name of the cluster to disassociate an identity provider from.

", + "location":"uri", + "locationName":"name" + }, + "identityProviderConfig":{ + "shape":"IdentityProviderConfig", + "documentation":"

An object that represents an identity provider configuration.

" + }, + "clientRequestToken":{ + "shape":"String", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", + "idempotencyToken":true + } + } + }, + "DisassociateIdentityProviderConfigResponse":{ + "type":"structure", + "members":{ + "update":{"shape":"Update"} + } + }, "EncryptionConfig":{ "type":"structure", "members":{ @@ -1380,7 +1595,8 @@ "InsufficientFreeAddresses", "ClusterUnreachable", "InsufficientNumberOfReplicas", - "ConfigurationConflict" + "ConfigurationConflict", + "AdmissionRequestDenied" ] }, "ErrorDetail":{ @@ -1491,10 +1707,42 @@ "members":{ "oidc":{ "shape":"OIDC", - "documentation":"

The OpenID Connect identity provider information for the cluster.

" + "documentation":"

An object representing the OpenID Connect identity provider information.

" } }, - "documentation":"

An object representing an identity provider for authentication credentials.

" + "documentation":"

An object representing an identity provider.

" + }, + "IdentityProviderConfig":{ + "type":"structure", + "required":[ + "type", + "name" + ], + "members":{ + "type":{ + "shape":"String", + "documentation":"

The type of the identity provider configuration.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the identity provider configuration.

" + } + }, + "documentation":"

An object representing an identity provider configuration.

" + }, + "IdentityProviderConfigResponse":{ + "type":"structure", + "members":{ + "oidc":{ + "shape":"OidcIdentityProviderConfig", + "documentation":"

An object that represents an OpenID Connect (OIDC) identity provider configuration.

" + } + }, + "documentation":"

An object that represents an identity configuration.

" + }, + "IdentityProviderConfigs":{ + "type":"list", + "member":{"shape":"IdentityProviderConfig"} }, "InvalidParameterException":{ "type":"structure", @@ -1541,7 +1789,7 @@ "members":{ "code":{ "shape":"NodegroupIssueCode", - "documentation":"

A brief description of the error.

" + "documentation":"

A brief description of the error.

" }, "message":{ "shape":"String", @@ -1712,6 +1960,49 @@ } } }, + "ListIdentityProviderConfigsRequest":{ + "type":"structure", + "required":["clusterName"], + "members":{ + "clusterName":{ + "shape":"String", + "documentation":"

The cluster name that you want to list identity provider configurations for.

", + "location":"uri", + "locationName":"name" + }, + "maxResults":{ + "shape":"ListIdentityProviderConfigsRequestMaxResults", + "documentation":"

The maximum number of identity provider configurations returned by ListIdentityProviderConfigs in paginated output. When you use this parameter, ListIdentityProviderConfigs returns only maxResults results in a single page along with a nextToken response element. You can see the remaining results of the initial request by sending another ListIdentityProviderConfigs request with the returned nextToken value. This value can be between 1 and 100. If you don't use this parameter, ListIdentityProviderConfigs returns up to 100 results and a nextToken value, if applicable.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"String", + "documentation":"

The nextToken value returned from a previous paginated ListIdentityProviderConfigs request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListIdentityProviderConfigsRequestMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListIdentityProviderConfigsResponse":{ + "type":"structure", + "members":{ + "identityProviderConfigs":{ + "shape":"IdentityProviderConfigs", + "documentation":"

The identity provider configurations for the cluster.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

The nextToken value returned from a previous paginated ListIdentityProviderConfigsResponse where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

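An illustrative pagination loop driven by the maxResults/nextToken contract described above, again reusing the eks client from the earlier sketches:

# Page through identity provider configurations 20 at a time.
configs, token = [], None
while True:
    kwargs = {'clusterName': 'my-cluster', 'maxResults': 20}
    if token:
        kwargs['nextToken'] = token
    page = eks.list_identity_provider_configs(**kwargs)
    configs.extend(page['identityProviderConfigs'])
    token = page.get('nextToken')
    if not token:
        break
print([c['name'] for c in configs])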
" + } + } + }, "ListNodegroupsRequest":{ "type":"structure", "required":["clusterName"], @@ -1934,7 +2225,7 @@ }, "nodeRole":{ "shape":"String", - "documentation":"

The IAM role associated with your node group. The Amazon EKS worker node kubelet daemon makes calls to AWS APIs on your behalf. Worker nodes receive permissions for these API calls through an IAM instance profile and associated policies.

" + "documentation":"

The IAM role associated with your node group. The Amazon EKS node kubelet daemon makes calls to AWS APIs on your behalf. Nodes receive permissions for these API calls through an IAM instance profile and associated policies.

" }, "labels":{ "shape":"labelsMap", @@ -2005,7 +2296,7 @@ }, "remoteAccessSecurityGroup":{ "shape":"String", - "documentation":"

The remote access security group associated with the node group. This security group controls SSH access to the worker nodes.

" + "documentation":"

The remote access security group associated with the node group. This security group controls SSH access to the nodes.

" } }, "documentation":"

An object representing the resources associated with the node group, such as Auto Scaling groups and security groups for remote access.

" @@ -2015,15 +2306,15 @@ "members":{ "minSize":{ "shape":"Capacity", - "documentation":"

The minimum number of worker nodes that the managed node group can scale in to. This number must be greater than zero.

" + "documentation":"

The minimum number of nodes that the managed node group can scale in to. This number must be greater than zero.

" }, "maxSize":{ "shape":"Capacity", - "documentation":"

The maximum number of worker nodes that the managed node group can scale out to. Managed node groups can support up to 100 nodes by default.

" + "documentation":"

The maximum number of nodes that the managed node group can scale out to. For information about the maximum number that you can specify, see Amazon EKS service quotas in the Amazon EKS User Guide.

" }, "desiredSize":{ "shape":"Capacity", - "documentation":"

The current number of worker nodes that the managed node group should maintain.

" + "documentation":"

The current number of nodes that the managed node group should maintain.

" } }, "documentation":"

An object representing the scaling configuration details for the Auto Scaling group that is associated with your node group. If you specify a value for any property, then you must specify values for all of the properties.

" @@ -2054,10 +2345,107 @@ "members":{ "issuer":{ "shape":"String", - "documentation":"

The issuer URL for the OpenID Connect identity provider.

" + "documentation":"

The issuer URL for the OIDC identity provider.

" } }, - "documentation":"

An object representing the OpenID Connect identity provider information for the cluster.

" + "documentation":"

An object representing the OpenID Connect (OIDC) identity provider information for the cluster.

" + }, + "OidcIdentityProviderConfig":{ + "type":"structure", + "members":{ + "identityProviderConfigName":{ + "shape":"String", + "documentation":"

The name of the configuration.

" + }, + "identityProviderConfigArn":{ + "shape":"String", + "documentation":"

The ARN of the configuration.

" + }, + "clusterName":{ + "shape":"String", + "documentation":"

The cluster that the configuration is associated to.

" + }, + "issuerUrl":{ + "shape":"String", + "documentation":"

The URL of the OIDC identity provider that allows the API server to discover public signing keys for verifying tokens.

" + }, + "clientId":{ + "shape":"String", + "documentation":"

This is also known as audience. The ID of the client application that makes authentication requests to the OIDC identity provider.

" + }, + "usernameClaim":{ + "shape":"String", + "documentation":"

The JSON Web token (JWT) claim that is used as the username.

" + }, + "usernamePrefix":{ + "shape":"String", + "documentation":"

The prefix that is prepended to username claims to prevent clashes with existing names. The prefix can't contain system:

" + }, + "groupsClaim":{ + "shape":"String", + "documentation":"

The JSON web token (JWT) claim that the provider uses to return your groups.

" + }, + "groupsPrefix":{ + "shape":"String", + "documentation":"

The prefix that is prepended to group claims to prevent clashes with existing names (such as system: groups). For example, the value oidc: creates group names like oidc:engineering and oidc:infra. The prefix can't contain system:

" + }, + "requiredClaims":{ + "shape":"requiredClaimsMap", + "documentation":"

The key-value pairs that describe required claims in the identity token. If set, each claim is verified to be present in the token with a matching value.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The metadata to apply to the provider configuration to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you defined.

" + }, + "status":{ + "shape":"configStatus", + "documentation":"

The status of the OIDC identity provider.

" + } + }, + "documentation":"

An object that represents the configuration for an OpenID Connect (OIDC) identity provider.

" + }, + "OidcIdentityProviderConfigRequest":{ + "type":"structure", + "required":[ + "identityProviderConfigName", + "issuerUrl", + "clientId" + ], + "members":{ + "identityProviderConfigName":{ + "shape":"String", + "documentation":"

The name of the OIDC provider configuration.

" + }, + "issuerUrl":{ + "shape":"String", + "documentation":"

The URL of the OpenID identity provider that allows the API server to discover public signing keys for verifying tokens. The URL must begin with https:// and should correspond to the iss claim in the provider's OIDC ID tokens. Per the OIDC standard, path components are allowed but query parameters are not. Typically the URL consists of only a hostname, like https://server.example.org or https://example.com. This URL should point to the level below .well-known/openid-configuration and must be publicly accessible over the internet.

" + }, + "clientId":{ + "shape":"String", + "documentation":"

This is also known as audience. The ID for the client application that makes authentication requests to the OpenID identity provider.

" + }, + "usernameClaim":{ + "shape":"String", + "documentation":"

The JSON Web Token (JWT) claim to use as the username. The default is sub, which is expected to be a unique identifier of the end user. You can choose other claims, such as email or name, depending on the OpenID identity provider. Claims other than email are prefixed with the issuer URL to prevent naming clashes with other plug-ins.

" + }, + "usernamePrefix":{ + "shape":"String", + "documentation":"

The prefix that is prepended to username claims to prevent clashes with existing names. If you do not provide this field, and username is a value other than email, the prefix defaults to issuerurl#. You can use the value - to disable all prefixing.

" + }, + "groupsClaim":{ + "shape":"String", + "documentation":"

The JWT claim that the provider uses to return your groups.

" + }, + "groupsPrefix":{ + "shape":"String", + "documentation":"

The prefix that is prepended to group claims to prevent clashes with existing names (such as system: groups). For example, the value oidc: will create group names like oidc:engineering and oidc:infra.

" + }, + "requiredClaims":{ + "shape":"requiredClaimsMap", + "documentation":"

The key-value pairs that describe required claims in the identity token. If set, each claim is verified to be present in the token with a matching value. For the maximum number of claims that you can require, see Amazon EKS service quotas in the Amazon EKS User Guide.

" + } + }, + "documentation":"

An object representing an OpenID Connect (OIDC) configuration. Before associating an OIDC identity provider to your cluster, review the considerations in Authenticating users for your cluster from an OpenID Connect identity provider in the Amazon EKS User Guide.

" }, "Provider":{ "type":"structure", @@ -2074,11 +2462,11 @@ "members":{ "ec2SshKey":{ "shape":"String", - "documentation":"

The Amazon EC2 SSH key that provides access for SSH communication with the worker nodes in the managed node group. For more information, see Amazon EC2 Key Pairs in the Amazon Elastic Compute Cloud User Guide for Linux Instances.

" + "documentation":"

The Amazon EC2 SSH key that provides access for SSH communication with the nodes in the managed node group. For more information, see Amazon EC2 Key Pairs in the Amazon Elastic Compute Cloud User Guide for Linux Instances.

" }, "sourceSecurityGroups":{ "shape":"StringList", - "documentation":"

The security groups that are allowed SSH access (port 22) to the worker nodes. If you specify an Amazon EC2 SSH key but do not specify a source security group when you create a managed node group, then port 22 on the worker nodes is opened to the internet (0.0.0.0/0). For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

The security groups that are allowed SSH access (port 22) to the nodes. If you specify an Amazon EC2 SSH key but do not specify a source security group when you create a managed node group, then port 22 on the nodes is opened to the internet (0.0.0.0/0). For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

" } }, "documentation":"

An object representing the remote access configuration for the managed node group.

" @@ -2546,6 +2934,8 @@ "MinSize", "ReleaseVersion", "PublicAccessCidrs", + "IdentityProviderConfig", + "EncryptionConfig", "AddonVersion", "ServiceAccountRoleArn", "ResolveConflicts" @@ -2571,6 +2961,9 @@ "EndpointAccessUpdate", "LoggingUpdate", "ConfigUpdate", + "AssociateIdentityProviderConfig", + "DisassociateIdentityProviderConfig", + "AssociateEncryptionConfig", "AddonUpdate" ] }, @@ -2579,11 +2972,11 @@ "members":{ "subnetIds":{ "shape":"StringList", - "documentation":"

Specify subnets for your Amazon EKS worker nodes. Amazon EKS creates cross-account elastic network interfaces in these subnets to allow communication between your worker nodes and the Kubernetes control plane.

" + "documentation":"

Specify subnets for your Amazon EKS nodes. Amazon EKS creates cross-account elastic network interfaces in these subnets to allow communication between your nodes and the Kubernetes control plane.

" }, "securityGroupIds":{ "shape":"StringList", - "documentation":"

Specify one or more security groups for the cross-account elastic network interfaces that Amazon EKS creates to use to allow communication between your worker nodes and the Kubernetes control plane. If you don't specify any security groups, then familiarize yourself with the difference between Amazon EKS defaults for clusters deployed with Kubernetes:

For more information, see Amazon EKS security group considerations in the Amazon EKS User Guide .

" + "documentation":"

Specify one or more security groups for the cross-account elastic network interfaces that Amazon EKS creates to use to allow communication between your nodes and the Kubernetes control plane. If you don't specify any security groups, then familiarize yourself with the difference between Amazon EKS defaults for clusters deployed with Kubernetes:

For more information, see Amazon EKS security group considerations in the Amazon EKS User Guide .

" }, "endpointPublicAccess":{ "shape":"BoxedBoolean", @@ -2591,11 +2984,11 @@ }, "endpointPrivateAccess":{ "shape":"BoxedBoolean", - "documentation":"

Set this value to true to enable private access for your cluster's Kubernetes API server endpoint. If you enable private access, Kubernetes API requests from within your cluster's VPC use the private VPC endpoint. The default value for this parameter is false, which disables private access for your Kubernetes API server. If you disable private access and you have worker nodes or AWS Fargate pods in the cluster, then ensure that publicAccessCidrs includes the necessary CIDR blocks for communication with the worker nodes or Fargate pods. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .

" + "documentation":"

Set this value to true to enable private access for your cluster's Kubernetes API server endpoint. If you enable private access, Kubernetes API requests from within your cluster's VPC use the private VPC endpoint. The default value for this parameter is false, which disables private access for your Kubernetes API server. If you disable private access and you have nodes or AWS Fargate pods in the cluster, then ensure that publicAccessCidrs includes the necessary CIDR blocks for communication with the nodes or Fargate pods. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .

" }, "publicAccessCidrs":{ "shape":"StringList", - "documentation":"

The CIDR blocks that are allowed access to your cluster's public Kubernetes API server endpoint. Communication to the endpoint from addresses outside of the CIDR blocks that you specify is denied. The default value is 0.0.0.0/0. If you've disabled private endpoint access and you have worker nodes or AWS Fargate pods in the cluster, then ensure that you specify the necessary CIDR blocks. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .

" + "documentation":"

The CIDR blocks that are allowed access to your cluster's public Kubernetes API server endpoint. Communication to the endpoint from addresses outside of the CIDR blocks that you specify is denied. The default value is 0.0.0.0/0. If you've disabled private endpoint access and you have nodes or AWS Fargate pods in the cluster, then ensure that you specify the necessary CIDR blocks. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .

" } }, "documentation":"

An object representing the VPC configuration to use for an Amazon EKS cluster.

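As a sketch of how this shape is typically supplied (illustrative only, with a placeholder CIDR block), an UpdateClusterConfig call using the eks client from the earlier examples:

# Restrict the public API endpoint to one CIDR block and enable private access.
response = eks.update_cluster_config(
    name='my-cluster',
    resourcesVpcConfig={
        'endpointPublicAccess': True,
        'endpointPrivateAccess': True,
        'publicAccessCidrs': ['203.0.113.0/24'],
    },
)
print(response['update']['status'])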
" @@ -2609,7 +3002,7 @@ }, "securityGroupIds":{ "shape":"StringList", - "documentation":"

The security groups associated with the cross-account elastic network interfaces that are used to allow communication between your worker nodes and the Kubernetes control plane.

" + "documentation":"

The security groups associated with the cross-account elastic network interfaces that are used to allow communication between your nodes and the Kubernetes control plane.

" }, "clusterSecurityGroupId":{ "shape":"String", @@ -2625,15 +3018,23 @@ }, "endpointPrivateAccess":{ "shape":"Boolean", - "documentation":"

This parameter indicates whether the Amazon EKS private API server endpoint is enabled. If the Amazon EKS private API server endpoint is enabled, Kubernetes API requests that originate from within your cluster's VPC use the private VPC endpoint instead of traversing the internet. If this value is disabled and you have worker nodes or AWS Fargate pods in the cluster, then ensure that publicAccessCidrs includes the necessary CIDR blocks for communication with the worker nodes or Fargate pods. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .

" + "documentation":"

This parameter indicates whether the Amazon EKS private API server endpoint is enabled. If the Amazon EKS private API server endpoint is enabled, Kubernetes API requests that originate from within your cluster's VPC use the private VPC endpoint instead of traversing the internet. If this value is disabled and you have nodes or AWS Fargate pods in the cluster, then ensure that publicAccessCidrs includes the necessary CIDR blocks for communication with the nodes or Fargate pods. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .

" }, "publicAccessCidrs":{ "shape":"StringList", - "documentation":"

The CIDR blocks that are allowed access to your cluster's public Kubernetes API server endpoint. Communication to the endpoint from addresses outside of the listed CIDR blocks is denied. The default value is 0.0.0.0/0. If you've disabled private endpoint access and you have worker nodes or AWS Fargate pods in the cluster, then ensure that the necessary CIDR blocks are listed. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .

" + "documentation":"

The CIDR blocks that are allowed access to your cluster's public Kubernetes API server endpoint. Communication to the endpoint from addresses outside of the listed CIDR blocks is denied. The default value is 0.0.0.0/0. If you've disabled private endpoint access and you have nodes or AWS Fargate pods in the cluster, then ensure that the necessary CIDR blocks are listed. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .

" } }, "documentation":"

An object representing an Amazon EKS cluster VPC configuration response.

" }, + "configStatus":{ + "type":"string", + "enum":[ + "CREATING", + "DELETING", + "ACTIVE" + ] + }, "labelKey":{ "type":"string", "max":63, @@ -2652,6 +3053,21 @@ "type":"map", "key":{"shape":"labelKey"}, "value":{"shape":"labelValue"} + }, + "requiredClaimsKey":{ + "type":"string", + "max":63, + "min":1 + }, + "requiredClaimsMap":{ + "type":"map", + "key":{"shape":"requiredClaimsKey"}, + "value":{"shape":"requiredClaimsValue"} + }, + "requiredClaimsValue":{ + "type":"string", + "max":253, + "min":1 } }, "documentation":"

Amazon Elastic Kubernetes Service (Amazon EKS) is a managed service that makes it easy for you to run Kubernetes on AWS without needing to stand up or maintain your own Kubernetes control plane. Kubernetes is an open-source system for automating the deployment, scaling, and management of containerized applications.

Amazon EKS runs up-to-date versions of the open-source Kubernetes software, so you can use all the existing plugins and tooling from the Kubernetes community. Applications running on Amazon EKS are fully compatible with applications running on any standard Kubernetes environment, whether running in on-premises data centers or public clouds. This means that you can easily migrate any standard Kubernetes application to Amazon EKS without any code modification required.

" diff --git a/botocore/data/elasticache/2015-02-02/service-2.json b/botocore/data/elasticache/2015-02-02/service-2.json index 4066a498..23b98675 100644 --- a/botocore/data/elasticache/2015-02-02/service-2.json +++ b/botocore/data/elasticache/2015-02-02/service-2.json @@ -439,7 +439,7 @@ {"shape":"CacheSubnetGroupInUse"}, {"shape":"CacheSubnetGroupNotFoundFault"} ], - "documentation":"

Deletes a cache subnet group.

You cannot delete a cache subnet group if it is associated with any clusters.

" + "documentation":"

Deletes a cache subnet group.

You cannot delete a default cache subnet group or one that is associated with any clusters.

" }, "DeleteGlobalReplicationGroup":{ "name":"DeleteGlobalReplicationGroup", @@ -1090,7 +1090,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Modifies a replication group's shards (node groups) by allowing you to add shards, remove shards, or rebalance the keyspaces among exisiting shards.

" + "documentation":"

Modifies a replication group's shards (node groups) by allowing you to add shards, remove shards, or rebalance the keyspaces among existing shards.

" }, "ModifyUser":{ "name":"ModifyUser", @@ -2412,7 +2412,7 @@ }, "PreferredMaintenanceWindow":{ "shape":"String", - "documentation":"

Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are:

Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.

Valid values for ddd are:

Example: sun:23:00-mon:01:30

" + "documentation":"

Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are:

" }, "Port":{ "shape":"IntegerOptional", @@ -2625,7 +2625,7 @@ }, "Engine":{ "shape":"String", - "documentation":"

The name of the cache engine to be used for the clusters in this replication group.

" + "documentation":"

The name of the cache engine to be used for the clusters in this replication group. Must be Redis.

" }, "EngineVersion":{ "shape":"String", diff --git a/botocore/data/elbv2/2015-12-01/service-2.json b/botocore/data/elbv2/2015-12-01/service-2.json index 2ea2fdef..a0dc201e 100644 --- a/botocore/data/elbv2/2015-12-01/service-2.json +++ b/botocore/data/elbv2/2015-12-01/service-2.json @@ -80,7 +80,7 @@ {"shape":"ALPNPolicyNotSupportedException"}, {"shape":"TooManyTagsException"} ], - "documentation":"

Creates a listener for the specified Application Load Balancer, Network Load Balancer. or Gateway Load Balancer.

For more information, see the following:

This operation is idempotent, which means that it completes at most one time. If you attempt to create multiple listeners with the same settings, each call succeeds.

" + "documentation":"

Creates a listener for the specified Application Load Balancer, Network Load Balancer, or Gateway Load Balancer.

For more information, see the following:

This operation is idempotent, which means that it completes at most one time. If you attempt to create multiple listeners with the same settings, each call succeeds.

" }, "CreateLoadBalancer":{ "name":"CreateLoadBalancer", @@ -1231,7 +1231,7 @@ }, "HealthCheckIntervalSeconds":{ "shape":"HealthCheckIntervalSeconds", - "documentation":"

The approximate amount of time, in seconds, between health checks of an individual target. For TCP health checks, the supported values are 10 and 30 seconds. If the target type is instance or ip, the default is 30 seconds. If the target group protocol is GENEVE, the default is 10 seconds. If the target type is lambda, the default is 35 seconds.

" + "documentation":"

The approximate amount of time, in seconds, between health checks of an individual target. If the target group protocol is TCP, TLS, UDP, or TCP_UDP, the supported values are 10 and 30 seconds. If the target group protocol is HTTP or HTTPS, the default is 30 seconds. If the target group protocol is GENEVE, the default is 10 seconds. If the target type is lambda, the default is 35 seconds.

" }, "HealthCheckTimeoutSeconds":{ "shape":"HealthCheckTimeoutSeconds", @@ -2147,7 +2147,7 @@ "members":{ "Code":{ "shape":"LoadBalancerStateEnum", - "documentation":"

The state code. The initial state of the load balancer is provisioning. After the load balancer is fully set up and ready to route traffic, its state is active. If the load balancer could not be set up, its state is failed.

" + "documentation":"

The state code. The initial state of the load balancer is provisioning. After the load balancer is fully set up and ready to route traffic, its state is active. If the load balancer is routing traffic but does not have the resources it needs to scale, its state is active_impaired. If the load balancer could not be set up, its state is failed.

" }, "Reason":{ "shape":"StateReason", @@ -3100,7 +3100,7 @@ "members":{ "Key":{ "shape":"TargetGroupAttributeKey", - "documentation":"

The name of the attribute.

The following attribute is supported by all load balancers:

The following attributes are supported by both Application Load Balancers and Network Load Balancers:

The following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:

The following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:

The following attributes are supported only by Network Load Balancers:

" + "documentation":"

The name of the attribute.

The following attribute is supported by all load balancers:

The following attributes are supported by both Application Load Balancers and Network Load Balancers:

The following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:

The following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:

The following attributes are supported only by Network Load Balancers:

" }, "Value":{ "shape":"TargetGroupAttributeValue", diff --git a/botocore/data/emr-containers/2020-10-01/service-2.json b/botocore/data/emr-containers/2020-10-01/service-2.json index af1a36f9..0db831a4 100644 --- a/botocore/data/emr-containers/2020-10-01/service-2.json +++ b/botocore/data/emr-containers/2020-10-01/service-2.json @@ -487,7 +487,10 @@ } } }, - "Date":{"type":"timestamp"}, + "Date":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, "DeleteManagedEndpointRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/emr/2009-03-31/service-2.json b/botocore/data/emr/2009-03-31/service-2.json index 692b62a9..e905589f 100644 --- a/botocore/data/emr/2009-03-31/service-2.json +++ b/botocore/data/emr/2009-03-31/service-2.json @@ -107,7 +107,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Creates a new Amazon EMR Studio.

" + "documentation":"

Creates a new Amazon EMR Studio.

" }, "CreateStudioSessionMapping":{ "name":"CreateStudioSessionMapping", @@ -120,7 +120,7 @@ {"shape":"InternalServerError"}, {"shape":"InvalidRequestException"} ], - "documentation":"

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Maps a user or group to the Amazon EMR Studio specified by StudioId, and applies a session policy to refine Studio permissions for that user or group.

" + "documentation":"

Maps a user or group to the Amazon EMR Studio specified by StudioId, and applies a session policy to refine Studio permissions for that user or group.

" }, "DeleteSecurityConfiguration":{ "name":"DeleteSecurityConfiguration", @@ -147,7 +147,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Removes an Amazon EMR Studio from the Studio metadata store.

" + "documentation":"

Removes an Amazon EMR Studio from the Studio metadata store.

" }, "DeleteStudioSessionMapping":{ "name":"DeleteStudioSessionMapping", @@ -160,7 +160,7 @@ {"shape":"InternalServerError"}, {"shape":"InvalidRequestException"} ], - "documentation":"

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Removes a user or group from an Amazon EMR Studio.

" + "documentation":"

Removes a user or group from an Amazon EMR Studio.

" }, "DescribeCluster":{ "name":"DescribeCluster", @@ -244,7 +244,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Returns details for the specified Amazon EMR Studio including ID, Name, VPC, Studio access URL, and so on.

" + "documentation":"

Returns details for the specified Amazon EMR Studio including ID, Name, VPC, Studio access URL, and so on.

" }, "GetBlockPublicAccessConfiguration":{ "name":"GetBlockPublicAccessConfiguration", @@ -282,7 +282,7 @@ {"shape":"InternalServerError"}, {"shape":"InvalidRequestException"} ], - "documentation":"

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Fetches mapping details for the specified Amazon EMR Studio and identity (user or group).

" + "documentation":"

Fetches mapping details for the specified Amazon EMR Studio and identity (user or group).

" }, "ListBootstrapActions":{ "name":"ListBootstrapActions", @@ -394,7 +394,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Provides a list of steps for the cluster in reverse order unless you specify stepIds with the request of filter by StepStates. You can specify a maximum of ten stepIDs.

" + "documentation":"

Provides a list of steps for the cluster in reverse order unless you specify stepIds with the request, or filter by StepStates. You can specify a maximum of 10 stepIds.

" }, "ListStudioSessionMappings":{ "name":"ListStudioSessionMappings", @@ -408,7 +408,7 @@ {"shape":"InternalServerError"}, {"shape":"InvalidRequestException"} ], - "documentation":"

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Returns a list of all user or group session mappings for the EMR Studio specified by StudioId.

" + "documentation":"

Returns a list of all user or group session mappings for the Amazon EMR Studio specified by StudioId.

" }, "ListStudios":{ "name":"ListStudios", @@ -422,7 +422,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Returns a list of all Amazon EMR Studios associated with the AWS account. The list includes details such as ID, Studio Access URL, and creation time for each Studio.

" + "documentation":"

Returns a list of all Amazon EMR Studios associated with the AWS account. The list includes details such as ID, Studio Access URL, and creation time for each Studio.

" }, "ModifyCluster":{ "name":"ModifyCluster", @@ -607,6 +607,19 @@ ], "documentation":"

TerminateJobFlows shuts a list of clusters (job flows) down. When a job flow is shut down, any step not yet completed is canceled and the EC2 instances on which the cluster is running are stopped. Any log files not already saved are uploaded to Amazon S3 if a LogUri was specified when the cluster was created.

The maximum number of clusters allowed is 10. The call to TerminateJobFlows is asynchronous. Depending on the configuration of the cluster, it may take up to 1-5 minutes for the cluster to completely terminate and release allocated resources, such as Amazon EC2 instances.

" }, + "UpdateStudio":{ + "name":"UpdateStudio", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateStudioInput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Updates an Amazon EMR Studio configuration, including attributes such as name, description, and subnets.

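A hedged sketch of the new operation through botocore; the UpdateStudioInput members are not shown in this hunk, so the parameter names below (Name, Description, SubnetIds, DefaultS3Location) are assumptions inferred from the description, and all values are placeholders:

emr = session.create_client('emr', region_name='us-west-2')  # session from the EKS sketches above

# Rename a Studio and point it at a different subnet and S3 backup location.
# Parameter names other than StudioId are assumed, not confirmed by this hunk.
emr.update_studio(
    StudioId='es-EXAMPLE12345',
    Name='analytics-studio',
    Description='Studio for the analytics team',
    SubnetIds=['subnet-0123456789abcdef0'],
    DefaultS3Location='s3://example-bucket/studio/',
)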
" + }, "UpdateStudioSessionMapping":{ "name":"UpdateStudioSessionMapping", "http":{ @@ -618,7 +631,7 @@ {"shape":"InternalServerError"}, {"shape":"InvalidRequestException"} ], - "documentation":"

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Updates the session policy attached to the user or group for the specified Amazon EMR Studio.

" + "documentation":"

Updates the session policy attached to the user or group for the specified Amazon EMR Studio.

" } }, "shapes":{ @@ -1438,7 +1451,8 @@ "ServiceRole", "UserRole", "WorkspaceSecurityGroupId", - "EngineSecurityGroupId" + "EngineSecurityGroupId", + "DefaultS3Location" ], "members":{ "Name":{ @@ -1447,7 +1461,7 @@ }, "Description":{ "shape":"XmlStringMaxLen256", - "documentation":"

A detailed description of the Studio.

" + "documentation":"

A detailed description of the Amazon EMR Studio.

" }, "AuthMode":{ "shape":"AuthMode", @@ -1459,7 +1473,7 @@ }, "SubnetIds":{ "shape":"SubnetIdList", - "documentation":"

A list of subnet IDs to associate with the Studio. The subnets must belong to the VPC specified by VpcId. Studio users can create a Workspace in any of the specified subnets.

" + "documentation":"

A list of subnet IDs to associate with the Amazon EMR Studio. A Studio can have a maximum of 5 subnets. The subnets must belong to the VPC specified by VpcId. Studio users can create a Workspace in any of the specified subnets.

" }, "ServiceRole":{ "shape":"XmlString", @@ -1467,7 +1481,7 @@ }, "UserRole":{ "shape":"XmlString", - "documentation":"

The IAM user role that will be assumed by users and groups logged in to a Studio. The permissions attached to this IAM role can be scoped down for each user or group using session policies.

" + "documentation":"

The IAM user role that will be assumed by users and groups logged in to an Amazon EMR Studio. The permissions attached to this IAM role can be scoped down for each user or group using session policies.

" }, "WorkspaceSecurityGroupId":{ "shape":"XmlStringMaxLen256", @@ -1479,11 +1493,11 @@ }, "DefaultS3Location":{ "shape":"XmlString", - "documentation":"

The default Amazon S3 location to back up EMR Studio Workspaces and notebook files. A Studio user can select an alternative Amazon S3 location when creating a Workspace.

" + "documentation":"

The Amazon S3 location to back up Amazon EMR Studio Workspaces and notebook files.

" }, "Tags":{ "shape":"TagList", - "documentation":"

A list of tags to associate with the Studio. Tags are user-defined key-value pairs that consist of a required key string with a maximum of 128 characters, and an optional value string with a maximum of 256 characters.

" + "documentation":"

A list of tags to associate with the Amazon EMR Studio. Tags are user-defined key-value pairs that consist of a required key string with a maximum of 128 characters, and an optional value string with a maximum of 256 characters.

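For illustration only, a CreateStudio sketch using the emr client from the previous example; the roles, security groups, VPC, subnet, and S3 values are placeholders:

# Create an SSO-authenticated Studio; DefaultS3Location is now a required field.
studio = emr.create_studio(
    Name='analytics-studio',
    AuthMode='SSO',
    VpcId='vpc-0123456789abcdef0',
    SubnetIds=['subnet-0123456789abcdef0'],
    ServiceRole='arn:aws:iam::123456789012:role/emr-studio-service-role',
    UserRole='arn:aws:iam::123456789012:role/emr-studio-user-role',
    WorkspaceSecurityGroupId='sg-0123456789abcdef0',
    EngineSecurityGroupId='sg-0fedcba9876543210',
    DefaultS3Location='s3://example-bucket/studio/',
)
print(studio['StudioId'], studio['Url'])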
" } } }, @@ -1518,11 +1532,11 @@ }, "IdentityName":{ "shape":"XmlStringMaxLen256", - "documentation":"

The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" + "documentation":"

The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" }, "IdentityType":{ "shape":"IdentityType", - "documentation":"

Specifies whether the identity to map to the Studio is a user or a group.

" + "documentation":"

Specifies whether the identity to map to the Amazon EMR Studio is a user or a group.

" }, "SessionPolicyArn":{ "shape":"XmlStringMaxLen256", @@ -1573,11 +1587,11 @@ }, "IdentityName":{ "shape":"XmlStringMaxLen256", - "documentation":"

The name of the user name or group to remove from the Studio. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" + "documentation":"

The name of the user or group to remove from the Amazon EMR Studio. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" }, "IdentityType":{ "shape":"IdentityType", - "documentation":"

Specifies whether the identity to delete from the Studio is a user or a group.

" + "documentation":"

Specifies whether the identity to delete from the Amazon EMR Studio is a user or a group.

" } } }, @@ -1959,7 +1973,7 @@ }, "IdentityName":{ "shape":"XmlStringMaxLen256", - "documentation":"

The name of the user or group to fetch. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" + "documentation":"

The name of the user or group to fetch. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" }, "IdentityType":{ "shape":"IdentityType", @@ -1982,7 +1996,7 @@ "members":{ "Properties":{ "shape":"KeyValueList", - "documentation":"

A list of Java properties that are set when the step runs. You can use these properties to pass key value pairs to your main function.

" + "documentation":"

A list of Java properties that are set when the step runs. You can use these properties to pass key-value pairs to your main function.

" }, "Jar":{ "shape":"XmlString", @@ -2114,7 +2128,7 @@ }, "TargetSpotCapacity":{ "shape":"WholeNumber", - "documentation":"

The target capacity of Spot units for the instance fleet, which determines how many Spot instances to provision. When the instance fleet launches, Amazon EMR tries to provision Spot instances as specified by InstanceTypeConfig. Each instance configuration has a specified WeightedCapacity. When a Spot instance is provisioned, the WeightedCapacity units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a WeightedCapacity of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units. You can use InstanceFleet$ProvisionedSpotCapacity to determine the Spot capacity units that have been provisioned for the instance fleet.

If not specified or set to 0, only On-Demand instances are provisioned for the instance fleet. At least one of TargetSpotCapacity and TargetOnDemandCapacity should be greater than 0. For a master instance fleet, only one of TargetSpotCapacity and TargetOnDemandCapacity can be specified, and its value must be 1.

" + "documentation":"

The target capacity of Spot units for the instance fleet, which determines how many Spot Instances to provision. When the instance fleet launches, Amazon EMR tries to provision Spot Instances as specified by InstanceTypeConfig. Each instance configuration has a specified WeightedCapacity. When a Spot instance is provisioned, the WeightedCapacity units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a WeightedCapacity of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units. You can use InstanceFleet$ProvisionedSpotCapacity to determine the Spot capacity units that have been provisioned for the instance fleet.

If not specified or set to 0, only On-Demand Instances are provisioned for the instance fleet. At least one of TargetSpotCapacity and TargetOnDemandCapacity should be greater than 0. For a master instance fleet, only one of TargetSpotCapacity and TargetOnDemandCapacity can be specified, and its value must be 1.

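To make the weighted-capacity arithmetic above concrete, a hedged sketch of one instance fleet entry as it might be passed in the Instances.InstanceFleets list of a RunJobFlow call; the instance types and weights are placeholders:

# TargetSpotCapacity of 6 is met exactly by one weight-4 plus one weight-2 instance.
# If 2 units remain and only the weight-4 type can be provisioned, the fleet ends up
# at 8 units, exceeding the target by 2, as described above.
task_fleet = {
    'Name': 'task-fleet',
    'InstanceFleetType': 'TASK',
    'TargetSpotCapacity': 6,
    'TargetOnDemandCapacity': 0,
    'InstanceTypeConfigs': [
        {'InstanceType': 'm5.xlarge', 'WeightedCapacity': 2},
        {'InstanceType': 'm5.2xlarge', 'WeightedCapacity': 4},
    ],
}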
" }, "ProvisionedOnDemandCapacity":{ "shape":"WholeNumber", @@ -2145,7 +2159,7 @@ }, "InstanceFleetType":{ "shape":"InstanceFleetType", - "documentation":"

The node type that the instance fleet hosts. Valid values are MASTER,CORE,and TASK.

" + "documentation":"

The node type that the instance fleet hosts. Valid values are MASTER, CORE, and TASK.

" }, "TargetOnDemandCapacity":{ "shape":"WholeNumber", @@ -2308,7 +2322,7 @@ }, "BidPrice":{ "shape":"String", - "documentation":"

The bid price for each EC2 Spot Instance type as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

" + "documentation":"

If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify OnDemandPrice to set the amount equal to the On-Demand price, or specify an amount in USD.
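A minimal sketch of an instance group definition that sets this field; the group name, instance type, and count below are illustrative assumptions, not values from this model:

```python
# Sketch: a task instance group that uses Spot Instances and bids up to the
# On-Demand price. A dict like this could appear in the InstanceGroups list
# of a RunJobFlow or AddInstanceGroups request.
task_spot_group = {
    "Name": "task-spot",            # illustrative name
    "InstanceRole": "TASK",
    "InstanceType": "m5.xlarge",    # assumed instance type
    "InstanceCount": 4,
    "Market": "SPOT",
    "BidPrice": "OnDemandPrice",    # or a USD amount such as "0.25"
}
```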

" }, "InstanceType":{ "shape":"InstanceType", @@ -2383,7 +2397,7 @@ }, "BidPrice":{ "shape":"XmlStringMaxLen256", - "documentation":"

The bid price for each EC2 Spot Instance type as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

" + "documentation":"

If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify OnDemandPrice to set the amount equal to the On-Demand price, or specify an amount in USD.

" }, "InstanceType":{ "shape":"InstanceType", @@ -2442,7 +2456,7 @@ }, "BidPrice":{ "shape":"XmlStringMaxLen256", - "documentation":"

The bid price for each EC2 Spot Instance type as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

" + "documentation":"

If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify OnDemandPrice to set the amount equal to the On-Demand price, or specify an amount in USD.

" }, "InstanceType":{ "shape":"InstanceType", @@ -2814,7 +2828,7 @@ "documentation":"

The message associated with the exception.

" } }, - "documentation":"

This exception occurs when there is an internal failure in the EMR service.

", + "documentation":"

This exception occurs when there is an internal failure in the Amazon EMR service.

", "exception":true, "fault":true }, @@ -3435,7 +3449,7 @@ "members":{ "SessionMappings":{ "shape":"SessionMappingSummaryList", - "documentation":"

A list of session mapping summary objects. Each object includes session mapping details such as creation time, identity type (user or group), and Studio ID.

" + "documentation":"

A list of session mapping summary objects. Each object includes session mapping details such as creation time, identity type (user or group), and Amazon EMR Studio ID.

" }, "Marker":{ "shape":"Marker", @@ -3512,7 +3526,7 @@ }, "StepConcurrencyLevel":{ "shape":"Integer", - "documentation":"

The number of steps that can be executed concurrently. You can specify a maximum of 256 steps.

" + "documentation":"

The number of steps that can be executed concurrently. You can specify a minimum of 1 step and a maximum of 256 steps.

" } } }, @@ -3671,6 +3685,35 @@ "type":"list", "member":{"shape":"NotebookExecutionSummary"} }, + "OnDemandCapacityReservationOptions":{ + "type":"structure", + "members":{ + "UsageStrategy":{ + "shape":"OnDemandCapacityReservationUsageStrategy", + "documentation":"

Indicates whether to use unused Capacity Reservations for fulfilling On-Demand capacity.

If you specify use-capacity-reservations-first, the fleet uses unused Capacity Reservations to fulfill On-Demand capacity up to the target On-Demand capacity. If multiple instance pools have unused Capacity Reservations, the On-Demand allocation strategy (lowest-price) is applied. If the number of unused Capacity Reservations is less than the On-Demand target capacity, the remaining On-Demand target capacity is launched according to the On-Demand allocation strategy (lowest-price).

If you do not specify a value, the fleet fulfills the On-Demand capacity according to the chosen On-Demand allocation strategy.

" + }, + "CapacityReservationPreference":{ + "shape":"OnDemandCapacityReservationPreference", + "documentation":"

Indicates the instance's Capacity Reservation preferences. Possible preferences are open and none.

" + }, + "CapacityReservationResourceGroupArn":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The ARN of the Capacity Reservation resource group in which to run the instance.

" + } + }, + "documentation":"

Describes the strategy for using unused Capacity Reservations for fulfilling On-Demand capacity.

" + }, + "OnDemandCapacityReservationPreference":{ + "type":"string", + "enum":[ + "open", + "none" + ] + }, + "OnDemandCapacityReservationUsageStrategy":{ + "type":"string", + "enum":["use-capacity-reservations-first"] + }, "OnDemandProvisioningAllocationStrategy":{ "type":"string", "enum":["lowest-price"] @@ -3681,7 +3724,11 @@ "members":{ "AllocationStrategy":{ "shape":"OnDemandProvisioningAllocationStrategy", - "documentation":"

Specifies the strategy to use in launching On-Demand Instance fleets. Currently, the only option is lowest-price (the default), which launches the lowest price first.

" + "documentation":"

Specifies the strategy to use in launching On-Demand instance fleets. Currently, the only option is lowest-price (the default), which launches the lowest price first.

" + }, + "CapacityReservationOptions":{ + "shape":"OnDemandCapacityReservationOptions", + "documentation":"

The launch specification for On-Demand instances in the instance fleet, which determines the allocation strategy.

" } }, "documentation":"

The launch specification for On-Demand Instances in the instance fleet, which determines the allocation strategy.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. On-Demand Instances allocation strategy is available in Amazon EMR version 5.12.1 and later.
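A minimal sketch of how this On-Demand launch specification and its Capacity Reservation options might be expressed when starting a cluster with boto3; the release label, instance type, capacities, and role names are placeholder assumptions:

```python
import boto3

emr = boto3.client("emr")

# Sketch: a core instance fleet that fills On-Demand capacity from unused
# Capacity Reservations first, falling back to the lowest-price strategy.
response = emr.run_job_flow(
    Name="example-cluster",                      # placeholder name
    ReleaseLabel="emr-5.30.1",                   # assumed release with instance-fleet support
    Instances={
        "InstanceFleets": [
            {
                "InstanceFleetType": "CORE",
                "TargetOnDemandCapacity": 2,
                "InstanceTypeConfigs": [
                    {"InstanceType": "m5.xlarge", "WeightedCapacity": 1}
                ],
                "LaunchSpecifications": {
                    "OnDemandSpecification": {
                        "AllocationStrategy": "lowest-price",
                        "CapacityReservationOptions": {
                            "UsageStrategy": "use-capacity-reservations-first",
                            "CapacityReservationPreference": "open",
                        },
                    }
                },
            }
        ],
        "KeepJobFlowAliveWhenNoSteps": True,
    },
    ServiceRole="EMR_DefaultRole",               # assumed default roles
    JobFlowRole="EMR_EC2_DefaultRole",
)
print(response["JobFlowId"], response.get("ClusterArn"))
```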

" @@ -3796,7 +3843,7 @@ }, "ClusterArn":{ "shape":"ArnType", - "documentation":"

The Amazon Resource Name of the cluster.

" + "documentation":"

The Amazon Resource Name (ARN) of the cluster.

" } } }, @@ -3955,7 +4002,7 @@ }, "NewSupportedProducts":{ "shape":"NewSupportedProductsList", - "documentation":"

For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and later, use Applications.

A list of strings that indicates third-party software to use with the job flow that accepts a user argument list. EMR accepts and forwards the argument list to the corresponding installation script as bootstrap action arguments. For more information, see \"Launch a Job Flow on the MapR Distribution for Hadoop\" in the Amazon EMR Developer Guide. Supported values are:

" + "documentation":"

For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and later, use Applications.

A list of strings that indicates third-party software to use with the job flow that accepts a user argument list. EMR accepts and forwards the argument list to the corresponding installation script as bootstrap action arguments. For more information, see \"Launch a Job Flow on the MapR Distribution for Hadoop\" in the Amazon EMR Developer Guide. Supported values are:

" }, "Applications":{ "shape":"ApplicationList", @@ -4029,11 +4076,11 @@ "members":{ "JobFlowId":{ "shape":"XmlStringMaxLen256", - "documentation":"

An unique identifier for the job flow.

" + "documentation":"

A unique identifier for the job flow.

" }, "ClusterArn":{ "shape":"ArnType", - "documentation":"

The Amazon Resource Name of the cluster.

" + "documentation":"

The Amazon Resource Name (ARN) of the cluster.

" } }, "documentation":"

The result of the RunJobFlow operation.

" @@ -4170,11 +4217,11 @@ }, "IdentityName":{ "shape":"XmlStringMaxLen256", - "documentation":"

The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference.

" + "documentation":"

The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference.

" }, "IdentityType":{ "shape":"IdentityType", - "documentation":"

Specifies whether the identity mapped to the Studio is a user or a group.

" + "documentation":"

Specifies whether the identity mapped to the Amazon EMR Studio is a user or a group.

" }, "SessionPolicyArn":{ "shape":"XmlStringMaxLen256", @@ -4204,11 +4251,11 @@ }, "IdentityName":{ "shape":"XmlStringMaxLen256", - "documentation":"

The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference.

" + "documentation":"

The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference.

" }, "IdentityType":{ "shape":"IdentityType", - "documentation":"

Specifies whether the identity mapped to the Studio is a user or a group.

" + "documentation":"

Specifies whether the identity mapped to the Amazon EMR Studio is a user or a group.

" }, "SessionPolicyArn":{ "shape":"XmlStringMaxLen256", @@ -4315,7 +4362,7 @@ }, "BlockDurationMinutes":{ "shape":"WholeNumber", - "documentation":"

The defined duration for Spot Instances (also known as Spot blocks) in minutes. When specified, the Spot Instance does not terminate before the defined duration expires, and defined duration pricing for Spot instances applies. Valid values are 60, 120, 180, 240, 300, or 360. The duration period starts as soon as a Spot Instance receives its instance ID. At the end of the duration, Amazon EC2 marks the Spot Instance for termination and provides a Spot Instance termination notice, which gives the instance a two-minute warning before it terminates.

" + "documentation":"

The defined duration for Spot Instances (also known as Spot blocks) in minutes. When specified, the Spot Instance does not terminate before the defined duration expires, and defined duration pricing for Spot Instances applies. Valid values are 60, 120, 180, 240, 300, or 360. The duration period starts as soon as a Spot Instance receives its instance ID. At the end of the duration, Amazon EC2 marks the Spot Instance for termination and provides a Spot Instance termination notice, which gives the instance a two-minute warning before it terminates.

" }, "AllocationStrategy":{ "shape":"SpotProvisioningAllocationStrategy", @@ -4650,27 +4697,27 @@ "members":{ "StudioId":{ "shape":"XmlStringMaxLen256", - "documentation":"

The ID of the EMR Studio.

" + "documentation":"

The ID of the Amazon EMR Studio.

" }, "StudioArn":{ "shape":"XmlStringMaxLen256", - "documentation":"

The Amazon Resource Name (ARN) of the EMR Studio.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon EMR Studio.

" }, "Name":{ "shape":"XmlStringMaxLen256", - "documentation":"

The name of the EMR Studio.

" + "documentation":"

The name of the Amazon EMR Studio.

" }, "Description":{ "shape":"XmlStringMaxLen256", - "documentation":"

The detailed description of the EMR Studio.

" + "documentation":"

The detailed description of the Amazon EMR Studio.

" }, "AuthMode":{ "shape":"AuthMode", - "documentation":"

Specifies whether the Studio authenticates users using single sign-on (SSO) or IAM.

" + "documentation":"

Specifies whether the Amazon EMR Studio authenticates users using single sign-on (SSO) or IAM.

" }, "VpcId":{ "shape":"XmlStringMaxLen256", - "documentation":"

The ID of the VPC associated with the EMR Studio.

" + "documentation":"

The ID of the VPC associated with the Amazon EMR Studio.

" }, "SubnetIds":{ "shape":"SubnetIdList", @@ -4702,7 +4749,7 @@ }, "DefaultS3Location":{ "shape":"XmlString", - "documentation":"

The default Amazon S3 location to back up Amazon EMR Studio Workspaces and notebook files.

" + "documentation":"

The Amazon S3 location to back up Amazon EMR Studio Workspaces and notebook files.

" }, "Tags":{ "shape":"TagList", @@ -4728,7 +4775,7 @@ }, "Description":{ "shape":"XmlStringMaxLen256", - "documentation":"

The detailed description of the EMR Studio.

" + "documentation":"

The detailed description of the Amazon EMR Studio.

" }, "Url":{ "shape":"XmlStringMaxLen256", @@ -4761,7 +4808,7 @@ "documentation":"

The list of user-supplied arguments.

" } }, - "documentation":"

The list of supported product configurations which allow user-supplied arguments. EMR accepts these arguments and forwards them to the corresponding installation script as bootstrap action arguments.

" + "documentation":"

The list of supported product configurations that allow user-supplied arguments. EMR accepts these arguments and forwards them to the corresponding installation script as bootstrap action arguments.

" }, "SupportedProductsList":{ "type":"list", @@ -4828,6 +4875,32 @@ "COUNT_PER_SECOND" ] }, + "UpdateStudioInput":{ + "type":"structure", + "required":["StudioId"], + "members":{ + "StudioId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The ID of the Amazon EMR Studio to update.

" + }, + "Name":{ + "shape":"XmlStringMaxLen256", + "documentation":"

A descriptive name for the Amazon EMR Studio.

" + }, + "Description":{ + "shape":"XmlStringMaxLen256", + "documentation":"

A detailed description to assign to the Amazon EMR Studio.

" + }, + "SubnetIds":{ + "shape":"SubnetIdList", + "documentation":"

A list of subnet IDs to associate with the Amazon EMR Studio. The list can include new subnet IDs, but must also include all of the subnet IDs previously associated with the Studio. The list order does not matter. A Studio can have a maximum of 5 subnets. The subnets must belong to the same VPC as the Studio.

" + }, + "DefaultS3Location":{ + "shape":"XmlString", + "documentation":"

The Amazon S3 location to back up Workspaces and notebook files for the Amazon EMR Studio.
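A minimal sketch of calling this new operation with boto3; the method name follows boto3's snake_case convention for UpdateStudio, and the Studio ID, subnet IDs, and S3 URI are placeholders:

```python
import boto3

emr = boto3.client("emr")

# Sketch: rename a Studio and extend its subnets. SubnetIds must include all
# subnets previously associated with the Studio in addition to any new ones.
emr.update_studio(
    StudioId="es-EXAMPLE1234567890",        # placeholder Studio ID
    Name="renamed-studio",
    Description="Studio with an additional subnet",
    SubnetIds=["subnet-0aaa1111", "subnet-0bbb2222"],
    DefaultS3Location="s3://example-bucket/studio-backups/",
)
```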

" + } + } + }, "UpdateStudioSessionMappingInput":{ "type":"structure", "required":[ @@ -4838,7 +4911,7 @@ "members":{ "StudioId":{ "shape":"XmlStringMaxLen256", - "documentation":"

The ID of the EMR Studio.

" + "documentation":"

The ID of the Amazon EMR Studio.

" }, "IdentityId":{ "shape":"XmlStringMaxLen256", @@ -4846,7 +4919,7 @@ }, "IdentityName":{ "shape":"XmlStringMaxLen256", - "documentation":"

The name of the user or group to update. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" + "documentation":"

The name of the user or group to update. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" }, "IdentityType":{ "shape":"IdentityType", diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 4d2aba8b..3cdb8e38 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -22,6 +22,9 @@ "ap-northeast-2" : { "description" : "Asia Pacific (Seoul)" }, + "ap-northeast-3" : { + "description" : "Asia Pacific (Osaka)" + }, "ap-south-1" : { "description" : "Asia Pacific (Mumbai)" }, @@ -83,6 +86,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -137,6 +141,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -255,6 +260,22 @@ "us-west-2" : { } } }, + "amplifybackend" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, "api.detective" : { "defaults" : { "protocols" : [ "https" ] @@ -332,6 +353,12 @@ }, "hostname" : "api.ecr.ap-northeast-2.amazonaws.com" }, + "ap-northeast-3" : { + "credentialScope" : { + "region" : "ap-northeast-3" + }, + "hostname" : "api.ecr.ap-northeast-3.amazonaws.com" + }, "ap-south-1" : { "credentialScope" : { "region" : "ap-south-1" @@ -581,6 +608,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -638,6 +666,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -785,6 +814,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -856,9 +886,11 @@ }, "batch" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -983,6 +1015,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -1240,6 +1273,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -1448,6 +1482,12 @@ }, "hostname" : "cognito-idp-fips.us-east-2.amazonaws.com" }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "cognito-idp-fips.us-west-1.amazonaws.com" + }, "fips-us-west-2" : { "credentialScope" : { "region" : "us-west-2" @@ -1548,6 +1588,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -1603,7 +1644,10 @@ }, "contact-lens" : { "endpoints" : { + "ap-northeast-1" : { }, "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, "us-west-2" : { } } @@ -1760,6 +1804,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -1819,6 +1864,7 @@ "ap-east-1" : { }, 
"ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -1994,6 +2040,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -2055,6 +2102,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -2112,6 +2160,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -2166,6 +2215,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -2217,6 +2267,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -2265,6 +2316,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -2295,6 +2347,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -2343,6 +2396,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -2377,6 +2431,12 @@ }, "hostname" : "elasticfilesystem-fips.ap-northeast-2.amazonaws.com" }, + "fips-ap-northeast-3" : { + "credentialScope" : { + "region" : "ap-northeast-3" + }, + "hostname" : "elasticfilesystem-fips.ap-northeast-3.amazonaws.com" + }, "fips-ap-south-1" : { "credentialScope" : { "region" : "ap-south-1" @@ -2490,6 +2550,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -2542,6 +2603,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -2618,8 +2680,18 @@ }, "emr-containers" : { "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -2639,6 +2711,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -2669,6 +2742,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -2717,6 +2791,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -3009,6 +3084,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -3063,6 +3139,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, 
"ap-southeast-2" : { }, @@ -3509,6 +3586,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -3599,6 +3677,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -3656,6 +3735,7 @@ }, "hostname" : "lakeformation-fips.us-west-2.amazonaws.com" }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -3669,6 +3749,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -3782,6 +3863,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -4070,6 +4152,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -4142,6 +4225,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -4735,6 +4819,36 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "ram-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "ram-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "ram-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "ram-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "ram-fips.us-west-2.amazonaws.com" + }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -4749,6 +4863,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -4805,6 +4920,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -4906,6 +5022,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -5099,6 +5216,7 @@ "signatureVersions" : [ "s3", "s3v4" ] }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { "hostname" : "s3.ap-southeast-1.amazonaws.com", @@ -5174,6 +5292,13 @@ "hostname" : "s3-control.ap-northeast-2.amazonaws.com", "signatureVersions" : [ "s3v4" ] }, + "ap-northeast-3" : { + "credentialScope" : { + "region" : "ap-northeast-3" + }, + "hostname" : "s3-control.ap-northeast-3.amazonaws.com", + "signatureVersions" : [ "s3v4" ] + }, "ap-south-1" : { "credentialScope" : { "region" : "ap-south-1" @@ -5366,6 +5491,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -5604,6 +5730,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, 
"ap-southeast-2" : { }, @@ -5711,6 +5838,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -5833,6 +5961,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -5885,6 +6014,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -5935,6 +6065,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -5989,6 +6120,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -6131,6 +6263,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -6197,6 +6330,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -6245,6 +6379,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -6328,6 +6463,8 @@ }, "transfer" : { "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -6369,6 +6506,7 @@ }, "hostname" : "transfer-fips.us-west-2.amazonaws.com" }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -6715,6 +6853,7 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, @@ -6744,6 +6883,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -7165,6 +7305,16 @@ }, "isRegionalized" : true }, + "guardduty" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + }, + "isRegionalized" : true + }, "health" : { "endpoints" : { "cn-north-1" : { }, @@ -7958,6 +8108,11 @@ "us-gov-west-1" : { } } }, + "connect" : { + "endpoints" : { + "us-gov-west-1" : { } + } + }, "data.iot" : { "defaults" : { "credentialScope" : { @@ -8566,6 +8721,22 @@ "us-gov-west-1" : { } } }, + "models.lex" : { + "defaults" : { + "credentialScope" : { + "service" : "lex" + } + }, + "endpoints" : { + "us-gov-west-1" : { }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "models-fips.lex.us-gov-west-1.amazonaws.com" + } + } + }, "monitoring" : { "endpoints" : { "fips-us-gov-east-1" : { @@ -8668,8 +8839,18 @@ }, "ram" : { "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "ram.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "ram.us-gov-west-1.amazonaws.com" + } } }, "rds" : { @@ -8759,9 +8940,31 @@ "us-gov-west-1" : { } } }, + "runtime.lex" : { + "defaults" : { + "credentialScope" : { + "service" : "lex" + } + }, + "endpoints" : 
{ + "us-gov-west-1" : { }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "runtime-fips.lex.us-gov-west-1.amazonaws.com" + } + } + }, "runtime.sagemaker" : { "endpoints" : { - "us-gov-west-1" : { } + "us-gov-west-1" : { }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "runtime.sagemaker.us-gov-west-1.amazonaws.com" + } } }, "s3" : { @@ -9422,6 +9625,11 @@ "us-iso-east-1" : { } } }, + "outposts" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, "rds" : { "endpoints" : { "us-iso-east-1" : { } @@ -9749,6 +9957,18 @@ "us-isob-east-1" : { } } }, + "route53" : { + "endpoints" : { + "aws-iso-b-global" : { + "credentialScope" : { + "region" : "us-isob-east-1" + }, + "hostname" : "route53.sc2s.sgov.gov" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-iso-b-global" + }, "s3" : { "defaults" : { "protocols" : [ "http", "https" ], diff --git a/botocore/data/es/2015-01-01/service-2.json b/botocore/data/es/2015-01-01/service-2.json index 59dfd48f..f2678e1b 100644 --- a/botocore/data/es/2015-01-01/service-2.json +++ b/botocore/data/es/2015-01-01/service-2.json @@ -203,6 +203,22 @@ ], "documentation":"

Delete the package.

" }, + "DescribeDomainAutoTunes":{ + "name":"DescribeDomainAutoTunes", + "http":{ + "method":"GET", + "requestUri":"/2015-01-01/es/domain/{DomainName}/autoTunes" + }, + "input":{"shape":"DescribeDomainAutoTunesRequest"}, + "output":{"shape":"DescribeDomainAutoTunesResponse"}, + "errors":[ + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Provides scheduled Auto-Tune action details for the Elasticsearch domain, such as Auto-Tune action type, description, severity, and scheduled date.

" + }, "DescribeElasticsearchDomain":{ "name":"DescribeElasticsearchDomain", "http":{ @@ -847,6 +863,181 @@ }, "documentation":"

Container for response returned by AssociatePackage operation.

" }, + "AutoTune":{ + "type":"structure", + "members":{ + "AutoTuneType":{ + "shape":"AutoTuneType", + "documentation":"

Specifies Auto-Tune type. Valid value is SCHEDULED_ACTION.

" + }, + "AutoTuneDetails":{ + "shape":"AutoTuneDetails", + "documentation":"

Specifies details of the Auto-Tune action. See the Developer Guide for more information.

" + } + }, + "documentation":"

Specifies Auto-Tune type and Auto-Tune action details.

" + }, + "AutoTuneDate":{ + "type":"timestamp", + "documentation":"

Specifies timestamp for the Auto-Tune action scheduled for the domain.

" + }, + "AutoTuneDesiredState":{ + "type":"string", + "documentation":"

Specifies the Auto-Tune desired state. Valid values are ENABLED, DISABLED.

", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "AutoTuneDetails":{ + "type":"structure", + "members":{ + "ScheduledAutoTuneDetails":{"shape":"ScheduledAutoTuneDetails"} + }, + "documentation":"

Specifies details of the Auto-Tune action. See the Developer Guide for more information.

" + }, + "AutoTuneList":{ + "type":"list", + "member":{"shape":"AutoTune"} + }, + "AutoTuneMaintenanceSchedule":{ + "type":"structure", + "members":{ + "StartAt":{ + "shape":"StartAt", + "documentation":"

Specifies the timestamp at which the Auto-Tune maintenance schedule starts.

" + }, + "Duration":{ + "shape":"Duration", + "documentation":"

Specifies maintenance schedule duration: duration value and duration unit. See the Developer Guide for more information.

" + }, + "CronExpressionForRecurrence":{ + "shape":"String", + "documentation":"

Specifies cron expression for a recurring maintenance schedule. See the Developer Guide for more information.

" + } + }, + "documentation":"

Specifies the Auto-Tune maintenance schedule. See the Developer Guide for more information.

" + }, + "AutoTuneMaintenanceScheduleList":{ + "type":"list", + "member":{"shape":"AutoTuneMaintenanceSchedule"}, + "max":100 + }, + "AutoTuneOptions":{ + "type":"structure", + "members":{ + "DesiredState":{ + "shape":"AutoTuneDesiredState", + "documentation":"

Specifies the Auto-Tune desired state. Valid values are ENABLED, DISABLED.

" + }, + "RollbackOnDisable":{ + "shape":"RollbackOnDisable", + "documentation":"

Specifies the rollback state while disabling Auto-Tune for the domain. Valid values are NO_ROLLBACK, DEFAULT_ROLLBACK.

" + }, + "MaintenanceSchedules":{ + "shape":"AutoTuneMaintenanceScheduleList", + "documentation":"

Specifies the list of maintenance schedules. See the Developer Guide for more information.

" + } + }, + "documentation":"

Specifies the Auto-Tune options: the Auto-Tune desired state for the domain, rollback state when disabling Auto-Tune options and list of maintenance schedules.

" + }, + "AutoTuneOptionsInput":{ + "type":"structure", + "members":{ + "DesiredState":{ + "shape":"AutoTuneDesiredState", + "documentation":"

Specifies the Auto-Tune desired state. Valid values are ENABLED, DISABLED.

" + }, + "MaintenanceSchedules":{ + "shape":"AutoTuneMaintenanceScheduleList", + "documentation":"

Specifies the list of maintenance schedules. See the Developer Guide for more information.

" + } + }, + "documentation":"

Specifies the Auto-Tune options: the Auto-Tune desired state for the domain and list of maintenance schedules.
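A minimal sketch of passing these Auto-Tune options when creating a domain with boto3; the domain name, cluster sizing, start time, and cron expression are placeholder assumptions:

```python
import boto3

es = boto3.client("es")

# Sketch: enable Auto-Tune with a single recurring two-hour maintenance window.
es.create_elasticsearch_domain(
    DomainName="example-domain",                      # placeholder domain name
    ElasticsearchVersion="7.9",
    ElasticsearchClusterConfig={
        "InstanceType": "r5.large.elasticsearch",
        "InstanceCount": 2,
    },
    EBSOptions={"EBSEnabled": True, "VolumeType": "gp2", "VolumeSize": 20},
    AutoTuneOptions={
        "DesiredState": "ENABLED",
        "MaintenanceSchedules": [
            {
                "StartAt": "2021-04-03T00:00:00Z",     # timestamps may also be datetime objects
                "Duration": {"Value": 2, "Unit": "HOURS"},
                "CronExpressionForRecurrence": "cron(0 0 ? * SAT *)",
            }
        ],
    },
)
```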

" + }, + "AutoTuneOptionsOutput":{ + "type":"structure", + "members":{ + "State":{ + "shape":"AutoTuneState", + "documentation":"

Specifies the AutoTuneState for the Elasticsearch domain.

" + }, + "ErrorMessage":{ + "shape":"String", + "documentation":"

Specifies the error message while enabling or disabling the Auto-Tune.

" + } + }, + "documentation":"

Specifies the Auto-Tune options: the Auto-Tune desired state for the domain and list of maintenance schedules.

" + }, + "AutoTuneOptionsStatus":{ + "type":"structure", + "members":{ + "Options":{ + "shape":"AutoTuneOptions", + "documentation":"

Specifies Auto-Tune options for the specified Elasticsearch domain.

" + }, + "Status":{ + "shape":"AutoTuneStatus", + "documentation":"

Specifies Status of the Auto-Tune options for the specified Elasticsearch domain.

" + } + }, + "documentation":"

Specifies the status of Auto-Tune options for the specified Elasticsearch domain.

" + }, + "AutoTuneState":{ + "type":"string", + "documentation":"

Specifies the Auto-Tune state for the Elasticsearch domain. For valid states see the Developer Guide.

", + "enum":[ + "ENABLED", + "DISABLED", + "ENABLE_IN_PROGRESS", + "DISABLE_IN_PROGRESS", + "DISABLED_AND_ROLLBACK_SCHEDULED", + "DISABLED_AND_ROLLBACK_IN_PROGRESS", + "DISABLED_AND_ROLLBACK_COMPLETE", + "DISABLED_AND_ROLLBACK_ERROR", + "ERROR" + ] + }, + "AutoTuneStatus":{ + "type":"structure", + "required":[ + "CreationDate", + "UpdateDate", + "State" + ], + "members":{ + "CreationDate":{ + "shape":"UpdateTimestamp", + "documentation":"

The timestamp of when the Auto-Tune options were created.

" + }, + "UpdateDate":{ + "shape":"UpdateTimestamp", + "documentation":"

The timestamp of when the Auto-Tune options were last updated.

" + }, + "UpdateVersion":{ + "shape":"UIntValue", + "documentation":"

Specifies the latest version of the Auto-Tune options.

" + }, + "State":{ + "shape":"AutoTuneState", + "documentation":"

Specifies the AutoTuneState for the Elasticsearch domain.

" + }, + "ErrorMessage":{ + "shape":"String", + "documentation":"

Specifies the error message while enabling or disabling the Auto-Tune options.

" + }, + "PendingDeletion":{ + "shape":"Boolean", + "documentation":"

Indicates whether the Elasticsearch domain is being deleted.

" + } + }, + "documentation":"

Provides the current status of the Auto-Tune options.

" + }, + "AutoTuneType":{ + "type":"string", + "documentation":"

Specifies Auto-Tune type. Valid value is SCHEDULED_ACTION.

", + "enum":["SCHEDULED_ACTION"] + }, "BackendRole":{ "type":"string", "max":256, @@ -1019,6 +1210,14 @@ "AdvancedSecurityOptions":{ "shape":"AdvancedSecurityOptionsInput", "documentation":"

Specifies advanced security options.

" + }, + "AutoTuneOptions":{ + "shape":"AutoTuneOptionsInput", + "documentation":"

Specifies Auto-Tune options.

" + }, + "TagList":{ + "shape":"TagList", + "documentation":"

A list of Tags added during domain creation.

" } } }, @@ -1224,6 +1423,41 @@ "ELIGIBLE" ] }, + "DescribeDomainAutoTunesRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"DomainName", + "documentation":"

Specifies the domain name for which you want Auto-Tune action details.

", + "location":"uri", + "locationName":"DomainName" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

Set this value to limit the number of results returned. If not specified, defaults to 100.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

NextToken is sent if the results of an earlier API call contain a NextToken value. It is used for pagination.

" + } + }, + "documentation":"

Container for the parameters to the DescribeDomainAutoTunes operation.

" + }, + "DescribeDomainAutoTunesResponse":{ + "type":"structure", + "members":{ + "AutoTunes":{ + "shape":"AutoTuneList", + "documentation":"

Specifies the list of setting adjustments that Auto-Tune has made to the domain. See the Developer Guide for more information.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

Specifies an identifier to allow retrieval of paginated results.

" + } + }, + "documentation":"

The result of DescribeDomainAutoTunes request. See the Developer Guide for more information.
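A minimal sketch of paging through these results with boto3; the domain name is a placeholder and the method name follows boto3's snake_case convention for DescribeDomainAutoTunes:

```python
import boto3

es = boto3.client("es")

# Sketch: list every scheduled Auto-Tune action for a domain, following
# NextToken until the results are exhausted.
kwargs = {"DomainName": "example-domain", "MaxResults": 100}
while True:
    page = es.describe_domain_auto_tunes(**kwargs)
    for auto_tune in page.get("AutoTunes", []):
        details = auto_tune["AutoTuneDetails"]["ScheduledAutoTuneDetails"]
        print(details["ActionType"], details["Severity"], details["Date"])
    token = page.get("NextToken")
    if not token:
        break
    kwargs["NextToken"] = token
```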

" + }, "DescribeElasticsearchDomainConfigRequest":{ "type":"structure", "required":["DomainName"], @@ -1718,6 +1952,26 @@ ] }, "Double":{"type":"double"}, + "Duration":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"DurationValue", + "documentation":"

Integer to specify the value of a maintenance schedule duration. See the Developer Guide for more information.

" + }, + "Unit":{ + "shape":"TimeUnit", + "documentation":"

Specifies the unit of a maintenance schedule duration. Valid value is HOURS. See the Developer Guide for more information.

" + } + }, + "documentation":"

Specifies maintenance schedule duration: duration value and duration unit. See the Developer Guide for more information.

" + }, + "DurationValue":{ + "type":"long", + "documentation":"

Integer to specify the value of a maintenance schedule duration. See the Developer Guide for more information.

", + "max":24, + "min":1 + }, "EBSOptions":{ "type":"structure", "members":{ @@ -1946,6 +2200,10 @@ "AdvancedSecurityOptions":{ "shape":"AdvancedSecurityOptionsStatus", "documentation":"

Specifies AdvancedSecurityOptions for the domain.

" + }, + "AutoTuneOptions":{ + "shape":"AutoTuneOptionsStatus", + "documentation":"

Specifies AutoTuneOptions for the domain.

" } }, "documentation":"

The configuration of an Elasticsearch domain.

" @@ -2047,6 +2305,10 @@ "AdvancedSecurityOptions":{ "shape":"AdvancedSecurityOptions", "documentation":"

The current status of the Elasticsearch domain's advanced security options.

" + }, + "AutoTuneOptions":{ + "shape":"AutoTuneOptionsOutput", + "documentation":"

The current status of the Elasticsearch domain's Auto-Tune options.

" } }, "documentation":"

The current status of an Elasticsearch domain.

" @@ -3163,6 +3425,14 @@ "max":2048, "min":20 }, + "RollbackOnDisable":{ + "type":"string", + "documentation":"

Specifies the rollback state while disabling Auto-Tune for the domain. Valid values are NO_ROLLBACK, DEFAULT_ROLLBACK.

", + "enum":[ + "NO_ROLLBACK", + "DEFAULT_ROLLBACK" + ] + }, "S3BucketName":{ "type":"string", "max":63, @@ -3257,6 +3527,49 @@ }, "documentation":"

Describes the SAML application configured for the domain.

" }, + "ScheduledAutoTuneActionType":{ + "type":"string", + "documentation":"

Specifies Auto-Tune action type. Valid values are JVM_HEAP_SIZE_TUNING and JVM_YOUNG_GEN_TUNING.

", + "enum":[ + "JVM_HEAP_SIZE_TUNING", + "JVM_YOUNG_GEN_TUNING" + ] + }, + "ScheduledAutoTuneDescription":{ + "type":"string", + "documentation":"

Specifies Auto-Tune action description.

" + }, + "ScheduledAutoTuneDetails":{ + "type":"structure", + "members":{ + "Date":{ + "shape":"AutoTuneDate", + "documentation":"

Specifies timestamp for the Auto-Tune action scheduled for the domain.

" + }, + "ActionType":{ + "shape":"ScheduledAutoTuneActionType", + "documentation":"

Specifies Auto-Tune action type. Valid values are JVM_HEAP_SIZE_TUNING and JVM_YOUNG_GEN_TUNING.

" + }, + "Action":{ + "shape":"ScheduledAutoTuneDescription", + "documentation":"

Specifies Auto-Tune action description.

" + }, + "Severity":{ + "shape":"ScheduledAutoTuneSeverityType", + "documentation":"

Specifies Auto-Tune action severity. Valid values are LOW, MEDIUM and HIGH.

" + } + }, + "documentation":"

Specifies details of the scheduled Auto-Tune action. See the Developer Guide for more information.

" + }, + "ScheduledAutoTuneSeverityType":{ + "type":"string", + "documentation":"

Specifies Auto-Tune action severity. Valid values are LOW, MEDIUM and HIGH.

", + "enum":[ + "LOW", + "MEDIUM", + "HIGH" + ] + }, "ServiceSoftwareOptions":{ "type":"structure", "members":{ @@ -3327,6 +3640,7 @@ }, "documentation":"

Status of a daily automated snapshot.

" }, + "StartAt":{"type":"timestamp"}, "StartElasticsearchServiceSoftwareUpdateRequest":{ "type":"structure", "required":["DomainName"], @@ -3438,6 +3752,11 @@ "max":256, "min":0 }, + "TimeUnit":{ + "type":"string", + "documentation":"

Specifies the unit of a maintenance schedule duration. Valid value is HOURS. See the Developer Guide for more information.

", + "enum":["HOURS"] + }, "UIntValue":{ "type":"integer", "min":0 @@ -3499,6 +3818,10 @@ "EncryptionAtRestOptions":{ "shape":"EncryptionAtRestOptions", "documentation":"

Specifies the Encryption At Rest Options.

" + }, + "AutoTuneOptions":{ + "shape":"AutoTuneOptions", + "documentation":"

Specifies Auto-Tune options.

" } }, "documentation":"

Container for the parameters to the UpdateElasticsearchDomain operation. Specifies the type and number of instances in the domain cluster.

" diff --git a/botocore/data/events/2015-10-07/service-2.json b/botocore/data/events/2015-10-07/service-2.json index 46d473a7..c40a67ae 100644 --- a/botocore/data/events/2015-10-07/service-2.json +++ b/botocore/data/events/2015-10-07/service-2.json @@ -44,6 +44,22 @@ ], "documentation":"

Cancels the specified replay.

" }, + "CreateApiDestination":{ + "name":"CreateApiDestination", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateApiDestinationRequest"}, + "output":{"shape":"CreateApiDestinationResponse"}, + "errors":[ + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalException"} + ], + "documentation":"

Creates an API destination, which is an HTTP invocation endpoint configured as a target for events.

" + }, "CreateArchive":{ "name":"CreateArchive", "http":{ @@ -62,6 +78,21 @@ ], "documentation":"

Creates an archive of events with the specified settings. When you create an archive, incoming events might not immediately start being sent to the archive. Allow a short period of time for changes to take effect. If you do not specify a pattern to filter events sent to the archive, all events are sent to the archive except replayed events. Replayed events are not sent to an archive.

" }, + "CreateConnection":{ + "name":"CreateConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateConnectionRequest"}, + "output":{"shape":"CreateConnectionResponse"}, + "errors":[ + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalException"} + ], + "documentation":"

Creates a connection. A connection defines the authorization type and credentials to use for authorization with an API destination HTTP endpoint.
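A minimal sketch of creating a connection and then an API destination that uses it with boto3. The authorization parameter names are assumptions about the connection model (they are not shown in this hunk), and the endpoint URL, key, and rate limit are placeholders:

```python
import boto3

events = boto3.client("events")

# Sketch: store the endpoint credentials in a connection, then create an API
# destination that invokes the HTTPS endpoint through that connection.
conn = events.create_connection(
    Name="example-connection",
    AuthorizationType="API_KEY",                      # assumed authorization type
    AuthParameters={                                  # assumed parameter names
        "ApiKeyAuthParameters": {
            "ApiKeyName": "x-api-key",
            "ApiKeyValue": "placeholder-secret",
        }
    },
)

dest = events.create_api_destination(
    Name="example-destination",
    ConnectionArn=conn["ConnectionArn"],
    InvocationEndpoint="https://example.com/ingest",  # placeholder endpoint
    HttpMethod="POST",
    InvocationRateLimitPerSecond=10,
)
print(dest["ApiDestinationArn"], dest["ApiDestinationState"])
```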

" + }, "CreateEventBus":{ "name":"CreateEventBus", "http":{ @@ -114,6 +145,36 @@ ], "documentation":"

You can use this operation to temporarily stop receiving events from the specified partner event source. The matching event bus is not deleted.

When you deactivate a partner event source, the source goes into PENDING state. If it remains in PENDING state for more than two weeks, it is deleted.

To activate a deactivated partner event source, use ActivateEventSource.

" }, + "DeauthorizeConnection":{ + "name":"DeauthorizeConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeauthorizeConnectionRequest"}, + "output":{"shape":"DeauthorizeConnectionResponse"}, + "errors":[ + {"shape":"ConcurrentModificationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalException"} + ], + "documentation":"

Removes all authorization parameters from the connection. This lets you remove the secret from the connection so you can reuse the connection without having to create a new one.

" + }, + "DeleteApiDestination":{ + "name":"DeleteApiDestination", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteApiDestinationRequest"}, + "output":{"shape":"DeleteApiDestinationResponse"}, + "errors":[ + {"shape":"ConcurrentModificationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalException"} + ], + "documentation":"

Deletes the specified API destination.

" + }, "DeleteArchive":{ "name":"DeleteArchive", "http":{ @@ -129,6 +190,21 @@ ], "documentation":"

Deletes the specified archive.

" }, + "DeleteConnection":{ + "name":"DeleteConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteConnectionRequest"}, + "output":{"shape":"DeleteConnectionResponse"}, + "errors":[ + {"shape":"ConcurrentModificationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalException"} + ], + "documentation":"

Deletes a connection.

" + }, "DeleteEventBus":{ "name":"DeleteEventBus", "http":{ @@ -171,6 +247,20 @@ ], "documentation":"

Deletes the specified rule.

Before you can delete the rule, you must remove all targets, using RemoveTargets.

When you delete a rule, incoming events might continue to match to the deleted rule. Allow a short period of time for changes to take effect.

Managed rules are rules created and managed by another AWS service on your behalf. These rules are created by those other AWS services to support functionality in those services. You can delete these rules using the Force option, but you should do so only if you are sure the other service is not still using that rule.

" }, + "DescribeApiDestination":{ + "name":"DescribeApiDestination", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeApiDestinationRequest"}, + "output":{"shape":"DescribeApiDestinationResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalException"} + ], + "documentation":"

Retrieves details about an API destination.

" + }, "DescribeArchive":{ "name":"DescribeArchive", "http":{ @@ -186,6 +276,20 @@ ], "documentation":"

Retrieves details about an archive.

" }, + "DescribeConnection":{ + "name":"DescribeConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConnectionRequest"}, + "output":{"shape":"DescribeConnectionResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalException"} + ], + "documentation":"

Retrieves details about a connection.

" + }, "DescribeEventBus":{ "name":"DescribeEventBus", "http":{ @@ -288,6 +392,19 @@ ], "documentation":"

Enables the specified rule. If the rule does not exist, the operation fails.

When you enable a rule, incoming events might not immediately start matching to a newly enabled rule. Allow a short period of time for changes to take effect.

" }, + "ListApiDestinations":{ + "name":"ListApiDestinations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListApiDestinationsRequest"}, + "output":{"shape":"ListApiDestinationsResponse"}, + "errors":[ + {"shape":"InternalException"} + ], + "documentation":"

Retrieves a list of API destinations in the account in the current Region.

" + }, "ListArchives":{ "name":"ListArchives", "http":{ @@ -302,6 +419,19 @@ ], "documentation":"

Lists your archives. You can either list all the archives or you can provide a prefix to match to the archive names. Filter parameters are exclusive.

" }, + "ListConnections":{ + "name":"ListConnections", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListConnectionsRequest"}, + "output":{"shape":"ListConnectionsResponse"}, + "errors":[ + {"shape":"InternalException"} + ], + "documentation":"

Retrieves a list of connections from the account.

" + }, "ListEventBuses":{ "name":"ListEventBuses", "http":{ @@ -503,7 +633,7 @@ {"shape":"ManagedRuleException"}, {"shape":"InternalException"} ], - "documentation":"

Adds the specified targets to the specified rule, or updates the targets if they are already associated with the rule.

Targets are the resources that are invoked when a rule is triggered.

You can configure the following as targets for Events:

Creating rules with built-in targets is supported only in the AWS Management Console. The built-in targets are EC2 CreateSnapshot API call, EC2 RebootInstances API call, EC2 StopInstances API call, and EC2 TerminateInstances API call.

For some target types, PutTargets provides target-specific parameters. If the target is a Kinesis data stream, you can optionally specify which shard the event goes to by using the KinesisParameters argument. To invoke a command on multiple EC2 instances with one rule, you can use the RunCommandParameters field.

To be able to make API calls against the resources that you own, Amazon EventBridge (CloudWatch Events) needs the appropriate permissions. For AWS Lambda and Amazon SNS resources, EventBridge relies on resource-based policies. For EC2 instances, Kinesis data streams, AWS Step Functions state machines and API Gateway REST APIs, EventBridge relies on IAM roles that you specify in the RoleARN argument in PutTargets. For more information, see Authentication and Access Control in the Amazon EventBridge User Guide.

If another AWS account is in the same region and has granted you permission (using PutPermission), you can send events to that account. Set that account's event bus as a target of the rules in your account. To send the matched events to the other account, specify that account's event bus as the Arn value when you run PutTargets. If your account sends events to another account, your account is charged for each sent event. Each event sent to another account is charged as a custom event. The account receiving the event is not charged. For more information, see Amazon EventBridge (CloudWatch Events) Pricing.

Input, InputPath, and InputTransformer are not available with PutTarget if the target is an event bus of a different AWS account.

If you are setting the event bus of another account as the target, and that account granted permission to your account through an organization instead of directly by the account ID, then you must specify a RoleArn with proper permissions in the Target structure. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.

For more information about enabling cross-account events, see PutPermission.

Input, InputPath, and InputTransformer are mutually exclusive and optional parameters of a target. When a rule is triggered due to a matched event:

When you specify InputPath or InputTransformer, you must use JSON dot notation, not bracket notation.

When you add targets to a rule and the associated rule triggers soon after, new or updated targets might not be immediately invoked. Allow a short period of time for changes to take effect.

This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries provides the ID of the failed target and the error code.

" + "documentation":"

Adds the specified targets to the specified rule, or updates the targets if they are already associated with the rule.

Targets are the resources that are invoked when a rule is triggered.

You can configure the following as targets for Events:

Creating rules with built-in targets is supported only in the AWS Management Console. The built-in targets are EC2 CreateSnapshot API call, EC2 RebootInstances API call, EC2 StopInstances API call, and EC2 TerminateInstances API call.

For some target types, PutTargets provides target-specific parameters. If the target is a Kinesis data stream, you can optionally specify which shard the event goes to by using the KinesisParameters argument. To invoke a command on multiple EC2 instances with one rule, you can use the RunCommandParameters field.

To be able to make API calls against the resources that you own, Amazon EventBridge (CloudWatch Events) needs the appropriate permissions. For AWS Lambda and Amazon SNS resources, EventBridge relies on resource-based policies. For EC2 instances, Kinesis data streams, AWS Step Functions state machines and API Gateway REST APIs, EventBridge relies on IAM roles that you specify in the RoleARN argument in PutTargets. For more information, see Authentication and Access Control in the Amazon EventBridge User Guide.

If another AWS account is in the same region and has granted you permission (using PutPermission), you can send events to that account. Set that account's event bus as a target of the rules in your account. To send the matched events to the other account, specify that account's event bus as the Arn value when you run PutTargets. If your account sends events to another account, your account is charged for each sent event. Each event sent to another account is charged as a custom event. The account receiving the event is not charged. For more information, see Amazon EventBridge (CloudWatch Events) Pricing.

Input, InputPath, and InputTransformer are not available with PutTarget if the target is an event bus of a different AWS account.

If you are setting the event bus of another account as the target, and that account granted permission to your account through an organization instead of directly by the account ID, then you must specify a RoleArn with proper permissions in the Target structure. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.

For more information about enabling cross-account events, see PutPermission.

Input, InputPath, and InputTransformer are mutually exclusive and optional parameters of a target. When a rule is triggered due to a matched event:

When you specify InputPath or InputTransformer, you must use JSON dot notation, not bracket notation.

When you add targets to a rule and the associated rule triggers soon after, new or updated targets might not be immediately invoked. Allow a short period of time for changes to take effect.

This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries provides the ID of the failed target and the error code.
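A minimal sketch of adding a target and checking for partial failures with boto3; the rule name and function ARN are placeholders:

```python
import boto3

events = boto3.client("events")

# Sketch: attach a Lambda function to an existing rule with a fixed JSON input,
# then surface any entries that failed.
response = events.put_targets(
    Rule="example-rule",                              # placeholder rule name
    Targets=[
        {
            "Id": "example-target",
            "Arn": "arn:aws:lambda:us-east-1:123456789012:function:example",
            # Input, InputPath, and InputTransformer are mutually exclusive.
            "Input": '{"source": "scheduled-check"}',
        }
    ],
)
if response["FailedEntryCount"]:
    for entry in response["FailedEntries"]:
        print(entry["TargetId"], entry["ErrorCode"], entry["ErrorMessage"])
```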

" }, "RemovePermission":{ "name":"RemovePermission", @@ -599,6 +729,22 @@ ], "documentation":"

Removes one or more tags from the specified EventBridge resource. In Amazon EventBridge (CloudWatch Events), rules and event buses can be tagged.

" }, + "UpdateApiDestination":{ + "name":"UpdateApiDestination", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateApiDestinationRequest"}, + "output":{"shape":"UpdateApiDestinationResponse"}, + "errors":[ + {"shape":"ConcurrentModificationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Updates an API destination.

" + }, "UpdateArchive":{ "name":"UpdateArchive", "http":{ @@ -615,6 +761,22 @@ {"shape":"InvalidEventPatternException"} ], "documentation":"

Updates the specified archive.

" + }, + "UpdateConnection":{ + "name":"UpdateConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateConnectionRequest"}, + "output":{"shape":"UpdateConnectionResponse"}, + "errors":[ + {"shape":"ConcurrentModificationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Updates settings for a connection.

" } }, "shapes":{ @@ -640,6 +802,92 @@ } } }, + "ApiDestination":{ + "type":"structure", + "members":{ + "ApiDestinationArn":{ + "shape":"ApiDestinationArn", + "documentation":"

The ARN of the API destination.

" + }, + "Name":{ + "shape":"ApiDestinationName", + "documentation":"

The name of the API destination.

" + }, + "ApiDestinationState":{ + "shape":"ApiDestinationState", + "documentation":"

The state of the API destination.

" + }, + "ConnectionArn":{ + "shape":"ConnectionArn", + "documentation":"

The ARN of the connection specified for the API destination.

" + }, + "InvocationEndpoint":{ + "shape":"HttpsEndpoint", + "documentation":"

The URL to the endpoint for the API destination.

" + }, + "HttpMethod":{ + "shape":"ApiDestinationHttpMethod", + "documentation":"

The method to use to connect to the HTTP endpoint.

" + }, + "InvocationRateLimitPerSecond":{ + "shape":"ApiDestinationInvocationRateLimitPerSecond", + "documentation":"

The maximum number of invocations per second to send to the HTTP endpoint.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the API destination was created.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the API destination was last modified.

" + } + }, + "documentation":"

Contains details about an API destination.

" + }, + "ApiDestinationArn":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"^arn:aws([a-z]|\\-)*:events:([a-z]|\\d|\\-)*:([0-9]{12})?:api-destination\\/[\\.\\-_A-Za-z0-9]+\\/[\\-A-Za-z0-9]+$" + }, + "ApiDestinationDescription":{ + "type":"string", + "max":512, + "pattern":".*" + }, + "ApiDestinationHttpMethod":{ + "type":"string", + "enum":[ + "POST", + "GET", + "HEAD", + "OPTIONS", + "PUT", + "PATCH", + "DELETE" + ] + }, + "ApiDestinationInvocationRateLimitPerSecond":{ + "type":"integer", + "min":1 + }, + "ApiDestinationName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[\\.\\-_A-Za-z0-9]+" + }, + "ApiDestinationResponseList":{ + "type":"list", + "member":{"shape":"ApiDestination"} + }, + "ApiDestinationState":{ + "type":"string", + "enum":[ + "ACTIVE", + "INACTIVE" + ] + }, "Archive":{ "type":"structure", "members":{ @@ -727,6 +975,12 @@ "DISABLED" ] }, + "AuthHeaderParameters":{ + "type":"string", + "max":512, + "min":1, + "pattern":"^[ \\t]*[^\\x00-\\x1F:\\x7F]+([ \\t]+[^\\x00-\\x1F:\\x7F]+)*[ \\t]*$" + }, "AwsVpcConfiguration":{ "type":"structure", "required":["Subnets"], @@ -850,6 +1104,318 @@ }, "documentation":"

A JSON string which you can use to limit the event bus permissions you are granting to only accounts that fulfill the condition. Currently, the only supported condition is membership in a certain AWS organization. The string must contain Type, Key, and Value fields. The Value field specifies the ID of the AWS organization. Following is an example value for Condition:

'{\"Type\" : \"StringEquals\", \"Key\": \"aws:PrincipalOrgID\", \"Value\": \"o-1234567890\"}'

" }, + "Connection":{ + "type":"structure", + "members":{ + "ConnectionArn":{ + "shape":"ConnectionArn", + "documentation":"

The ARN of the connection.

" + }, + "Name":{ + "shape":"ConnectionName", + "documentation":"

The name of the connection.

" + }, + "ConnectionState":{ + "shape":"ConnectionState", + "documentation":"

The state of the connection.

" + }, + "StateReason":{ + "shape":"ConnectionStateReason", + "documentation":"

The reason that the connection is in the connection state.

" + }, + "AuthorizationType":{ + "shape":"ConnectionAuthorizationType", + "documentation":"

The authorization type specified for the connection.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the connection was created.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the connection was last modified.

" + }, + "LastAuthorizedTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the connection was last authorized.

" + } + }, + "documentation":"

Contains information about a connection.

" + }, + "ConnectionApiKeyAuthResponseParameters":{ + "type":"structure", + "members":{ + "ApiKeyName":{ + "shape":"AuthHeaderParameters", + "documentation":"

The name of the header to use for the APIKeyValue used for authorization.

" + } + }, + "documentation":"

Contains the authorization parameters for the connection if API Key is specified as the authorization type.

" + }, + "ConnectionArn":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"^arn:aws([a-z]|\\-)*:events:([a-z]|\\d|\\-)*:([0-9]{12})?:connection\\/[\\.\\-_A-Za-z0-9]+\\/[\\-A-Za-z0-9]+$" + }, + "ConnectionAuthResponseParameters":{ + "type":"structure", + "members":{ + "BasicAuthParameters":{ + "shape":"ConnectionBasicAuthResponseParameters", + "documentation":"

The authorization parameters for Basic authorization.

" + }, + "OAuthParameters":{ + "shape":"ConnectionOAuthResponseParameters", + "documentation":"

The OAuth parameters to use for authorization.

" + }, + "ApiKeyAuthParameters":{ + "shape":"ConnectionApiKeyAuthResponseParameters", + "documentation":"

The API Key parameters to use for authorization.

" + }, + "InvocationHttpParameters":{ + "shape":"ConnectionHttpParameters", + "documentation":"

Additional parameters for the connection that are passed through with every invocation to the HTTP endpoint.

" + } + }, + "documentation":"

Contains the authorization parameters to use for the connection.

" + }, + "ConnectionAuthorizationType":{ + "type":"string", + "enum":[ + "BASIC", + "OAUTH_CLIENT_CREDENTIALS", + "API_KEY" + ] + }, + "ConnectionBasicAuthResponseParameters":{ + "type":"structure", + "members":{ + "Username":{ + "shape":"AuthHeaderParameters", + "documentation":"

The user name to use for Basic authorization.

" + } + }, + "documentation":"

Contains the authorization parameters for the connection if Basic is specified as the authorization type.

" + }, + "ConnectionBodyParameter":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"String", + "documentation":"

The key for the parameter.

" + }, + "Value":{ + "shape":"String", + "documentation":"

The value associated with the key.

" + }, + "IsValueSecret":{ + "shape":"Boolean", + "documentation":"

Specifies whether the value is secret.

" + } + }, + "documentation":"

Additional parameter included in the body. You can include up to 100 additional body parameters per request. An event payload cannot exceed 64 KB.

" + }, + "ConnectionBodyParametersList":{ + "type":"list", + "member":{"shape":"ConnectionBodyParameter"}, + "max":100, + "min":0 + }, + "ConnectionDescription":{ + "type":"string", + "max":512, + "pattern":".*" + }, + "ConnectionHeaderParameter":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"HeaderKey", + "documentation":"

The key for the parameter.

" + }, + "Value":{ + "shape":"HeaderValue", + "documentation":"

The value associated with the key.

" + }, + "IsValueSecret":{ + "shape":"Boolean", + "documentation":"

Specifies whether the value is a secret.

" + } + }, + "documentation":"

Additional parameter included in the header. You can include up to 100 additional header parameters per request. An event payload cannot exceed 64 KB.

" + }, + "ConnectionHeaderParametersList":{ + "type":"list", + "member":{"shape":"ConnectionHeaderParameter"}, + "max":100, + "min":0 + }, + "ConnectionHttpParameters":{ + "type":"structure", + "members":{ + "HeaderParameters":{ + "shape":"ConnectionHeaderParametersList", + "documentation":"

Contains additional header parameters for the connection.

" + }, + "QueryStringParameters":{ + "shape":"ConnectionQueryStringParametersList", + "documentation":"

Contains additional query string parameters for the connection.

" + }, + "BodyParameters":{ + "shape":"ConnectionBodyParametersList", + "documentation":"

Contains additional body string parameters for the connection.

" + } + }, + "documentation":"

Contains additional parameters for the connection.

" + }, + "ConnectionName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[\\.\\-_A-Za-z0-9]+" + }, + "ConnectionOAuthClientResponseParameters":{ + "type":"structure", + "members":{ + "ClientID":{ + "shape":"AuthHeaderParameters", + "documentation":"

The client ID associated with the response to the connection request.

" + } + }, + "documentation":"

Contains the client response parameters for the connection when OAuth is specified as the authorization type.

" + }, + "ConnectionOAuthHttpMethod":{ + "type":"string", + "enum":[ + "GET", + "POST", + "PUT" + ] + }, + "ConnectionOAuthResponseParameters":{ + "type":"structure", + "members":{ + "ClientParameters":{ + "shape":"ConnectionOAuthClientResponseParameters", + "documentation":"

A ConnectionOAuthClientResponseParameters object that contains details about the client parameters returned when OAuth is specified as the authorization type.

" + }, + "AuthorizationEndpoint":{ + "shape":"HttpsEndpoint", + "documentation":"

The URL to the HTTP endpoint that authorized the request.

" + }, + "HttpMethod":{ + "shape":"ConnectionOAuthHttpMethod", + "documentation":"

The method used to connect to the HTTP endpoint.

" + }, + "OAuthHttpParameters":{ + "shape":"ConnectionHttpParameters", + "documentation":"

The additional HTTP parameters used for the OAuth authorization request.

" + } + }, + "documentation":"

Contains the response parameters when OAuth is specified as the authorization type.

" + }, + "ConnectionQueryStringParameter":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"QueryStringKey", + "documentation":"

The key for a query string parameter.

" + }, + "Value":{ + "shape":"QueryStringValue", + "documentation":"

The value associated with the key for the query string parameter.

" + }, + "IsValueSecret":{ + "shape":"Boolean", + "documentation":"

Specifies whether the value is secret.

" + } + }, + "documentation":"

Additional query string parameter for the connection. You can include up to 100 additional query string parameters per request. Each additional parameter counts towards the event payload size, which cannot exceed 64 KB.

" + }, + "ConnectionQueryStringParametersList":{ + "type":"list", + "member":{"shape":"ConnectionQueryStringParameter"}, + "max":100, + "min":0 + }, + "ConnectionResponseList":{ + "type":"list", + "member":{"shape":"Connection"} + }, + "ConnectionState":{ + "type":"string", + "enum":[ + "CREATING", + "UPDATING", + "DELETING", + "AUTHORIZED", + "DEAUTHORIZED", + "AUTHORIZING", + "DEAUTHORIZING" + ] + }, + "ConnectionStateReason":{ + "type":"string", + "max":512, + "pattern":".*" + }, + "CreateApiDestinationRequest":{ + "type":"structure", + "required":[ + "Name", + "ConnectionArn", + "InvocationEndpoint", + "HttpMethod" + ], + "members":{ + "Name":{ + "shape":"ApiDestinationName", + "documentation":"

The name for the API destination to create.

" + }, + "Description":{ + "shape":"ApiDestinationDescription", + "documentation":"

A description for the API destination to create.

" + }, + "ConnectionArn":{ + "shape":"ConnectionArn", + "documentation":"

The ARN of the connection to use for the API destination. The destination endpoint must support the authorization type specified for the connection.

" + }, + "InvocationEndpoint":{ + "shape":"HttpsEndpoint", + "documentation":"

The URL to the HTTP invocation endpoint for the API destination.

" + }, + "HttpMethod":{ + "shape":"ApiDestinationHttpMethod", + "documentation":"

The method to use for the request to the HTTP invocation endpoint.

" + }, + "InvocationRateLimitPerSecond":{ + "shape":"ApiDestinationInvocationRateLimitPerSecond", + "documentation":"

The maximum number of requests per second to send to the HTTP invocation endpoint.

" + } + } + }, + "CreateApiDestinationResponse":{ + "type":"structure", + "members":{ + "ApiDestinationArn":{ + "shape":"ApiDestinationArn", + "documentation":"

The ARN of the API destination that was created by the request.

" + }, + "ApiDestinationState":{ + "shape":"ApiDestinationState", + "documentation":"

The state of the API destination that was created by the request.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp indicating the time that the API destination was created.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp indicating the time that the API destination was last modified.

" + } + } + }, "CreateArchiveRequest":{ "type":"structure", "required":[ @@ -900,6 +1466,156 @@ } } }, + "CreateConnectionApiKeyAuthRequestParameters":{ + "type":"structure", + "required":[ + "ApiKeyName", + "ApiKeyValue" + ], + "members":{ + "ApiKeyName":{ + "shape":"AuthHeaderParameters", + "documentation":"

The name of the API key to use for authorization.

" + }, + "ApiKeyValue":{ + "shape":"AuthHeaderParameters", + "documentation":"

The value for the API key to use for authorization.

" + } + }, + "documentation":"

Contains the API key authorization parameters for the connection.

" + }, + "CreateConnectionAuthRequestParameters":{ + "type":"structure", + "members":{ + "BasicAuthParameters":{ + "shape":"CreateConnectionBasicAuthRequestParameters", + "documentation":"

A CreateConnectionBasicAuthRequestParameters object that contains the Basic authorization parameters to use for the connection.

" + }, + "OAuthParameters":{ + "shape":"CreateConnectionOAuthRequestParameters", + "documentation":"

A CreateConnectionOAuthRequestParameters object that contains the OAuth authorization parameters to use for the connection.

" + }, + "ApiKeyAuthParameters":{ + "shape":"CreateConnectionApiKeyAuthRequestParameters", + "documentation":"

A CreateConnectionApiKeyAuthRequestParameters object that contains the API key authorization parameters to use for the connection.

" + }, + "InvocationHttpParameters":{ + "shape":"ConnectionHttpParameters", + "documentation":"

A ConnectionHttpParameters object that contains the additional parameters to use for the connection. Note that if you include additional parameters for the target of a rule via HttpParameters, including query strings, the parameters added for the connection take precedence.

" + } + }, + "documentation":"

Contains the authorization parameters for the connection.

" + }, + "CreateConnectionBasicAuthRequestParameters":{ + "type":"structure", + "required":[ + "Username", + "Password" + ], + "members":{ + "Username":{ + "shape":"AuthHeaderParameters", + "documentation":"

The user name to use for Basic authorization.

" + }, + "Password":{ + "shape":"AuthHeaderParameters", + "documentation":"

The password associated with the user name to use for Basic authorization.

" + } + }, + "documentation":"

Contains the Basic authorization parameters to use for the connection.

" + }, + "CreateConnectionOAuthClientRequestParameters":{ + "type":"structure", + "required":[ + "ClientID", + "ClientSecret" + ], + "members":{ + "ClientID":{ + "shape":"AuthHeaderParameters", + "documentation":"

The client ID to use for OAuth authorization for the connection.

" + }, + "ClientSecret":{ + "shape":"AuthHeaderParameters", + "documentation":"

The client secret associated with the client ID to use for OAuth authorization for the connection.

" + } + }, + "documentation":"

Contains the OAuth client parameters to use for the connection.

" + }, + "CreateConnectionOAuthRequestParameters":{ + "type":"structure", + "required":[ + "ClientParameters", + "AuthorizationEndpoint", + "HttpMethod" + ], + "members":{ + "ClientParameters":{ + "shape":"CreateConnectionOAuthClientRequestParameters", + "documentation":"

A CreateConnectionOAuthClientRequestParameters object that contains the client parameters for OAuth authorization.

" + }, + "AuthorizationEndpoint":{ + "shape":"HttpsEndpoint", + "documentation":"

The URL to the authorization endpoint when OAuth is specified as the authorization type.

" + }, + "HttpMethod":{ + "shape":"ConnectionOAuthHttpMethod", + "documentation":"

The method to use for the authorization request.

" + }, + "OAuthHttpParameters":{ + "shape":"ConnectionHttpParameters", + "documentation":"

A ConnectionHttpParameters object that contains details about the additional parameters to use for the connection.

" + } + }, + "documentation":"

Contains the OAuth authorization parameters to use for the connection.

" + }, + "CreateConnectionRequest":{ + "type":"structure", + "required":[ + "Name", + "AuthorizationType", + "AuthParameters" + ], + "members":{ + "Name":{ + "shape":"ConnectionName", + "documentation":"

The name for the connection to create.

" + }, + "Description":{ + "shape":"ConnectionDescription", + "documentation":"

A description for the connection to create.

" + }, + "AuthorizationType":{ + "shape":"ConnectionAuthorizationType", + "documentation":"

The type of authorization to use for the connection.

" + }, + "AuthParameters":{ + "shape":"CreateConnectionAuthRequestParameters", + "documentation":"

A CreateConnectionAuthRequestParameters object that contains the authorization parameters to use to authorize with the endpoint.
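A minimal sketch of how CreateConnection and CreateApiDestination chain together once the generated boto3 client picks up this model; the names, endpoint, and API key value are placeholders:

    import boto3

    events = boto3.client("events")

    # Create a connection that authorizes with an API key header.
    conn = events.create_connection(
        Name="example-connection",  # placeholder
        AuthorizationType="API_KEY",
        AuthParameters={
            "ApiKeyAuthParameters": {
                "ApiKeyName": "x-api-key",
                "ApiKeyValue": "placeholder-secret",
            }
        },
    )

    # Point an API destination at an HTTPS endpoint using that connection.
    events.create_api_destination(
        Name="example-destination",  # placeholder
        ConnectionArn=conn["ConnectionArn"],
        InvocationEndpoint="https://example.com/ingest",
        HttpMethod="POST",
        InvocationRateLimitPerSecond=10,
    )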

" + } + } + }, + "CreateConnectionResponse":{ + "type":"structure", + "members":{ + "ConnectionArn":{ + "shape":"ConnectionArn", + "documentation":"

The ARN of the connection that was created by the request.

" + }, + "ConnectionState":{ + "shape":"ConnectionState", + "documentation":"

The state of the connection that was created by the request.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the connection was created.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the connection was last updated.

" + } + } + }, "CreateEventBusRequest":{ "type":"structure", "required":["Name"], @@ -961,14 +1677,12 @@ "Database":{ "type":"string", "max":64, - "min":1, - "pattern":"([a-zA-Z0-9]+)|(\\$(\\.[\\w_-]+(\\[(\\d+|\\*)\\])*)*)" + "min":1 }, "DbUser":{ "type":"string", "max":128, - "min":1, - "pattern":"([a-zA-Z0-9]+)|(\\$(\\.[\\w_-]+(\\[(\\d+|\\*)\\])*)*)" + "min":1 }, "DeactivateEventSourceRequest":{ "type":"structure", @@ -990,6 +1704,56 @@ }, "documentation":"

A DeadLetterConfig object that contains information about a dead-letter queue configuration.

" }, + "DeauthorizeConnectionRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ConnectionName", + "documentation":"

The name of the connection to remove authorization from.

" + } + } + }, + "DeauthorizeConnectionResponse":{ + "type":"structure", + "members":{ + "ConnectionArn":{ + "shape":"ConnectionArn", + "documentation":"

The ARN of the connection that authorization was removed from.

" + }, + "ConnectionState":{ + "shape":"ConnectionState", + "documentation":"

The state of the connection.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the connection was created.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the connection was last updated.

" + }, + "LastAuthorizedTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the connection was last authorized.

" + } + } + }, + "DeleteApiDestinationRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ApiDestinationName", + "documentation":"

The name of the destination to delete.

" + } + } + }, + "DeleteApiDestinationResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteArchiveRequest":{ "type":"structure", "required":["ArchiveName"], @@ -1005,6 +1769,41 @@ "members":{ } }, + "DeleteConnectionRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ConnectionName", + "documentation":"

The name of the connection to delete.

" + } + } + }, + "DeleteConnectionResponse":{ + "type":"structure", + "members":{ + "ConnectionArn":{ + "shape":"ConnectionArn", + "documentation":"

The ARN of the connection that was deleted.

" + }, + "ConnectionState":{ + "shape":"ConnectionState", + "documentation":"

The state of the connection before it was deleted.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the connection was created.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the connection was last modified before it was deleted.

" + }, + "LastAuthorizedTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the connection was last authorized before it was deleted.

" + } + } + }, "DeleteEventBusRequest":{ "type":"structure", "required":["Name"], @@ -1050,6 +1849,61 @@ } } }, + "DescribeApiDestinationRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ApiDestinationName", + "documentation":"

The name of the API destination to retrieve.

" + } + } + }, + "DescribeApiDestinationResponse":{ + "type":"structure", + "members":{ + "ApiDestinationArn":{ + "shape":"ApiDestinationArn", + "documentation":"

The ARN of the API destination retrieved.

" + }, + "Name":{ + "shape":"ApiDestinationName", + "documentation":"

The name of the API destination retrieved.

" + }, + "Description":{ + "shape":"ApiDestinationDescription", + "documentation":"

The description for the API destination retrieved.

" + }, + "ApiDestinationState":{ + "shape":"ApiDestinationState", + "documentation":"

The state of the API destination retrieved.

" + }, + "ConnectionArn":{ + "shape":"ConnectionArn", + "documentation":"

The ARN of the connection specified for the API destination retrieved.

" + }, + "InvocationEndpoint":{ + "shape":"HttpsEndpoint", + "documentation":"

The URL to use to connect to the HTTP endpoint.

" + }, + "HttpMethod":{ + "shape":"ApiDestinationHttpMethod", + "documentation":"

The method to use to connect to the HTTP endpoint.

" + }, + "InvocationRateLimitPerSecond":{ + "shape":"ApiDestinationInvocationRateLimitPerSecond", + "documentation":"

The maximum number of invocations per second specified for the API destination. Note that if you set the invocation rate maximum to a value lower than the rate necessary to send all events received on to the destination HTTP endpoint, some events may not be delivered within the 24-hour retry window. If you plan to set the rate lower than the rate necessary to deliver all events, consider using a dead-letter queue to catch events that are not delivered within 24 hours.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the API destination was created.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the API destination was last modified.

" + } + } + }, "DescribeArchiveRequest":{ "type":"structure", "required":["ArchiveName"], @@ -1109,6 +1963,65 @@ } } }, + "DescribeConnectionRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ConnectionName", + "documentation":"

The name of the connection to retrieve.

" + } + } + }, + "DescribeConnectionResponse":{ + "type":"structure", + "members":{ + "ConnectionArn":{ + "shape":"ConnectionArn", + "documentation":"

The ARN of the connection retrieved.

" + }, + "Name":{ + "shape":"ConnectionName", + "documentation":"

The name of the connection retrieved.

" + }, + "Description":{ + "shape":"ConnectionDescription", + "documentation":"

The description for the connection retrieved.

" + }, + "ConnectionState":{ + "shape":"ConnectionState", + "documentation":"

The state of the connection retrieved.

" + }, + "StateReason":{ + "shape":"ConnectionStateReason", + "documentation":"

The reason that the connection is in the current connection state.

" + }, + "AuthorizationType":{ + "shape":"ConnectionAuthorizationType", + "documentation":"

The type of authorization specified for the connection.

" + }, + "SecretArn":{ + "shape":"SecretsManagerSecretArn", + "documentation":"

The ARN of the secret created from the authorization parameters specified for the connection.

" + }, + "AuthParameters":{ + "shape":"ConnectionAuthResponseParameters", + "documentation":"

The parameters to use for authorization for the connection.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the connection was created.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the connection was last modified.

" + }, + "LastAuthorizedTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the connection was last authorized.

" + } + } + }, "DescribeEventBusRequest":{ "type":"structure", "members":{ @@ -1496,18 +2409,24 @@ "members":{ "PathParameterValues":{ "shape":"PathParameterList", - "documentation":"

The path parameter values to be used to populate API Gateway REST API path wildcards (\"*\").

" + "documentation":"

The path parameter values to be used to populate API Gateway REST API or EventBridge ApiDestination path wildcards (\"*\").

" }, "HeaderParameters":{ "shape":"HeaderParametersMap", - "documentation":"

The headers that need to be sent as part of request invoking the API Gateway REST API.

" + "documentation":"

The headers that need to be sent as part of the request invoking the API Gateway REST API or EventBridge ApiDestination.

" }, "QueryStringParameters":{ "shape":"QueryStringParametersMap", - "documentation":"

The query string keys/values that need to be sent as part of request invoking the API Gateway REST API.

" + "documentation":"

The query string keys/values that need to be sent as part of the request invoking the API Gateway REST API or EventBridge ApiDestination.

" } }, - "documentation":"

These are custom parameter to be used when the target is an API Gateway REST APIs.

" + "documentation":"

These are custom parameters to be used when the target is an API Gateway REST API or EventBridge ApiDestination. In the latter case, they are merged with any InvocationParameters specified on the Connection, with any values from the Connection taking precedence.

" + }, + "HttpsEndpoint":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^((%[0-9A-Fa-f]{2}|[-()_.!~*';/?:@\\x26=+$,A-Za-z0-9])+)([).!';/?:,])?$" }, "IllegalStatusException":{ "type":"structure", @@ -1522,11 +2441,11 @@ "members":{ "InputPathsMap":{ "shape":"TransformerPaths", - "documentation":"

Map of JSON paths to be extracted from the event. You can then insert these in the template in InputTemplate to produce the output you want to be sent to the target.

InputPathsMap is an array key-value pairs, where each value is a valid JSON path. You can have as many as 10 key-value pairs. You must use JSON dot notation, not bracket notation.

The keys cannot start with \"AWS.\"

" + "documentation":"

Map of JSON paths to be extracted from the event. You can then insert these in the template in InputTemplate to produce the output you want to be sent to the target.

InputPathsMap is an array key-value pairs, where each value is a valid JSON path. You can have as many as 100 key-value pairs. You must use JSON dot notation, not bracket notation.

The keys cannot start with \"AWS.\"

" }, "InputTemplate":{ "shape":"TransformerInput", - "documentation":"

Input template where you specify placeholders that will be filled with the values of the keys from InputPathsMap to customize the data sent to the target. Enclose each InputPathsMaps value in brackets: <value> The InputTemplate must be valid JSON.

If InputTemplate is a JSON object (surrounded by curly braces), the following restrictions apply:

The following example shows the syntax for using InputPathsMap and InputTemplate.

\"InputTransformer\":

{

\"InputPathsMap\": {\"instance\": \"$.detail.instance\",\"status\": \"$.detail.status\"},

\"InputTemplate\": \"<instance> is in state <status>\"

}

To have the InputTemplate include quote marks within a JSON string, escape each quote marks with a slash, as in the following example:

\"InputTransformer\":

{

\"InputPathsMap\": {\"instance\": \"$.detail.instance\",\"status\": \"$.detail.status\"},

\"InputTemplate\": \"<instance> is in state \\\"<status>\\\"\"

}

" + "documentation":"

Input template where you specify placeholders that will be filled with the values of the keys from InputPathsMap to customize the data sent to the target. Enclose each InputPathsMaps value in brackets: <value> The InputTemplate must be valid JSON.

If InputTemplate is a JSON object (surrounded by curly braces), the following restrictions apply:

The following example shows the syntax for using InputPathsMap and InputTemplate.

\"InputTransformer\":

{

\"InputPathsMap\": {\"instance\": \"$.detail.instance\",\"status\": \"$.detail.status\"},

\"InputTemplate\": \"<instance> is in state <status>\"

}

To have the InputTemplate include quote marks within a JSON string, escape each quote marks with a slash, as in the following example:

\"InputTransformer\":

{

\"InputPathsMap\": {\"instance\": \"$.detail.instance\",\"status\": \"$.detail.status\"},

\"InputTemplate\": \"<instance> is in state \\\"<status>\\\"\"

}

The InputTemplate can also be valid JSON, with variables placed either inside or outside of quotes, as in the following example:

\"InputTransformer\":

{

\"InputPathsMap\": {\"instance\": \"$.detail.instance\",\"status\": \"$.detail.status\"},

\"InputTemplate\": '{\"myInstance\": <instance>,\"myStatus\": \"<instance> is in state \\\"<status>\\\"\"}'

}

" } }, "documentation":"

Contains the parameters needed for you to provide custom input to a target based on one or more pieces of data extracted from the event.
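The InputPathsMap/InputTemplate example above corresponds to a target definition along the lines of this hypothetical put_targets sketch; the rule name and function ARN are placeholders:

    import boto3

    events = boto3.client("events")
    events.put_targets(
        Rule="instance-state-rule",  # placeholder
        Targets=[
            {
                "Id": "transformed-target",
                "Arn": "arn:aws:lambda:us-east-1:123456789012:function:notify",
                "InputTransformer": {
                    # JSON dot notation, as required above; at most 100 entries.
                    "InputPathsMap": {
                        "instance": "$.detail.instance",
                        "status": "$.detail.status",
                    },
                    "InputTemplate": "\"<instance> is in state <status>\"",
                },
            }
        ],
    )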

" @@ -1594,6 +2513,40 @@ "type":"integer", "min":1 }, + "ListApiDestinationsRequest":{ + "type":"structure", + "members":{ + "NamePrefix":{ + "shape":"ApiDestinationName", + "documentation":"

A name prefix to filter results returned. Only API destinations with a name that starts with the prefix are returned.

" + }, + "ConnectionArn":{ + "shape":"ConnectionArn", + "documentation":"

The ARN of the connection specified for the API destination.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token returned by a previous call to retrieve the next set of results.

" + }, + "Limit":{ + "shape":"LimitMax100", + "documentation":"

The maximum number of API destinations to include in the response.

" + } + } + }, + "ListApiDestinationsResponse":{ + "type":"structure", + "members":{ + "ApiDestinations":{ + "shape":"ApiDestinationResponseList", + "documentation":"

An array of ApiDestination objects that include information about an API destination.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token you can use in a subsequent request to retrieve the next set of results.

" + } + } + }, "ListArchivesRequest":{ "type":"structure", "members":{ @@ -1632,6 +2585,40 @@ } } }, + "ListConnectionsRequest":{ + "type":"structure", + "members":{ + "NamePrefix":{ + "shape":"ConnectionName", + "documentation":"

A name prefix to filter results returned. Only connections with a name that starts with the prefix are returned.

" + }, + "ConnectionState":{ + "shape":"ConnectionState", + "documentation":"

The state of the connection.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token returned by a previous call to retrieve the next set of results.

" + }, + "Limit":{ + "shape":"LimitMax100", + "documentation":"

The maximum number of connections to return.

" + } + } + }, + "ListConnectionsResponse":{ + "type":"structure", + "members":{ + "Connections":{ + "shape":"ConnectionResponseList", + "documentation":"

An array of connection objects that include details about the connections.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token you can use in a subsequent request to retrieve the next set of results.
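Because ListConnections returns results in pages, callers typically loop on NextToken; a sketch (the Limit value is arbitrary):

    import boto3

    events = boto3.client("events")
    connections, token = [], None
    while True:
        kwargs = {"Limit": 50}  # at most 100 per the LimitMax100 shape
        if token:
            kwargs["NextToken"] = token
        page = events.list_connections(**kwargs)
        connections.extend(page.get("Connections", []))
        token = page.get("NextToken")
        if not token:
            break  # no more pages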

" + } + } + }, "ListEventBusesRequest":{ "type":"structure", "members":{ @@ -2080,6 +3067,10 @@ "EventBusName":{ "shape":"NonPartnerEventBusNameOrArn", "documentation":"

The name or ARN of the event bus to receive the event. Only the rules that are associated with this event bus are used to match the event. If you omit this, the default event bus is used.

" + }, + "TraceHeader":{ + "shape":"TraceHeader", + "documentation":"

An AWS X-Ray trace header, which is an HTTP header (X-Amzn-Trace-Id) that contains the trace-id associated with the event.

To learn more about X-Ray trace headers, see Tracing header in the AWS X-Ray Developer Guide.

" } }, "documentation":"

Represents an event to be submitted.
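A hedged sketch of a put_events entry carrying the new TraceHeader field; the source, detail, and trace ID values are placeholders:

    import json
    import boto3

    events = boto3.client("events")
    events.put_events(
        Entries=[
            {
                "Source": "com.example.app",  # placeholder
                "DetailType": "order.created",
                "Detail": json.dumps({"orderId": "1234"}),
                # X-Amzn-Trace-Id value propagated to AWS X-Ray.
                "TraceHeader": "Root=1-5e1b4151-5ac6c58f3c889dd1f0a1e111",
            }
        ]
    )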

" @@ -2734,6 +3725,12 @@ "type":"string", "max":256 }, + "SecretsManagerSecretArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^arn:aws([a-z]|\\-)*:secretsmanager:([a-z]|\\d|\\-)*:([0-9]{12})?:secret:[\\/_+=\\.@\\-A-Za-z0-9]+$" + }, "Sql":{ "type":"string", "max":100000, @@ -2933,7 +3930,7 @@ }, "HttpParameters":{ "shape":"HttpParameters", - "documentation":"

Contains the HTTP parameters to use when the target is a API Gateway REST endpoint.

If you specify an API Gateway REST API as a target, you can use this parameter to specify headers, path parameter, query string keys/values as part of your target invoking request.

" + "documentation":"

Contains the HTTP parameters to use when the target is an API Gateway REST endpoint or EventBridge ApiDestination.

If you specify an API Gateway REST API or EventBridge ApiDestination as a target, you can use this parameter to specify headers, path parameters, and query string keys/values as part of your target invoking request. If you're using ApiDestinations, the corresponding Connection can also have these values configured. In case of any conflicting keys, values from the Connection take precedence.
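For reference, a Targets[] entry carrying HttpParameters might look like the fragment below (all ARNs and values are placeholders); keys set here that collide with the Connection's InvocationHttpParameters are overridden by the Connection, per the note above:

    # Illustrative fragment of a Targets[] entry for put_targets.
    target = {
        "Id": "api-destination-target",
        "Arn": "arn:aws:events:us-east-1:123456789012:api-destination/example/abcd1234",
        "RoleArn": "arn:aws:iam::123456789012:role/events-invoke-role",
        "HttpParameters": {
            "PathParameterValues": ["orders"],             # fills "*" path wildcards
            "HeaderParameters": {"X-Custom-Header": "demo"},
            "QueryStringParameters": {"mode": "async"},
        },
    }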

" }, "RedshiftDataParameters":{ "shape":"RedshiftDataParameters", @@ -2998,7 +3995,7 @@ }, "Event":{ "shape":"String", - "documentation":"

The event, in JSON format, to test against the event pattern.

" + "documentation":"

The event, in JSON format, to test against the event pattern. The JSON must follow the format specified in AWS Events, and the following fields are mandatory:

" } } }, @@ -3012,6 +4009,11 @@ } }, "Timestamp":{"type":"timestamp"}, + "TraceHeader":{ + "type":"string", + "max":500, + "min":1 + }, "TransformerInput":{ "type":"string", "max":8192, @@ -3045,6 +4047,57 @@ "members":{ } }, + "UpdateApiDestinationRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ApiDestinationName", + "documentation":"

The name of the API destination to update.

" + }, + "Description":{ + "shape":"ApiDestinationDescription", + "documentation":"

A description for the API destination to update.

" + }, + "ConnectionArn":{ + "shape":"ConnectionArn", + "documentation":"

The ARN of the connection to use for the API destination.

" + }, + "InvocationEndpoint":{ + "shape":"HttpsEndpoint", + "documentation":"

The URL to the endpoint to use for the API destination.

" + }, + "HttpMethod":{ + "shape":"ApiDestinationHttpMethod", + "documentation":"

The method to use for the API destination.

" + }, + "InvocationRateLimitPerSecond":{ + "shape":"ApiDestinationInvocationRateLimitPerSecond", + "documentation":"

The maximum number of invocations per second to send to the API destination.

" + } + } + }, + "UpdateApiDestinationResponse":{ + "type":"structure", + "members":{ + "ApiDestinationArn":{ + "shape":"ApiDestinationArn", + "documentation":"

The ARN of the API destination that was updated.

" + }, + "ApiDestinationState":{ + "shape":"ApiDestinationState", + "documentation":"

The state of the API destination that was updated.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the API destination was created.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the API destination was last modified.

" + } + } + }, "UpdateArchiveRequest":{ "type":"structure", "required":["ArchiveName"], @@ -3087,6 +4140,139 @@ "documentation":"

The time at which the archive was updated.

" } } + }, + "UpdateConnectionApiKeyAuthRequestParameters":{ + "type":"structure", + "members":{ + "ApiKeyName":{ + "shape":"AuthHeaderParameters", + "documentation":"

The name of the API key to use for authorization.

" + }, + "ApiKeyValue":{ + "shape":"AuthHeaderParameters", + "documentation":"

The value associated with the API key to use for authorization.

" + } + }, + "documentation":"

Contains the API key authorization parameters to use to update the connection.

" + }, + "UpdateConnectionAuthRequestParameters":{ + "type":"structure", + "members":{ + "BasicAuthParameters":{ + "shape":"UpdateConnectionBasicAuthRequestParameters", + "documentation":"

An UpdateConnectionBasicAuthRequestParameters object that contains the authorization parameters for Basic authorization.

" + }, + "OAuthParameters":{ + "shape":"UpdateConnectionOAuthRequestParameters", + "documentation":"

An UpdateConnectionOAuthRequestParameters object that contains the authorization parameters for OAuth authorization.

" + }, + "ApiKeyAuthParameters":{ + "shape":"UpdateConnectionApiKeyAuthRequestParameters", + "documentation":"

An UpdateConnectionApiKeyAuthRequestParameters object that contains the authorization parameters for API key authorization.

" + }, + "InvocationHttpParameters":{ + "shape":"ConnectionHttpParameters", + "documentation":"

A ConnectionHttpParameters object that contains the additional parameters to use for the connection.

" + } + }, + "documentation":"

Contains the additional parameters to use for the connection.

" + }, + "UpdateConnectionBasicAuthRequestParameters":{ + "type":"structure", + "members":{ + "Username":{ + "shape":"AuthHeaderParameters", + "documentation":"

The user name to use for Basic authorization.

" + }, + "Password":{ + "shape":"AuthHeaderParameters", + "documentation":"

The password associated with the user name to use for Basic authorization.

" + } + }, + "documentation":"

Contains the Basic authorization parameters for the connection.

" + }, + "UpdateConnectionOAuthClientRequestParameters":{ + "type":"structure", + "members":{ + "ClientID":{ + "shape":"AuthHeaderParameters", + "documentation":"

The client ID to use for OAuth authorization.

" + }, + "ClientSecret":{ + "shape":"AuthHeaderParameters", + "documentation":"

The client secret associated with the client ID to use for OAuth authorization.

" + } + }, + "documentation":"

Contains the OAuth authorization parameters to use for the connection.

" + }, + "UpdateConnectionOAuthRequestParameters":{ + "type":"structure", + "members":{ + "ClientParameters":{ + "shape":"UpdateConnectionOAuthClientRequestParameters", + "documentation":"

An UpdateConnectionOAuthClientRequestParameters object that contains the client parameters to use for the connection when OAuth is specified as the authorization type.

" + }, + "AuthorizationEndpoint":{ + "shape":"HttpsEndpoint", + "documentation":"

The URL to the authorization endpoint when OAuth is specified as the authorization type.

" + }, + "HttpMethod":{ + "shape":"ConnectionOAuthHttpMethod", + "documentation":"

The method used to connect to the HTTP endpoint.

" + }, + "OAuthHttpParameters":{ + "shape":"ConnectionHttpParameters", + "documentation":"

The additional HTTP parameters used for the OAuth authorization request.

" + } + }, + "documentation":"

Contains the OAuth request parameters to use for the connection.

" + }, + "UpdateConnectionRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ConnectionName", + "documentation":"

The name of the connection to update.

" + }, + "Description":{ + "shape":"ConnectionDescription", + "documentation":"

A description for the connection.

" + }, + "AuthorizationType":{ + "shape":"ConnectionAuthorizationType", + "documentation":"

The type of authorization to use for the connection.

" + }, + "AuthParameters":{ + "shape":"UpdateConnectionAuthRequestParameters", + "documentation":"

The authorization parameters to use for the connection.

" + } + } + }, + "UpdateConnectionResponse":{ + "type":"structure", + "members":{ + "ConnectionArn":{ + "shape":"ConnectionArn", + "documentation":"

The ARN of the connection that was updated.

" + }, + "ConnectionState":{ + "shape":"ConnectionState", + "documentation":"

The state of the connection that was updated.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the connection was created.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the connection was last modified.

" + }, + "LastAuthorizedTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the connection was last authorized.

" + } + } } }, "documentation":"

Amazon EventBridge helps you to respond to state changes in your AWS resources. When your resources change state, they automatically send events into an event stream. You can create rules that match selected events in the stream and route them to targets to take action. You can also use rules to take action on a predetermined schedule. For example, you can configure rules to:

For more information about the features of Amazon EventBridge, see the Amazon EventBridge User Guide.

" diff --git a/botocore/data/fis/2020-12-01/paginators-1.json b/botocore/data/fis/2020-12-01/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/fis/2020-12-01/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/fis/2020-12-01/service-2.json b/botocore/data/fis/2020-12-01/service-2.json new file mode 100644 index 00000000..10e4b796 --- /dev/null +++ b/botocore/data/fis/2020-12-01/service-2.json @@ -0,0 +1,1610 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-12-01", + "endpointPrefix":"fis", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"FIS", + "serviceFullName":"AWS Fault Injection Simulator", + "serviceId":"fis", + "signatureVersion":"v4", + "signingName":"fis", + "uid":"fis-2020-12-01" + }, + "operations":{ + "CreateExperimentTemplate":{ + "name":"CreateExperimentTemplate", + "http":{ + "method":"POST", + "requestUri":"/experimentTemplates", + "responseCode":200 + }, + "input":{"shape":"CreateExperimentTemplateRequest"}, + "output":{"shape":"CreateExperimentTemplateResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Creates an experiment template.

To create a template, specify the following information:

For more information, see the AWS Fault Injection Simulator User Guide.

" + }, + "DeleteExperimentTemplate":{ + "name":"DeleteExperimentTemplate", + "http":{ + "method":"DELETE", + "requestUri":"/experimentTemplates/{id}", + "responseCode":200 + }, + "input":{"shape":"DeleteExperimentTemplateRequest"}, + "output":{"shape":"DeleteExperimentTemplateResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Deletes the specified experiment template.

" + }, + "GetAction":{ + "name":"GetAction", + "http":{ + "method":"GET", + "requestUri":"/actions/{id}", + "responseCode":200 + }, + "input":{"shape":"GetActionRequest"}, + "output":{"shape":"GetActionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Gets information about the specified AWS FIS action.

" + }, + "GetExperiment":{ + "name":"GetExperiment", + "http":{ + "method":"GET", + "requestUri":"/experiments/{id}", + "responseCode":200 + }, + "input":{"shape":"GetExperimentRequest"}, + "output":{"shape":"GetExperimentResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Gets information about the specified experiment.

" + }, + "GetExperimentTemplate":{ + "name":"GetExperimentTemplate", + "http":{ + "method":"GET", + "requestUri":"/experimentTemplates/{id}", + "responseCode":200 + }, + "input":{"shape":"GetExperimentTemplateRequest"}, + "output":{"shape":"GetExperimentTemplateResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Gets information about the specified experiment template.

" + }, + "ListActions":{ + "name":"ListActions", + "http":{ + "method":"GET", + "requestUri":"/actions", + "responseCode":200 + }, + "input":{"shape":"ListActionsRequest"}, + "output":{"shape":"ListActionsResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

Lists the available AWS FIS actions.
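A small sketch of discovering FIS actions and their parameters with the generated boto3 client (method and key names follow the lower-case members in this model):

    import boto3

    fis = boto3.client("fis")
    # List the available actions, then fetch each action's detail to inspect
    # its parameters and supported targets.
    for summary in fis.list_actions()["actions"]:
        action = fis.get_action(id=summary["id"])["action"]
        print(action["id"], sorted(action.get("parameters", {})))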

" + }, + "ListExperimentTemplates":{ + "name":"ListExperimentTemplates", + "http":{ + "method":"GET", + "requestUri":"/experimentTemplates", + "responseCode":200 + }, + "input":{"shape":"ListExperimentTemplatesRequest"}, + "output":{"shape":"ListExperimentTemplatesResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

Lists your experiment templates.

" + }, + "ListExperiments":{ + "name":"ListExperiments", + "http":{ + "method":"GET", + "requestUri":"/experiments", + "responseCode":200 + }, + "input":{"shape":"ListExperimentsRequest"}, + "output":{"shape":"ListExperimentsResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

Lists your experiments.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "documentation":"

Lists the tags for the specified resource.

" + }, + "StartExperiment":{ + "name":"StartExperiment", + "http":{ + "method":"POST", + "requestUri":"/experiments", + "responseCode":200 + }, + "input":{"shape":"StartExperimentRequest"}, + "output":{"shape":"StartExperimentResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Starts running an experiment from the specified experiment template.

" + }, + "StopExperiment":{ + "name":"StopExperiment", + "http":{ + "method":"DELETE", + "requestUri":"/experiments/{id}", + "responseCode":200 + }, + "input":{"shape":"StopExperimentRequest"}, + "output":{"shape":"StopExperimentResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Stops the specified experiment.
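Assuming the usual boto3 mapping of these operations, starting and then stopping an experiment might look like this sketch; the template ID is a placeholder:

    import uuid
    import boto3

    fis = boto3.client("fis")
    experiment = fis.start_experiment(
        clientToken=str(uuid.uuid4()),              # idempotency token
        experimentTemplateId="EXT-placeholder-id",  # placeholder template id
    )["experiment"]

    # Stop the running experiment if a manual abort is needed.
    fis.stop_experiment(id=experiment["id"])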

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "documentation":"

Applies the specified tags to the specified resource.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "documentation":"

Removes the specified tags from the specified resource.

" + }, + "UpdateExperimentTemplate":{ + "name":"UpdateExperimentTemplate", + "http":{ + "method":"PATCH", + "requestUri":"/experimentTemplates/{id}", + "responseCode":200 + }, + "input":{"shape":"UpdateExperimentTemplateRequest"}, + "output":{"shape":"UpdateExperimentTemplateResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Updates the specified experiment template.

" + } + }, + "shapes":{ + "Action":{ + "type":"structure", + "members":{ + "id":{ + "shape":"ActionId", + "documentation":"

The ID of the action.

" + }, + "description":{ + "shape":"ActionDescription", + "documentation":"

The description for the action.

" + }, + "parameters":{ + "shape":"ActionParameterMap", + "documentation":"

The action parameters, if applicable.

" + }, + "targets":{ + "shape":"ActionTargetMap", + "documentation":"

The supported targets for the action.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags for the action.

" + } + }, + "documentation":"

Describes an action. For more information, see AWS FIS actions in the AWS Fault Injection Simulator User Guide.

" + }, + "ActionDescription":{ + "type":"string", + "max":512, + "pattern":"[\\s\\S]+" + }, + "ActionId":{ + "type":"string", + "max":128, + "pattern":"[\\S]+" + }, + "ActionParameter":{ + "type":"structure", + "members":{ + "description":{ + "shape":"ActionParameterDescription", + "documentation":"

The parameter description.

" + }, + "required":{ + "shape":"ActionParameterRequired", + "documentation":"

Indicates whether the parameter is required.

", + "box":true + } + }, + "documentation":"

Describes a parameter for an action.

" + }, + "ActionParameterDescription":{ + "type":"string", + "max":512, + "pattern":"[\\s\\S]+" + }, + "ActionParameterMap":{ + "type":"map", + "key":{"shape":"ActionParameterName"}, + "value":{"shape":"ActionParameter"} + }, + "ActionParameterName":{ + "type":"string", + "max":64, + "pattern":"[\\S]+" + }, + "ActionParameterRequired":{"type":"boolean"}, + "ActionSummary":{ + "type":"structure", + "members":{ + "id":{ + "shape":"ActionId", + "documentation":"

The ID of the action.

" + }, + "description":{ + "shape":"ActionDescription", + "documentation":"

The description for the action.

" + }, + "targets":{ + "shape":"ActionTargetMap", + "documentation":"

The targets for the action.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags for the action.

" + } + }, + "documentation":"

Provides a summary of an action.

" + }, + "ActionSummaryList":{ + "type":"list", + "member":{"shape":"ActionSummary"} + }, + "ActionTarget":{ + "type":"structure", + "members":{ + "resourceType":{ + "shape":"TargetResourceType", + "documentation":"

The resource type of the target.

" + } + }, + "documentation":"

Describes a target for an action.

" + }, + "ActionTargetMap":{ + "type":"map", + "key":{"shape":"ActionTargetName"}, + "value":{"shape":"ActionTarget"} + }, + "ActionTargetName":{ + "type":"string", + "max":64, + "pattern":"[\\S]+" + }, + "ClientToken":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"[\\S]+" + }, + "ConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The request could not be processed because of a conflict.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "CreateExperimentTemplateActionInput":{ + "type":"structure", + "required":["actionId"], + "members":{ + "actionId":{ + "shape":"ActionId", + "documentation":"

The ID of the action.

" + }, + "description":{ + "shape":"ExperimentTemplateActionDescription", + "documentation":"

A description for the action.

" + }, + "parameters":{ + "shape":"ExperimentTemplateActionParameterMap", + "documentation":"

The parameters for the action, if applicable.

" + }, + "targets":{ + "shape":"ExperimentTemplateActionTargetMap", + "documentation":"

The targets for the action.

" + }, + "startAfter":{ + "shape":"ExperimentTemplateActionStartAfterList", + "documentation":"

The name of the action that must be completed before the current action starts. Omit this parameter to run the action at the start of the experiment.

" + } + }, + "documentation":"

Specifies an action for an experiment template.

" + }, + "CreateExperimentTemplateActionInputMap":{ + "type":"map", + "key":{"shape":"ExperimentTemplateActionName"}, + "value":{"shape":"CreateExperimentTemplateActionInput"} + }, + "CreateExperimentTemplateRequest":{ + "type":"structure", + "required":[ + "clientToken", + "description", + "stopConditions", + "actions", + "roleArn" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", + "idempotencyToken":true + }, + "description":{ + "shape":"ExperimentTemplateDescription", + "documentation":"

A description for the experiment template. Can contain up to 64 letters (A-Z and a-z).

" + }, + "stopConditions":{ + "shape":"CreateExperimentTemplateStopConditionInputList", + "documentation":"

The stop conditions.

" + }, + "targets":{ + "shape":"CreateExperimentTemplateTargetInputMap", + "documentation":"

The targets for the experiment.

" + }, + "actions":{ + "shape":"CreateExperimentTemplateActionInputMap", + "documentation":"

The actions for the experiment.

" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of an IAM role that grants the AWS FIS service permission to perform service actions on your behalf.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags to apply to the experiment template.
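Putting the request members above together, a create_experiment_template call could look like the following sketch; the role ARN, tags, action ID, and selection mode are illustrative values only:

    import uuid
    import boto3

    fis = boto3.client("fis")
    fis.create_experiment_template(
        clientToken=str(uuid.uuid4()),
        description="Reboot one test instance",
        roleArn="arn:aws:iam::123456789012:role/fis-experiment-role",  # placeholder
        stopConditions=[{"source": "none"}],  # no CloudWatch alarm stop condition
        targets={
            "testInstances": {
                "resourceType": "aws:ec2:instance",
                "resourceTags": {"env": "test"},
                "selectionMode": "COUNT(1)",  # pick one matching instance at random
            }
        },
        actions={
            "rebootInstance": {
                "actionId": "aws:ec2:reboot-instances",
                "targets": {"Instances": "testInstances"},
            }
        },
        tags={"team": "platform"},
    )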

" + } + } + }, + "CreateExperimentTemplateResponse":{ + "type":"structure", + "members":{ + "experimentTemplate":{ + "shape":"ExperimentTemplate", + "documentation":"

Information about the experiment template.

" + } + } + }, + "CreateExperimentTemplateStopConditionInput":{ + "type":"structure", + "required":["source"], + "members":{ + "source":{ + "shape":"StopConditionSource", + "documentation":"

The source for the stop condition. Specify aws:cloudwatch:alarm if the stop condition is defined by a CloudWatch alarm. Specify none if there is no stop condition.

" + }, + "value":{ + "shape":"StopConditionValue", + "documentation":"

The Amazon Resource Name (ARN) of the CloudWatch alarm. This is required if the source is a CloudWatch alarm.

" + } + }, + "documentation":"

Specifies a stop condition for an experiment template.

" + }, + "CreateExperimentTemplateStopConditionInputList":{ + "type":"list", + "member":{"shape":"CreateExperimentTemplateStopConditionInput"} + }, + "CreateExperimentTemplateTargetInput":{ + "type":"structure", + "required":[ + "resourceType", + "selectionMode" + ], + "members":{ + "resourceType":{ + "shape":"ResourceType", + "documentation":"

The AWS resource type. The resource type must be supported for the specified action.

" + }, + "resourceArns":{ + "shape":"ResourceArnList", + "documentation":"

The Amazon Resource Names (ARNs) of the resources.

" + }, + "resourceTags":{ + "shape":"TagMap", + "documentation":"

The tags for the target resources.

" + }, + "filters":{ + "shape":"ExperimentTemplateTargetFilterInputList", + "documentation":"

The filters to apply to identify target resources using specific attributes.

" + }, + "selectionMode":{ + "shape":"ExperimentTemplateTargetSelectionMode", + "documentation":"

Scopes the identified resources to a specific count of the resources at random, or a percentage of the resources. All identified resources are included in the target.

" + } + }, + "documentation":"

Specifies a target for an experiment. You must specify at least one Amazon Resource Name (ARN) or at least one resource tag. You cannot specify both ARNs and tags.

" + }, + "CreateExperimentTemplateTargetInputMap":{ + "type":"map", + "key":{"shape":"ExperimentTemplateTargetName"}, + "value":{"shape":"CreateExperimentTemplateTargetInput"} + }, + "CreationTime":{"type":"timestamp"}, + "DeleteExperimentTemplateRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"ExperimentTemplateId", + "documentation":"

The ID of the experiment template.

", + "location":"uri", + "locationName":"id" + } + } + }, + "DeleteExperimentTemplateResponse":{ + "type":"structure", + "members":{ + "experimentTemplate":{ + "shape":"ExperimentTemplate", + "documentation":"

Information about the experiment template.

" + } + } + }, + "ExceptionMessage":{ + "type":"string", + "max":1024, + "pattern":"[\\s\\S]+" + }, + "Experiment":{ + "type":"structure", + "members":{ + "id":{ + "shape":"ExperimentId", + "documentation":"

The ID of the experiment.

" + }, + "experimentTemplateId":{ + "shape":"ExperimentTemplateId", + "documentation":"

The ID of the experiment template.

" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of an IAM role that grants the AWS FIS service permission to perform service actions on your behalf.

" + }, + "state":{ + "shape":"ExperimentState", + "documentation":"

The state of the experiment.

" + }, + "targets":{ + "shape":"ExperimentTargetMap", + "documentation":"

The targets for the experiment.

" + }, + "actions":{ + "shape":"ExperimentActionMap", + "documentation":"

The actions for the experiment.

" + }, + "stopConditions":{ + "shape":"ExperimentStopConditionList", + "documentation":"

The stop conditions for the experiment.

" + }, + "creationTime":{ + "shape":"CreationTime", + "documentation":"

The time the experiment was created.

" + }, + "startTime":{ + "shape":"ExperimentStartTime", + "documentation":"

The time that the experiment was started.

" + }, + "endTime":{ + "shape":"ExperimentEndTime", + "documentation":"

The time that the experiment ended.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags for the experiment.

" + } + }, + "documentation":"

Describes an experiment.

" + }, + "ExperimentAction":{ + "type":"structure", + "members":{ + "actionId":{ + "shape":"ActionId", + "documentation":"

The ID of the action.

" + }, + "description":{ + "shape":"ExperimentActionDescription", + "documentation":"

The description for the action.

" + }, + "parameters":{ + "shape":"ExperimentActionParameterMap", + "documentation":"

The parameters for the action.

" + }, + "targets":{ + "shape":"ExperimentActionTargetMap", + "documentation":"

The targets for the action.

" + }, + "startAfter":{ + "shape":"ExperimentActionStartAfterList", + "documentation":"

The name of the action that must be completed before this action starts.

" + }, + "state":{ + "shape":"ExperimentActionState", + "documentation":"

The state of the action.

" + } + }, + "documentation":"

Describes the action for an experiment.

" + }, + "ExperimentActionDescription":{ + "type":"string", + "max":512, + "pattern":"[\\s\\S]+" + }, + "ExperimentActionMap":{ + "type":"map", + "key":{"shape":"ExperimentActionName"}, + "value":{"shape":"ExperimentAction"} + }, + "ExperimentActionName":{ + "type":"string", + "max":64, + "pattern":"[\\S]+" + }, + "ExperimentActionParameter":{ + "type":"string", + "max":1024, + "pattern":"[\\S]+" + }, + "ExperimentActionParameterMap":{ + "type":"map", + "key":{"shape":"ExperimentActionParameterName"}, + "value":{"shape":"ExperimentActionParameter"} + }, + "ExperimentActionParameterName":{ + "type":"string", + "max":64, + "pattern":"[\\S]+" + }, + "ExperimentActionStartAfter":{ + "type":"string", + "max":64, + "pattern":"[\\S]+" + }, + "ExperimentActionStartAfterList":{ + "type":"list", + "member":{"shape":"ExperimentActionStartAfter"} + }, + "ExperimentActionState":{ + "type":"structure", + "members":{ + "status":{ + "shape":"ExperimentActionStatus", + "documentation":"

The state of the action.

" + }, + "reason":{ + "shape":"ExperimentActionStatusReason", + "documentation":"

The reason for the state.

" + } + }, + "documentation":"

Describes the state of an action.

" + }, + "ExperimentActionStatus":{ + "type":"string", + "enum":[ + "pending", + "initiating", + "running", + "completed", + "cancelled", + "stopping", + "stopped", + "failed" + ] + }, + "ExperimentActionStatusReason":{ + "type":"string", + "max":512, + "pattern":"[\\s\\S]+" + }, + "ExperimentActionTargetMap":{ + "type":"map", + "key":{"shape":"ExperimentActionTargetName"}, + "value":{"shape":"ExperimentTargetName"} + }, + "ExperimentActionTargetName":{ + "type":"string", + "max":64, + "pattern":"[\\S]+" + }, + "ExperimentEndTime":{"type":"timestamp"}, + "ExperimentId":{ + "type":"string", + "max":64, + "pattern":"[\\S]+" + }, + "ExperimentStartTime":{"type":"timestamp"}, + "ExperimentState":{ + "type":"structure", + "members":{ + "status":{ + "shape":"ExperimentStatus", + "documentation":"

The state of the experiment.

" + }, + "reason":{ + "shape":"ExperimentStatusReason", + "documentation":"

The reason for the state.

" + } + }, + "documentation":"

Describes the state of an experiment.

" + }, + "ExperimentStatus":{ + "type":"string", + "enum":[ + "pending", + "initiating", + "running", + "completed", + "stopping", + "stopped", + "failed" + ] + }, + "ExperimentStatusReason":{ + "type":"string", + "max":512, + "pattern":"[\\s\\S]+" + }, + "ExperimentStopCondition":{ + "type":"structure", + "members":{ + "source":{ + "shape":"StopConditionSource", + "documentation":"

The source for the stop condition.

" + }, + "value":{ + "shape":"StopConditionValue", + "documentation":"

The Amazon Resource Name (ARN) of the CloudWatch alarm, if applicable.

" + } + }, + "documentation":"

Describes the stop condition for an experiment.

" + }, + "ExperimentStopConditionList":{ + "type":"list", + "member":{"shape":"ExperimentStopCondition"} + }, + "ExperimentSummary":{ + "type":"structure", + "members":{ + "id":{ + "shape":"ExperimentId", + "documentation":"

The ID of the experiment.

" + }, + "experimentTemplateId":{ + "shape":"ExperimentTemplateId", + "documentation":"

The ID of the experiment template.

" + }, + "state":{ + "shape":"ExperimentState", + "documentation":"

The state of the experiment.

" + }, + "creationTime":{ + "shape":"CreationTime", + "documentation":"

The time that the experiment was created.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags for the experiment.

" + } + }, + "documentation":"

Provides a summary of an experiment.

" + }, + "ExperimentSummaryList":{ + "type":"list", + "member":{"shape":"ExperimentSummary"} + }, + "ExperimentTarget":{ + "type":"structure", + "members":{ + "resourceType":{ + "shape":"ResourceType", + "documentation":"

The resource type.

" + }, + "resourceArns":{ + "shape":"ResourceArnList", + "documentation":"

The Amazon Resource Names (ARNs) of the resources.

" + }, + "resourceTags":{ + "shape":"TagMap", + "documentation":"

The tags for the target resources.

" + }, + "filters":{ + "shape":"ExperimentTargetFilterList", + "documentation":"

The filters to apply to identify target resources using specific attributes.

" + }, + "selectionMode":{ + "shape":"ExperimentTargetSelectionMode", + "documentation":"

Scopes the identified resources to a specific count or percentage.

" + } + }, + "documentation":"

Describes a target for an experiment.

" + }, + "ExperimentTargetFilter":{ + "type":"structure", + "members":{ + "path":{ + "shape":"ExperimentTargetFilterPath", + "documentation":"

The attribute path for the filter.

" + }, + "values":{ + "shape":"ExperimentTargetFilterValues", + "documentation":"

The attribute values for the filter.

" + } + }, + "documentation":"

Describes a filter used for the target resources in an experiment.

" + }, + "ExperimentTargetFilterList":{ + "type":"list", + "member":{"shape":"ExperimentTargetFilter"} + }, + "ExperimentTargetFilterPath":{ + "type":"string", + "max":256, + "pattern":"[\\S]+" + }, + "ExperimentTargetFilterValue":{ + "type":"string", + "max":128, + "pattern":"[\\S]+" + }, + "ExperimentTargetFilterValues":{ + "type":"list", + "member":{"shape":"ExperimentTargetFilterValue"} + }, + "ExperimentTargetMap":{ + "type":"map", + "key":{"shape":"ExperimentTargetName"}, + "value":{"shape":"ExperimentTarget"} + }, + "ExperimentTargetName":{ + "type":"string", + "max":64, + "pattern":"[\\S]+" + }, + "ExperimentTargetSelectionMode":{ + "type":"string", + "max":64, + "pattern":"[\\S]+" + }, + "ExperimentTemplate":{ + "type":"structure", + "members":{ + "id":{ + "shape":"ExperimentTemplateId", + "documentation":"

The ID of the experiment template.

" + }, + "description":{ + "shape":"ExperimentTemplateDescription", + "documentation":"

The description for the experiment template.

" + }, + "targets":{ + "shape":"ExperimentTemplateTargetMap", + "documentation":"

The targets for the experiment.

" + }, + "actions":{ + "shape":"ExperimentTemplateActionMap", + "documentation":"

The actions for the experiment.

" + }, + "stopConditions":{ + "shape":"ExperimentTemplateStopConditionList", + "documentation":"

The stop conditions for the experiment.

" + }, + "creationTime":{ + "shape":"CreationTime", + "documentation":"

The time the experiment template was created.

" + }, + "lastUpdateTime":{ + "shape":"LastUpdateTime", + "documentation":"

The time the experiment template was last updated.

" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of an IAM role.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags for the experiment template.

" + } + }, + "documentation":"

Describes an experiment template.

" + }, + "ExperimentTemplateAction":{ + "type":"structure", + "members":{ + "actionId":{ + "shape":"ActionId", + "documentation":"

The ID of the action.

" + }, + "description":{ + "shape":"ExperimentTemplateActionDescription", + "documentation":"

A description for the action.

" + }, + "parameters":{ + "shape":"ExperimentTemplateActionParameterMap", + "documentation":"

The parameters for the action.

" + }, + "targets":{ + "shape":"ExperimentTemplateActionTargetMap", + "documentation":"

The targets for the action.

" + }, + "startAfter":{ + "shape":"ExperimentTemplateActionStartAfterList", + "documentation":"

The name of the action that must be completed before the current action starts.

" + } + }, + "documentation":"

Describes an action for an experiment template.

" + }, + "ExperimentTemplateActionDescription":{ + "type":"string", + "max":512, + "pattern":"[\\s\\S]+" + }, + "ExperimentTemplateActionMap":{ + "type":"map", + "key":{"shape":"ExperimentTemplateActionName"}, + "value":{"shape":"ExperimentTemplateAction"} + }, + "ExperimentTemplateActionName":{ + "type":"string", + "max":64, + "pattern":"[\\S]+" + }, + "ExperimentTemplateActionParameter":{ + "type":"string", + "max":1024, + "pattern":"[\\S]+" + }, + "ExperimentTemplateActionParameterMap":{ + "type":"map", + "key":{"shape":"ExperimentTemplateActionParameterName"}, + "value":{"shape":"ExperimentTemplateActionParameter"} + }, + "ExperimentTemplateActionParameterName":{ + "type":"string", + "max":64, + "pattern":"[\\S]+" + }, + "ExperimentTemplateActionStartAfter":{ + "type":"string", + "max":64, + "pattern":"[\\S]+" + }, + "ExperimentTemplateActionStartAfterList":{ + "type":"list", + "member":{"shape":"ExperimentTemplateActionStartAfter"} + }, + "ExperimentTemplateActionTargetMap":{ + "type":"map", + "key":{"shape":"ExperimentTemplateActionTargetName"}, + "value":{"shape":"ExperimentTemplateTargetName"} + }, + "ExperimentTemplateActionTargetName":{ + "type":"string", + "max":64, + "pattern":"[\\S]+" + }, + "ExperimentTemplateDescription":{ + "type":"string", + "max":512, + "pattern":"[\\s\\S]+" + }, + "ExperimentTemplateId":{ + "type":"string", + "max":64, + "pattern":"[\\S]+" + }, + "ExperimentTemplateStopCondition":{ + "type":"structure", + "members":{ + "source":{ + "shape":"StopConditionSource", + "documentation":"

The source for the stop condition.

" + }, + "value":{ + "shape":"StopConditionValue", + "documentation":"

The Amazon Resource Name (ARN) of the CloudWatch alarm, if applicable.

" + } + }, + "documentation":"

Describes a stop condition for an experiment template.

" + }, + "ExperimentTemplateStopConditionList":{ + "type":"list", + "member":{"shape":"ExperimentTemplateStopCondition"} + }, + "ExperimentTemplateSummary":{ + "type":"structure", + "members":{ + "id":{ + "shape":"ExperimentTemplateId", + "documentation":"

The ID of the experiment template.

" + }, + "description":{ + "shape":"ExperimentTemplateDescription", + "documentation":"

The description of the experiment template.

" + }, + "creationTime":{ + "shape":"CreationTime", + "documentation":"

The time that the experiment template was created.

" + }, + "lastUpdateTime":{ + "shape":"LastUpdateTime", + "documentation":"

The time that the experiment template was last updated.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags for the experiment template.

" + } + }, + "documentation":"

Provides a summary of an experiment template.

" + }, + "ExperimentTemplateSummaryList":{ + "type":"list", + "member":{"shape":"ExperimentTemplateSummary"} + }, + "ExperimentTemplateTarget":{ + "type":"structure", + "members":{ + "resourceType":{ + "shape":"ResourceType", + "documentation":"

The resource type.

" + }, + "resourceArns":{ + "shape":"ResourceArnList", + "documentation":"

The Amazon Resource Names (ARNs) of the targets.

" + }, + "resourceTags":{ + "shape":"TagMap", + "documentation":"

The tags for the target resources.

" + }, + "filters":{ + "shape":"ExperimentTemplateTargetFilterList", + "documentation":"

The filters to apply to identify target resources using specific attributes.

" + }, + "selectionMode":{ + "shape":"ExperimentTemplateTargetSelectionMode", + "documentation":"

Scopes the identified resources to a specific count or percentage.

" + } + }, + "documentation":"

Describes a target for an experiment template.

" + }, + "ExperimentTemplateTargetFilter":{ + "type":"structure", + "members":{ + "path":{ + "shape":"ExperimentTemplateTargetFilterPath", + "documentation":"

The attribute path for the filter.

" + }, + "values":{ + "shape":"ExperimentTemplateTargetFilterValues", + "documentation":"

The attribute values for the filter.

" + } + }, + "documentation":"

Describes a filter used for the target resources in an experiment template.

" + }, + "ExperimentTemplateTargetFilterInputList":{ + "type":"list", + "member":{"shape":"ExperimentTemplateTargetInputFilter"} + }, + "ExperimentTemplateTargetFilterList":{ + "type":"list", + "member":{"shape":"ExperimentTemplateTargetFilter"} + }, + "ExperimentTemplateTargetFilterPath":{ + "type":"string", + "max":256, + "pattern":"[\\S]+" + }, + "ExperimentTemplateTargetFilterValue":{ + "type":"string", + "max":128, + "pattern":"[\\S]+" + }, + "ExperimentTemplateTargetFilterValues":{ + "type":"list", + "member":{"shape":"ExperimentTemplateTargetFilterValue"} + }, + "ExperimentTemplateTargetInputFilter":{ + "type":"structure", + "required":[ + "path", + "values" + ], + "members":{ + "path":{ + "shape":"ExperimentTemplateTargetFilterPath", + "documentation":"

The attribute path for the filter.

" + }, + "values":{ + "shape":"ExperimentTemplateTargetFilterValues", + "documentation":"

The attribute values for the filter.

" + } + }, + "documentation":"

Describes a filter used for the target resource input in an experiment template.

" + }, + "ExperimentTemplateTargetMap":{ + "type":"map", + "key":{"shape":"ExperimentTemplateTargetName"}, + "value":{"shape":"ExperimentTemplateTarget"} + }, + "ExperimentTemplateTargetName":{ + "type":"string", + "max":64, + "pattern":"[\\S]+" + }, + "ExperimentTemplateTargetSelectionMode":{ + "type":"string", + "max":64, + "pattern":"[\\S]+" + }, + "GetActionRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"ActionId", + "documentation":"

The ID of the action.

", + "location":"uri", + "locationName":"id" + } + } + }, + "GetActionResponse":{ + "type":"structure", + "members":{ + "action":{ + "shape":"Action", + "documentation":"

Information about the action.

" + } + } + }, + "GetExperimentRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"ExperimentId", + "documentation":"

The ID of the experiment.

", + "location":"uri", + "locationName":"id" + } + } + }, + "GetExperimentResponse":{ + "type":"structure", + "members":{ + "experiment":{ + "shape":"Experiment", + "documentation":"

Information about the experiment.

" + } + } + }, + "GetExperimentTemplateRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"ExperimentTemplateId", + "documentation":"

The ID of the experiment template.

", + "location":"uri", + "locationName":"id" + } + } + }, + "GetExperimentTemplateResponse":{ + "type":"structure", + "members":{ + "experimentTemplate":{ + "shape":"ExperimentTemplate", + "documentation":"

Information about the experiment template.

" + } + } + }, + "LastUpdateTime":{"type":"timestamp"}, + "ListActionsMaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "ListActionsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListActionsMaxResults", + "documentation":"

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

", + "box":true, + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListActionsResponse":{ + "type":"structure", + "members":{ + "actions":{ + "shape":"ActionSummaryList", + "documentation":"

The actions.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

" + } + } + }, + "ListExperimentTemplatesMaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "ListExperimentTemplatesRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListExperimentTemplatesMaxResults", + "documentation":"

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

", + "box":true, + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListExperimentTemplatesResponse":{ + "type":"structure", + "members":{ + "experimentTemplates":{ + "shape":"ExperimentTemplateSummaryList", + "documentation":"

The experiment templates.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

" + } + } + }, + "ListExperimentsMaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "ListExperimentsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListExperimentsMaxResults", + "documentation":"

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

", + "box":true, + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListExperimentsResponse":{ + "type":"structure", + "members":{ + "experiments":{ + "shape":"ExperimentSummaryList", + "documentation":"

The experiments.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

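The maxResults/nextToken pair follows the usual AWS pagination convention. A hand-written paging loop, sketched here because it is not clear from this hunk whether a botocore paginator is registered for ListExperiments:

import boto3

fis = boto3.client("fis")

experiments = []
kwargs = {"maxResults": 50}
while True:
    page = fis.list_experiments(**kwargs)
    experiments.extend(page.get("experiments", []))
    token = page.get("nextToken")
    if not token:                 # nextToken is omitted on the last page
        break
    kwargs["nextToken"] = token

print(len(experiments), "experiments found")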
" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"

The tags for the resource.

" + } + } + }, + "NextToken":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"[\\S]+" + }, + "ResourceArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"[\\S]+" + }, + "ResourceArnList":{ + "type":"list", + "member":{"shape":"ResourceArn"}, + "max":5 + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The specified resource cannot be found.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ResourceType":{ + "type":"string", + "max":64, + "pattern":"[\\S]+" + }, + "RoleArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"[\\S]+" + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

You have exceeded your service quota.

", + "error":{"httpStatusCode":402}, + "exception":true + }, + "StartExperimentRequest":{ + "type":"structure", + "required":[ + "clientToken", + "experimentTemplateId" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", + "idempotencyToken":true + }, + "experimentTemplateId":{ + "shape":"ExperimentTemplateId", + "documentation":"

The ID of the experiment template.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags to apply to the experiment.

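Putting the request members above together, starting a run from an existing template might look like the following sketch; the template ID is a placeholder, and reusing the same clientToken on retry returns the original experiment rather than starting a second one:

import boto3

fis = boto3.client("fis")

experiment = fis.start_experiment(
    clientToken="run-demo-001",                 # safe to retry with the same token
    experimentTemplateId="EXT1a2b3c4d5e6f7",    # placeholder template ID
    tags={"Team": "resilience"},
)["experiment"]
print(experiment["id"], experiment["state"]["status"])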
" + } + } + }, + "StartExperimentResponse":{ + "type":"structure", + "members":{ + "experiment":{ + "shape":"Experiment", + "documentation":"

Information about the experiment.

" + } + } + }, + "StopConditionSource":{ + "type":"string", + "max":64, + "pattern":"[\\S]+" + }, + "StopConditionValue":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"[\\s\\S]+" + }, + "StopExperimentRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"ExperimentId", + "documentation":"

The ID of the experiment.

", + "location":"uri", + "locationName":"id" + } + } + }, + "StopExperimentResponse":{ + "type":"structure", + "members":{ + "experiment":{ + "shape":"Experiment", + "documentation":"

Information about the experiment.

" + } + } + }, + "TagKey":{ + "type":"string", + "max":128, + "pattern":"[\\s\\S]+" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"} + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags for the resource.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "pattern":"[\\s\\S]*" + }, + "TargetResourceType":{ + "type":"string", + "max":128, + "pattern":"[\\S]+" + }, + "UntagResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

The tag keys to remove.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateExperimentTemplateActionInputItem":{ + "type":"structure", + "members":{ + "actionId":{ + "shape":"ActionId", + "documentation":"

The ID of the action.

" + }, + "description":{ + "shape":"ExperimentTemplateActionDescription", + "documentation":"

A description for the action.

" + }, + "parameters":{ + "shape":"ExperimentTemplateActionParameterMap", + "documentation":"

The parameters for the action, if applicable.

" + }, + "targets":{ + "shape":"ExperimentTemplateActionTargetMap", + "documentation":"

The targets for the action.

" + }, + "startAfter":{ + "shape":"ExperimentTemplateActionStartAfterList", + "documentation":"

The name of the action that must be completed before the current action starts. Omit this parameter to run the action at the start of the experiment.

" + } + }, + "documentation":"

Specifies an action for an experiment template.

" + }, + "UpdateExperimentTemplateActionInputMap":{ + "type":"map", + "key":{"shape":"ExperimentTemplateActionName"}, + "value":{"shape":"UpdateExperimentTemplateActionInputItem"} + }, + "UpdateExperimentTemplateRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"ExperimentTemplateId", + "documentation":"

The ID of the experiment template.

", + "location":"uri", + "locationName":"id" + }, + "description":{ + "shape":"ExperimentTemplateDescription", + "documentation":"

A description for the template.

" + }, + "stopConditions":{ + "shape":"UpdateExperimentTemplateStopConditionInputList", + "documentation":"

The stop conditions for the experiment.

" + }, + "targets":{ + "shape":"UpdateExperimentTemplateTargetInputMap", + "documentation":"

The targets for the experiment.

" + }, + "actions":{ + "shape":"UpdateExperimentTemplateActionInputMap", + "documentation":"

The actions for the experiment.

" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of an IAM role that grants the AWS FIS service permission to perform service actions on your behalf.

" + } + } + }, + "UpdateExperimentTemplateResponse":{ + "type":"structure", + "members":{ + "experimentTemplate":{ + "shape":"ExperimentTemplate", + "documentation":"

Information about the experiment template.

" + } + } + }, + "UpdateExperimentTemplateStopConditionInput":{ + "type":"structure", + "required":["source"], + "members":{ + "source":{ + "shape":"StopConditionSource", + "documentation":"

The source for the stop condition. Specify aws:cloudwatch:alarm if the stop condition is defined by a CloudWatch alarm. Specify none if there is no stop condition.

" + }, + "value":{ + "shape":"StopConditionValue", + "documentation":"

The Amazon Resource Name (ARN) of the CloudWatch alarm.

" + } + }, + "documentation":"

Specifies a stop condition for an experiment. You can define a stop condition as a CloudWatch alarm.

" + }, + "UpdateExperimentTemplateStopConditionInputList":{ + "type":"list", + "member":{"shape":"UpdateExperimentTemplateStopConditionInput"} + }, + "UpdateExperimentTemplateTargetInput":{ + "type":"structure", + "required":[ + "resourceType", + "selectionMode" + ], + "members":{ + "resourceType":{ + "shape":"ResourceType", + "documentation":"

The AWS resource type. The resource type must be supported for the specified action.

" + }, + "resourceArns":{ + "shape":"ResourceArnList", + "documentation":"

The Amazon Resource Names (ARNs) of the targets.

" + }, + "resourceTags":{ + "shape":"TagMap", + "documentation":"

The tags for the target resources.

" + }, + "filters":{ + "shape":"ExperimentTemplateTargetFilterInputList", + "documentation":"

The filters to apply to identify target resources using specific attributes.

" + }, + "selectionMode":{ + "shape":"ExperimentTemplateTargetSelectionMode", + "documentation":"

Scopes the identified resources to a specific count or percentage.

" + } + }, + "documentation":"

Specifies a target for an experiment. You must specify at least one Amazon Resource Name (ARN) or at least one resource tag. You cannot specify both.

" + }, + "UpdateExperimentTemplateTargetInputMap":{ + "type":"map", + "key":{"shape":"ExperimentTemplateTargetName"}, + "value":{"shape":"UpdateExperimentTemplateTargetInput"} + }, + "ValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The specified input is not valid, or fails to satisfy the constraints for the request.

", + "error":{"httpStatusCode":400}, + "exception":true + } + }, + "documentation":"

AWS Fault Injection Simulator is a managed service that enables you to perform fault injection experiments on your AWS workloads. For more information, see the AWS Fault Injection Simulator User Guide.

" +} diff --git a/botocore/data/forecast/2018-06-26/service-2.json b/botocore/data/forecast/2018-06-26/service-2.json index 737ddd36..17b95493 100644 --- a/botocore/data/forecast/2018-06-26/service-2.json +++ b/botocore/data/forecast/2018-06-26/service-2.json @@ -473,6 +473,21 @@ ], "documentation":"

Lists the tags for an Amazon Forecast resource.

" }, + "StopResource":{ + "name":"StopResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopResourceRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Stops a resource.

The resource undergoes the following states: CREATE_STOPPING and CREATE_STOPPED. You cannot resume a resource once it has been stopped.

This operation can be applied to the following resources (and their corresponding child resources):

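A minimal boto3 sketch of the new operation; the predictor ARN below is a placeholder, and any of the supported resource ARNs listed under StopResourceRequest could be passed instead:

import boto3

forecast = boto3.client("forecast")

# Stopping is one-way: the resource ends up in CREATE_STOPPED and cannot be resumed.
forecast.stop_resource(
    ResourceArn="arn:aws:forecast:us-west-2:123456789012:predictor/my_predictor"
)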
", + "idempotent":true + }, "TagResource":{ "name":"TagResource", "http":{ @@ -968,7 +983,7 @@ }, "Status":{ "shape":"Status", - "documentation":"

The status of the dataset import job. The status is reflected in the status of the dataset. For example, when the import job status is CREATE_IN_PROGRESS, the status of the dataset is UPDATE_IN_PROGRESS. States include:

" + "documentation":"

The status of the dataset import job. States include:

" }, "Message":{ "shape":"ErrorMessage", @@ -980,7 +995,7 @@ }, "LastModificationTime":{ "shape":"Timestamp", - "documentation":"

The last time that the dataset was modified. The time depends on the status of the job, as follows:

" + "documentation":"

The last time the resource was modified. The timestamp depends on the status of the job:

" } }, "documentation":"

Provides a summary of the dataset import job properties used in the ListDatasetImportJobs operation. To get the complete set of properties, call the DescribeDatasetImportJob operation, and provide the DatasetImportJobArn.

" @@ -1199,7 +1214,7 @@ }, "Status":{ "shape":"Status", - "documentation":"

The status of the dataset import job. The status is reflected in the status of the dataset. For example, when the import job status is CREATE_IN_PROGRESS, the status of the dataset is UPDATE_IN_PROGRESS. States include:

" + "documentation":"

The status of the dataset import job. States include:

" }, "Message":{ "shape":"Message", @@ -1211,7 +1226,7 @@ }, "LastModificationTime":{ "shape":"Timestamp", - "documentation":"

The last time that the dataset was modified. The time depends on the status of the job, as follows:

" + "documentation":"

The last time the resource was modified. The timestamp depends on the status of the job:

" } } }, @@ -1305,7 +1320,7 @@ }, "Status":{ "shape":"Status", - "documentation":"

The status of the forecast export job. States include:

The Status of the forecast export job must be ACTIVE before you can access the forecast in your S3 bucket.

" + "documentation":"

The status of the forecast export job. States include:

The Status of the forecast export job must be ACTIVE before you can access the forecast in your S3 bucket.

" }, "CreationTime":{ "shape":"Timestamp", @@ -1313,7 +1328,7 @@ }, "LastModificationTime":{ "shape":"Timestamp", - "documentation":"

When the last successful export job finished.

" + "documentation":"

The last time the resource was modified. The timestamp depends on the status of the job:

" } } }, @@ -1352,7 +1367,7 @@ }, "Status":{ "shape":"String", - "documentation":"

The status of the forecast. States include:

The Status of the forecast must be ACTIVE before you can query or export the forecast.

" + "documentation":"

The status of the forecast. States include:

The Status of the forecast must be ACTIVE before you can query or export the forecast.

" }, "Message":{ "shape":"ErrorMessage", @@ -1364,7 +1379,7 @@ }, "LastModificationTime":{ "shape":"Timestamp", - "documentation":"

Initially, the same as CreationTime (status is CREATE_PENDING). Updated when inference (creating the forecast) starts (status changed to CREATE_IN_PROGRESS), and when inference is complete (status changed to ACTIVE) or fails (status changed to CREATE_FAILED).

" + "documentation":"

The last time the resource was modified. The timestamp depends on the status of the job:

" } } }, @@ -1400,7 +1415,7 @@ }, "Status":{ "shape":"Status", - "documentation":"

The status of the predictor backtest export job. States include:

" + "documentation":"

The status of the predictor backtest export job. States include:

" }, "CreationTime":{ "shape":"Timestamp", @@ -1408,7 +1423,7 @@ }, "LastModificationTime":{ "shape":"Timestamp", - "documentation":"

When the last successful export job finished.

" + "documentation":"

The last time the resource was modified. The timestamp depends on the status of the job:

" } } }, @@ -1491,7 +1506,7 @@ }, "Status":{ "shape":"Status", - "documentation":"

The status of the predictor. States include:

The Status of the predictor must be ACTIVE before you can use the predictor to create a forecast.

" + "documentation":"

The status of the predictor. States include:

The Status of the predictor must be ACTIVE before you can use the predictor to create a forecast.

" }, "Message":{ "shape":"Message", @@ -1503,7 +1518,7 @@ }, "LastModificationTime":{ "shape":"Timestamp", - "documentation":"

Initially, the same as CreationTime (when the status is CREATE_PENDING). This value is updated when training starts (when the status changes to CREATE_IN_PROGRESS), and when training has completed (when the status changes to ACTIVE) or fails (when the status changes to CREATE_FAILED).

" + "documentation":"

The last time the resource was modified. The timestamp depends on the status of the job:

" } } }, @@ -1730,7 +1745,7 @@ }, "Status":{ "shape":"Status", - "documentation":"

The status of the forecast export job. States include:

The Status of the forecast export job must be ACTIVE before you can access the forecast in your S3 bucket.

" + "documentation":"

The status of the forecast export job. States include:

The Status of the forecast export job must be ACTIVE before you can access the forecast in your S3 bucket.

" }, "Message":{ "shape":"ErrorMessage", @@ -1742,7 +1757,7 @@ }, "LastModificationTime":{ "shape":"Timestamp", - "documentation":"

When the last successful export job finished.

" + "documentation":"

The last time the resource was modified. The timestamp depends on the status of the job:

" } }, "documentation":"

Provides a summary of the forecast export job properties used in the ListForecastExportJobs operation. To get the complete set of properties, call the DescribeForecastExportJob operation, and provide the listed ForecastExportJobArn.

" @@ -1772,7 +1787,7 @@ }, "Status":{ "shape":"Status", - "documentation":"

The status of the forecast. States include:

The Status of the forecast must be ACTIVE before you can query or export the forecast.

" + "documentation":"

The status of the forecast. States include:

The Status of the forecast must be ACTIVE before you can query or export the forecast.

" }, "Message":{ "shape":"ErrorMessage", @@ -1784,7 +1799,7 @@ }, "LastModificationTime":{ "shape":"Timestamp", - "documentation":"

Initially, the same as CreationTime (status is CREATE_PENDING). Updated when inference (creating the forecast) starts (status changed to CREATE_IN_PROGRESS), and when inference is complete (status changed to ACTIVE) or fails (status changed to CREATE_FAILED).

" + "documentation":"

The last time the resource was modified. The timestamp depends on the status of the job:

" } }, "documentation":"

Provides a summary of the forecast properties used in the ListForecasts operation. To get the complete set of properties, call the DescribeForecast operation, and provide the ForecastArn that is listed in the summary.

" @@ -2074,7 +2089,7 @@ }, "Filters":{ "shape":"Filters", - "documentation":"

An array of filters. For each filter, provide a condition and a match statement. The condition is either IS or IS_NOT, which specifies whether to include or exclude the predictor backtest export jobs that match the statement from the list. The match statement consists of a key and a value.

Filter properties

" + "documentation":"

An array of filters. For each filter, provide a condition and a match statement. The condition is either IS or IS_NOT, which specifies whether to include or exclude the predictor backtest export jobs that match the statement from the list. The match statement consists of a key and a value.

Filter properties

" } } }, @@ -2219,7 +2234,7 @@ "Destination":{"shape":"DataDestination"}, "Status":{ "shape":"Status", - "documentation":"

The status of the predictor backtest export job. States include:

" + "documentation":"

The status of the predictor backtest export job. States include:

" }, "Message":{ "shape":"ErrorMessage", @@ -2231,7 +2246,7 @@ }, "LastModificationTime":{ "shape":"Timestamp", - "documentation":"

When the last successful export job finished.

" + "documentation":"

The last time the resource was modified. The timestamp depends on the status of the job:

" } }, "documentation":"

Provides a summary of the predictor backtest export job properties used in the ListPredictorBacktestExportJobs operation. To get a complete set of properties, call the DescribePredictorBacktestExportJob operation, and provide the listed PredictorBacktestExportJobArn.

" @@ -2291,7 +2306,7 @@ }, "Status":{ "shape":"Status", - "documentation":"

The status of the predictor. States include:

The Status of the predictor must be ACTIVE before you can use the predictor to create a forecast.

" + "documentation":"

The status of the predictor. States include:

The Status of the predictor must be ACTIVE before you can use the predictor to create a forecast.

" }, "Message":{ "shape":"ErrorMessage", @@ -2303,7 +2318,7 @@ }, "LastModificationTime":{ "shape":"Timestamp", - "documentation":"

Initially, the same as CreationTime (status is CREATE_PENDING). Updated when training starts (status changed to CREATE_IN_PROGRESS), and when training is complete (status changed to ACTIVE) or fails (status changed to CREATE_FAILED).

" + "documentation":"

The last time the resource was modified. The timestamp depends on the status of the job:

" } }, "documentation":"

Provides a summary of the predictor properties that are used in the ListPredictors operation. To get the complete set of properties, call the DescribePredictor operation, and provide the listed PredictorArn.

" @@ -2443,6 +2458,16 @@ "type":"string", "max":256 }, + "StopResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that identifies the resource to stop. The supported ARNs are DatasetImportJobArn, PredictorArn, PredictorBacktestExportJobArn, ForecastArn, and ForecastExportJobArn.

" + } + } + }, "String":{ "type":"string", "max":256, diff --git a/botocore/data/gamelift/2015-10-01/service-2.json b/botocore/data/gamelift/2015-10-01/service-2.json index 3b533c0e..f25ff2b7 100644 --- a/botocore/data/gamelift/2015-10-01/service-2.json +++ b/botocore/data/gamelift/2015-10-01/service-2.json @@ -26,7 +26,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Registers a player's acceptance or rejection of a proposed FlexMatch match. A matchmaking configuration may require player acceptance; if so, then matches built with that configuration cannot be completed unless all players accept the proposed match within a specified time limit.

When FlexMatch builds a match, all the matchmaking tickets involved in the proposed match are placed into status REQUIRES_ACCEPTANCE. This is a trigger for your game to get acceptance from all players in the ticket. Acceptances are only valid for tickets when they are in this status; all other acceptances result in an error.

To register acceptance, specify the ticket ID, a response, and one or more players. Once all players have registered acceptance, the matchmaking tickets advance to status PLACING, where a new game session is created for the match.

If any player rejects the match, or if acceptances are not received before a specified timeout, the proposed match is dropped. The matchmaking tickets are then handled in one of two ways: For tickets where one or more players rejected the match, the ticket status is returned to SEARCHING to find a new match. For tickets where one or more players failed to respond, the ticket status is set to CANCELLED, and processing is terminated. A new matchmaking request for these players can be submitted as needed.

Learn more

Add FlexMatch to a Game Client

FlexMatch Events Reference

Related operations

" + "documentation":"

Registers a player's acceptance or rejection of a proposed FlexMatch match. A matchmaking configuration may require player acceptance; if so, then matches built with that configuration cannot be completed unless all players accept the proposed match within a specified time limit.

When FlexMatch builds a match, all the matchmaking tickets involved in the proposed match are placed into status REQUIRES_ACCEPTANCE. This is a trigger for your game to get acceptance from all players in the ticket. Acceptances are only valid for tickets when they are in this status; all other acceptances result in an error.

To register acceptance, specify the ticket ID, a response, and one or more players. Once all players have registered acceptance, the matchmaking tickets advance to status PLACING, where a new game session is created for the match.

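In boto3 terms the acceptance call is small. The parameter names below follow the GameLift AcceptMatch request as generally documented (the request shape is not part of this hunk), and the ticket and player IDs are placeholders:

import boto3

gamelift = boto3.client("gamelift")

# Register one player's acceptance while the ticket is in REQUIRES_ACCEPTANCE.
gamelift.accept_match(
    TicketId="ticket-1234",
    PlayerIds=["player-1"],
    AcceptanceType="ACCEPT",      # or "REJECT"
)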
If any player rejects the match, or if acceptances are not received before a specified timeout, the proposed match is dropped. The matchmaking tickets are then handled in one of two ways: For tickets where one or more players rejected the match, the ticket status is returned to SEARCHING to find a new match. For tickets where one or more players failed to respond, the ticket status is set to CANCELLED, and processing is terminated. A new matchmaking request for these players can be submitted as needed.

Learn more

Add FlexMatch to a game client

FlexMatch events (reference)

Related actions

StartMatchmaking | DescribeMatchmaking | StopMatchmaking | AcceptMatch | StartMatchBackfill | All APIs by task

" }, "ClaimGameServer":{ "name":"ClaimGameServer", @@ -44,7 +44,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

Locates an available game server and temporarily reserves it to host gameplay and players. This operation is called from a game client or client service (such as a matchmaker) to request hosting resources for a new game session. In response, GameLift FleetIQ locates an available game server, places it in CLAIMED status for 60 seconds, and returns connection information that players can use to connect to the game server.

To claim a game server, identify a game server group. You can also specify a game server ID, although this approach bypasses GameLift FleetIQ placement optimization. Optionally, include game data to pass to the game server at the start of a game session, such as a game map or player information.

When a game server is successfully claimed, connection information is returned. A claimed game server's utilization status remains AVAILABLE while the claim status is set to CLAIMED for up to 60 seconds. This time period gives the game server time to update its status to UTILIZED (using UpdateGameServer) once players join. If the game server's status is not updated within 60 seconds, the game server reverts to unclaimed status and is available to be claimed by another request. The claim time period is a fixed value and is not configurable.

If you try to claim a specific game server, this request will fail in the following cases:

When claiming a specific game server, this request will succeed even if the game server is running on an instance in DRAINING status. To avoid this, first check the instance status by calling DescribeGameServerInstances.

Learn more

GameLift FleetIQ Guide

Related operations

" + "documentation":"

This operation is used with the GameLift FleetIQ solution and game server groups.

Locates an available game server and temporarily reserves it to host gameplay and players. This operation is called from a game client or client service (such as a matchmaker) to request hosting resources for a new game session. In response, GameLift FleetIQ locates an available game server, places it in CLAIMED status for 60 seconds, and returns connection information that players can use to connect to the game server.

To claim a game server, identify a game server group. You can also specify a game server ID, although this approach bypasses GameLift FleetIQ placement optimization. Optionally, include game data to pass to the game server at the start of a game session, such as a game map or player information.

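A sketch of the claim call with boto3; the group name and game data are placeholders, and the parameter names come from the existing GameLift ClaimGameServer API rather than from this hunk:

import boto3

gamelift = boto3.client("gamelift")

# Let FleetIQ choose an available game server; pass GameServerId instead to
# claim a specific server (which bypasses placement optimization).
claimed = gamelift.claim_game_server(
    GameServerGroupName="my-game-server-group",
    GameServerData='{"map": "desert"}',
)["GameServer"]
print(claimed["GameServerId"], claimed["ClaimStatus"])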
When a game server is successfully claimed, connection information is returned. A claimed game server's utilization status remains AVAILABLE while the claim status is set to CLAIMED for up to 60 seconds. This time period gives the game server time to update its status to UTILIZED (using UpdateGameServer) once players join. If the game server's status is not updated within 60 seconds, the game server reverts to unclaimed status and is available to be claimed by another request. The claim time period is a fixed value and is not configurable.

If you try to claim a specific game server, this request will fail in the following cases:

When claiming a specific game server, this request will succeed even if the game server is running on an instance in DRAINING status. To avoid this, first check the instance status by calling DescribeGameServerInstances.

Learn more

GameLift FleetIQ Guide

Related actions

RegisterGameServer | ListGameServers | ClaimGameServer | DescribeGameServer | UpdateGameServer | DeregisterGameServer | All APIs by task

" }, "CreateAlias":{ "name":"CreateAlias", @@ -62,7 +62,7 @@ {"shape":"LimitExceededException"}, {"shape":"TaggingFailedException"} ], - "documentation":"

Creates an alias for a fleet. In most situations, you can use an alias ID in place of a fleet ID. An alias provides a level of abstraction for a fleet that is useful when redirecting player traffic from one fleet to another, such as when updating your game build.

Amazon GameLift supports two types of routing strategies for aliases: simple and terminal. A simple alias points to an active fleet. A terminal alias is used to display messaging or link to a URL instead of routing players to an active fleet. For example, you might use a terminal alias when a game version is no longer supported and you want to direct players to an upgrade site.

To create a fleet alias, specify an alias name, routing strategy, and optional description. Each simple alias can point to only one fleet, but a fleet can have multiple aliases. If successful, a new alias record is returned, including an alias ID and an ARN. You can reassign an alias to another fleet by calling UpdateAlias.

" + "documentation":"

Creates an alias for a fleet. In most situations, you can use an alias ID in place of a fleet ID. An alias provides a level of abstraction for a fleet that is useful when redirecting player traffic from one fleet to another, such as when updating your game build.

Amazon GameLift supports two types of routing strategies for aliases: simple and terminal. A simple alias points to an active fleet. A terminal alias is used to display messaging or link to a URL instead of routing players to an active fleet. For example, you might use a terminal alias when a game version is no longer supported and you want to direct players to an upgrade site.

To create a fleet alias, specify an alias name, routing strategy, and optional description. Each simple alias can point to only one fleet, but a fleet can have multiple aliases. If successful, a new alias record is returned, including an alias ID and an ARN. You can reassign an alias to another fleet by calling UpdateAlias.

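For illustration, a simple-routing alias might be created like this with boto3 (the fleet ID and names are placeholders; the CreateAlias request shape itself is outside this hunk):

import boto3

gamelift = boto3.client("gamelift")

alias = gamelift.create_alias(
    Name="live",
    Description="Routes players to the current production fleet",
    RoutingStrategy={"Type": "SIMPLE", "FleetId": "fleet-1234abcd"},
)["Alias"]
print(alias["AliasId"], alias["AliasArn"])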
Related actions

CreateAlias | ListAliases | DescribeAlias | UpdateAlias | DeleteAlias | ResolveAlias | All APIs by task

" }, "CreateBuild":{ "name":"CreateBuild", @@ -79,7 +79,7 @@ {"shape":"TaggingFailedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Creates a new Amazon GameLift build resource for your game server binary files. Game server binaries must be combined into a zip file for use with Amazon GameLift.

When setting up a new game build for GameLift, we recommend using the AWS CLI command upload-build . This helper command combines two tasks: (1) it uploads your build files from a file directory to a GameLift Amazon S3 location, and (2) it creates a new build resource.

The CreateBuild operation can used in the following scenarios:

If successful, this operation creates a new build resource with a unique build ID and places it in INITIALIZED status. A build must be in READY status before you can create fleets with it.

Learn more

Uploading Your Game

Create a Build with Files in Amazon S3

Related operations

" + "documentation":"

Creates a new Amazon GameLift build resource for your game server binary files. Game server binaries must be combined into a zip file for use with Amazon GameLift.

When setting up a new game build for GameLift, we recommend using the AWS CLI command upload-build. This helper command combines two tasks: (1) it uploads your build files from a file directory to a GameLift Amazon S3 location, and (2) it creates a new build resource.

The CreateBuild operation can be used in the following scenarios:

If successful, this operation creates a new build resource with a unique build ID and places it in INITIALIZED status. A build must be in READY status before you can create fleets with it.

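As a rough sketch, creating a build resource from files already staged in Amazon S3 could look like the following; the bucket, key, and role ARN are placeholders, and the parameter names are taken from the long-standing CreateBuild API rather than from this diff:

import boto3

gamelift = boto3.client("gamelift")

# Assumes the zipped server build is already in S3 and the role below can read it.
build = gamelift.create_build(
    Name="MyGameServer",
    Version="1.0.0",
    OperatingSystem="AMAZON_LINUX_2",
    StorageLocation={
        "Bucket": "my-build-bucket",
        "Key": "builds/my-game-server.zip",
        "RoleArn": "arn:aws:iam::123456789012:role/GameLiftS3Access",
    },
)["Build"]
print(build["BuildId"], build["Status"])      # status starts at INITIALIZED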
Learn more

Uploading Your Game

Create a Build with Files in Amazon S3

Related actions

CreateBuild | ListBuilds | DescribeBuild | UpdateBuild | DeleteBuild | All APIs by task

" }, "CreateFleet":{ "name":"CreateFleet", @@ -98,7 +98,24 @@ {"shape":"UnauthorizedException"}, {"shape":"TaggingFailedException"} ], - "documentation":"

Creates a new fleet to run your game servers. whether they are custom game builds or Realtime Servers with game-specific script. A fleet is a set of Amazon Elastic Compute Cloud (Amazon EC2) instances, each of which can host multiple game sessions. When creating a fleet, you choose the hardware specifications, set some configuration options, and specify the game server to deploy on the new fleet.

To create a new fleet, provide the following: (1) a fleet name, (2) an EC2 instance type and fleet type (spot or on-demand), (3) the build ID for your game build or script ID if using Realtime Servers, and (4) a runtime configuration, which determines how game servers will run on each instance in the fleet.

If the CreateFleet call is successful, Amazon GameLift performs the following tasks. You can track the process of a fleet by checking the fleet status or by monitoring fleet creation events:

Learn more

Setting Up Fleets

Debug Fleet Creation Issues

Related operations

" + "documentation":"

Creates a fleet of Amazon Elastic Compute Cloud (Amazon EC2) instances to host your custom game server or Realtime Servers. Use this operation to configure the computing resources for your fleet and provide instructions for running game servers on each instance.

Most GameLift fleets can deploy instances to multiple locations, including the home Region (where the fleet is created) and an optional set of remote locations. Fleets that are created in the following AWS Regions support multiple locations: us-east-1 (N. Virginia), us-west-2 (Oregon), eu-central-1 (Frankfurt), eu-west-1 (Ireland), ap-southeast-2 (Sydney), ap-northeast-1 (Tokyo), and ap-northeast-2 (Seoul). Fleets that are created in other GameLift Regions can deploy instances in the fleet's home Region only. All fleet instances use the same configuration regardless of location; however, you can adjust capacity settings and turn auto-scaling on/off for each location.

To create a fleet, choose the hardware for your instances, specify a game server build or Realtime script to deploy, and provide a runtime configuration to direct GameLift how to start and run game servers on each instance in the fleet. Set permissions for inbound traffic to your game servers, and enable optional features as needed. When creating a multi-location fleet, provide a list of additional remote locations.

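A condensed boto3 sketch of such a request; the build ID, port range, launch path, and remote location are placeholders, and only a small subset of the available CreateFleet parameters is shown:

import boto3

gamelift = boto3.client("gamelift")

fleet = gamelift.create_fleet(
    Name="prod-fleet",
    BuildId="build-1234abcd",                  # use ScriptId instead for Realtime Servers
    EC2InstanceType="c5.large",
    EC2InboundPermissions=[
        {"FromPort": 33430, "ToPort": 33440, "IpRange": "0.0.0.0/0", "Protocol": "UDP"},
    ],
    RuntimeConfiguration={
        "ServerProcesses": [
            {"LaunchPath": "/local/game/MyServer", "ConcurrentExecutions": 1},
        ],
    },
    Locations=[{"Location": "us-west-2"}],     # optional remote locations
)["FleetAttributes"]
print(fleet["FleetId"], fleet["Status"])       # the fleet starts in NEW status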
If successful, this operation creates a new Fleet resource and places it in NEW status, which prompts GameLift to initiate the fleet creation workflow. You can track fleet creation by checking fleet status using DescribeFleetAttributes and DescribeFleetLocationAttributes, or by monitoring fleet creation events using DescribeFleetEvents. As soon as the fleet status changes to ACTIVE, you can enable automatic scaling for the fleet with PutScalingPolicy and set capacity for the home Region with UpdateFleetCapacity. When the status of each remote location reaches ACTIVE, you can set capacity by location using UpdateFleetCapacity.

Learn more

Setting up fleets

Debug fleet creation issues

Multi-location fleets

Related actions

CreateFleet | UpdateFleetCapacity | PutScalingPolicy | DescribeEC2InstanceLimits | DescribeFleetAttributes | DescribeFleetLocationAttributes | UpdateFleetAttributes | StopFleetActions | DeleteFleet | All APIs by task

" + }, + "CreateFleetLocations":{ + "name":"CreateFleetLocations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateFleetLocationsInput"}, + "output":{"shape":"CreateFleetLocationsOutput"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"}, + {"shape":"InvalidFleetStatusException"} + ], + "documentation":"

Adds remote locations to a fleet and begins populating the new locations with EC2 instances. The new instances conform to the fleet's instance type, auto-scaling, and other configuration settings.

This operation cannot be used with fleets that don't support remote locations. Fleets can have multiple locations only if they reside in AWS Regions that support this feature (see CreateFleet for the complete list) and were created after the feature was released in March 2021.

To add fleet locations, specify the fleet to be updated and provide a list of one or more locations.

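For example, adding two remote locations to an existing fleet might be sketched as follows (the fleet ID is a placeholder):

import boto3

gamelift = boto3.client("gamelift")

result = gamelift.create_fleet_locations(
    FleetId="fleet-1234abcd",
    Locations=[{"Location": "eu-west-1"}, {"Location": "ap-southeast-2"}],
)
for state in result["LocationStates"]:
    print(state["Location"], state["Status"])  # each added location starts as NEW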
If successful, this operation returns the list of added locations with their status set to NEW. GameLift initiates the process of starting an instance in each added location. You can track the status of each new location by monitoring location creation events using DescribeFleetEvents. Alternatively, you can poll location status by calling DescribeFleetLocationAttributes. After a location status becomes ACTIVE, you can adjust the location's capacity as needed with UpdateFleetCapacity.

Learn more

Setting up fleets

Multi-location fleets

Related actions

CreateFleetLocations | DescribeFleetLocationAttributes | DescribeFleetLocationCapacity | DescribeFleetLocationUtilization | DescribeFleetAttributes | DescribeFleetCapacity | DescribeFleetUtilization | UpdateFleetCapacity | StopFleetActions | DeleteFleetLocations | All APIs by task

" }, "CreateGameServerGroup":{ "name":"CreateGameServerGroup", @@ -115,7 +132,7 @@ {"shape":"InternalServiceException"}, {"shape":"LimitExceededException"} ], - "documentation":"

This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

Creates a GameLift FleetIQ game server group for managing game hosting on a collection of Amazon EC2 instances for game hosting. This operation creates the game server group, creates an Auto Scaling group in your AWS account, and establishes a link between the two groups. You can view the status of your game server groups in the GameLift console. Game server group metrics and events are emitted to Amazon CloudWatch.

Before creating a new game server group, you must have the following:

To create a new game server group, specify a unique group name, IAM role and Amazon EC2 launch template, and provide a list of instance types that can be used in the group. You must also set initial maximum and minimum limits on the group's instance count. You can optionally set an Auto Scaling policy with target tracking based on a GameLift FleetIQ metric.

Once the game server group and corresponding Auto Scaling group are created, you have full access to change the Auto Scaling group's configuration as needed. Several properties that are set when creating a game server group, including maximum/minimum size and auto-scaling policy settings, must be updated directly in the Auto Scaling group. Keep in mind that some Auto Scaling group properties are periodically updated by GameLift FleetIQ as part of its balancing activities to optimize for availability and cost.

Learn more

GameLift FleetIQ Guide

Related operations

" + "documentation":"

This operation is used with the GameLift FleetIQ solution and game server groups.

Creates a GameLift FleetIQ game server group for managing game hosting on a collection of Amazon EC2 instances. This operation creates the game server group, creates an Auto Scaling group in your AWS account, and establishes a link between the two groups. You can view the status of your game server groups in the GameLift console. Game server group metrics and events are emitted to Amazon CloudWatch.

Before creating a new game server group, you must have the following:

To create a new game server group, specify a unique group name, IAM role and Amazon EC2 launch template, and provide a list of instance types that can be used in the group. You must also set initial maximum and minimum limits on the group's instance count. You can optionally set an Auto Scaling policy with target tracking based on a GameLift FleetIQ metric.

Once the game server group and corresponding Auto Scaling group are created, you have full access to change the Auto Scaling group's configuration as needed. Several properties that are set when creating a game server group, including maximum/minimum size and auto-scaling policy settings, must be updated directly in the Auto Scaling group. Keep in mind that some Auto Scaling group properties are periodically updated by GameLift FleetIQ as part of its balancing activities to optimize for availability and cost.

Learn more

GameLift FleetIQ Guide

Related actions

CreateGameServerGroup | ListGameServerGroups | DescribeGameServerGroup | UpdateGameServerGroup | DeleteGameServerGroup | ResumeGameServerGroup | SuspendGameServerGroup | DescribeGameServerInstances | All APIs by task

" }, "CreateGameSession":{ "name":"CreateGameSession", @@ -137,7 +154,7 @@ {"shape":"LimitExceededException"}, {"shape":"IdempotentParameterMismatchException"} ], - "documentation":"

Creates a multiplayer game session for players. This operation creates a game session record and assigns an available server process in the specified fleet to host the game session. A fleet must have an ACTIVE status before a game session can be created in it.

To create a game session, specify either fleet ID or alias ID and indicate a maximum number of players to allow in the game session. You can also provide a name and game-specific properties for this game session. If successful, a GameSession object is returned containing the game session properties and other settings you specified.

Idempotency tokens. You can add a token that uniquely identifies game session requests. This is useful for ensuring that game session requests are idempotent. Multiple requests with the same idempotency token are processed only once; subsequent requests return the original result. All response values are the same with the exception of game session status, which may change.

Resource creation limits. If you are creating a game session on a fleet with a resource creation limit policy in force, then you must specify a creator ID. Without this ID, Amazon GameLift has no way to evaluate the policy for this new game session request.

Player acceptance policy. By default, newly created game sessions are open to new players. You can restrict new player access by using UpdateGameSession to change the game session's player session creation policy.

Game session logs. Logs are retained for all active game sessions for 14 days. To access the logs, call GetGameSessionLogUrl to download the log files.

Available in Amazon GameLift Local.

" + "documentation":"

Creates a multiplayer game session for players in a specific fleet location. This operation prompts an available server process to start a game session and retrieves connection information for the new game session. As an alternative, consider using the GameLift game session placement feature with StartGameSessionPlacement, which uses FleetIQ algorithms and queues to optimize the placement process.

When creating a game session, you specify exactly where you want to place it and provide a set of game session configuration settings. The fleet must be in ACTIVE status before a game session can be created in it.

This operation can be used in the following ways:

If successful, a workflow is initiated to start a new game session. A GameSession object is returned containing the game session configuration and status. When the status is ACTIVE, game session connection information is provided and player sessions can be created for the game session. By default, newly created game sessions are open to new players. You can restrict new player access by using UpdateGameSession to change the game session's player session creation policy.

Game session logs are retained for all active game sessions for 14 days. To access the logs, call GetGameSessionLogUrl to download the log files.

Available in GameLift Local.

Learn more

Start a game session

Related actions

CreateGameSession | DescribeGameSessions | DescribeGameSessionDetails | SearchGameSessions | UpdateGameSession | GetGameSessionLogUrl | StartGameSessionPlacement | DescribeGameSessionPlacement | StopGameSessionPlacement | All APIs by task
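
A hedged botocore sketch of creating a game session in a specific fleet location (identifiers are placeholders; parameter names follow the GameLift request shape):

import botocore.session

client = botocore.session.get_session().create_client("gamelift", region_name="us-east-1")

# Start a game session in a remote location of a multi-location fleet.
response = client.create_game_session(
    FleetId="fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff",   # placeholder fleet ID
    MaximumPlayerSessionCount=10,
    Name="my-test-session",
    Location="us-west-2",        # omit to place the session in the fleet's home Region
)
session_info = response["GameSession"]
print(session_info["GameSessionId"], session_info["Status"])   # ACTIVATING, then ACTIVE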

" }, "CreateGameSessionQueue":{ "name":"CreateGameSessionQueue", @@ -152,9 +169,10 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"}, {"shape":"LimitExceededException"}, + {"shape":"NotFoundException"}, {"shape":"TaggingFailedException"} ], - "documentation":"

Establishes a new queue for processing requests to place new game sessions. A queue identifies where new game sessions can be hosted -- by specifying a list of destinations (fleets or aliases) -- and how long requests can wait in the queue before timing out. You can set up a queue to try to place game sessions on fleets in multiple Regions. To add placement requests to a queue, call StartGameSessionPlacement and reference the queue name.

Destination order. When processing a request for a game session, Amazon GameLift tries each destination in order until it finds one with available resources to host the new game session. A queue's default order is determined by how destinations are listed. The default order is overridden when a game session placement request provides player latency information. Player latency information enables Amazon GameLift to prioritize destinations where players report the lowest average latency, as a result placing the new game session where the majority of players will have the best possible gameplay experience.

Player latency policies. For placement requests containing player latency information, use player latency policies to protect individual players from very high latencies. With a latency cap, even when a destination can deliver a low latency for most players, the game is not placed where any individual player is reporting latency higher than a policy's maximum. A queue can have multiple latency policies, which are enforced consecutively starting with the policy with the lowest latency cap. Use multiple policies to gradually relax latency controls; for example, you might set a policy with a low latency cap for the first 60 seconds, a second policy with a higher cap for the next 60 seconds, etc.

To create a new queue, provide a name, timeout value, a list of destinations and, if desired, a set of latency policies. If successful, a new queue object is returned.

Learn more

Design a Game Session Queue

Create a Game Session Queue

Related operations

" + "documentation":"

Creates a placement queue that processes requests for new game sessions. A queue uses FleetIQ algorithms to determine the best placement locations and find an available game server there, then prompts the game server process to start a new game session.

A game session queue is configured with a set of destinations (GameLift fleets or aliases), which determine the locations where the queue can place new game sessions. These destinations can span multiple fleet types (Spot and On-Demand), instance types, and AWS Regions. If the queue includes multi-location fleets, the queue is able to place game sessions in all of a fleet's remote locations. You can opt to filter out individual locations if needed.

The queue configuration also determines how FleetIQ selects the best available placement for a new game session. Before searching for an available game server, FleetIQ first prioritizes the queue's destinations and locations, with the best placement locations on top. You can set up the queue to use the FleetIQ default prioritization or provide an alternate set of priorities.

To create a new queue, provide a name, timeout value, and a list of destinations. Optionally, specify a sort configuration and/or a filter, and define a set of latency cap policies. You can also include the ARN for an Amazon Simple Notification Service (SNS) topic to receive notifications of game session placement activity. Using SNS or CloudWatch events for notifications is the preferred way to track placement activity.

If successful, a new GameSessionQueue object is returned with an assigned queue ARN. New game session requests, which are submitted to the queue with StartGameSessionPlacement or StartMatchmaking, reference a queue's name or ARN.

Learn more

Design a game session queue

Create a game session queue

Related actions

CreateGameSessionQueue | DescribeGameSessionQueues | UpdateGameSessionQueue | DeleteGameSessionQueue | All APIs by task
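
A minimal sketch of creating a queue with botocore (ARNs are placeholders; the optional NotificationTarget parameter is assumed from the SNS topic support described above):

import botocore.session

client = botocore.session.get_session().create_client("gamelift", region_name="us-east-1")

# Queue with two destinations and a 10-minute placement timeout.
response = client.create_game_session_queue(
    Name="my-game-queue",
    TimeoutInSeconds=600,
    Destinations=[
        {"DestinationArn": "arn:aws:gamelift:us-east-1:111122223333:fleet/fleet-1111aaaa"},
        {"DestinationArn": "arn:aws:gamelift:us-east-1:111122223333:alias/alias-2222bbbb"},
    ],
    NotificationTarget="arn:aws:sns:us-east-1:111122223333:placement-events",  # optional SNS topic
)
print(response["GameSessionQueue"]["GameSessionQueueArn"])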

" }, "CreateMatchmakingConfiguration":{ "name":"CreateMatchmakingConfiguration", @@ -172,7 +190,7 @@ {"shape":"UnsupportedRegionException"}, {"shape":"TaggingFailedException"} ], - "documentation":"

Defines a new matchmaking configuration for use with FlexMatch. Whether your are using FlexMatch with GameLift hosting or as a standalone matchmaking service, the matchmaking configuration sets out rules for matching players and forming teams. If you're also using GameLift hosting, it defines how to start game sessions for each match. Your matchmaking system can use multiple configurations to handle different game scenarios. All matchmaking requests (StartMatchmaking or StartMatchBackfill) identify the matchmaking configuration to use and provide player attributes consistent with that configuration.

To create a matchmaking configuration, you must provide the following: configuration name and FlexMatch mode (with or without GameLift hosting); a rule set that specifies how to evaluate players and find acceptable matches; whether player acceptance is required; and the maximum time allowed for a matchmaking attempt. When using FlexMatch with GameLift hosting, you also need to identify the game session queue to use when starting a game session for the match.

In addition, you must set up an Amazon Simple Notification Service (SNS) to receive matchmaking notifications, and provide the topic ARN in the matchmaking configuration. An alternative method, continuously polling ticket status with DescribeMatchmaking, is only suitable for games in development with low matchmaking usage.

Learn more

FlexMatch Developer Guide

Design a FlexMatch Matchmaker

Set Up FlexMatch Event Notification

Related operations

" + "documentation":"

Defines a new matchmaking configuration for use with FlexMatch. Whether you are using FlexMatch with GameLift hosting or as a standalone matchmaking service, the matchmaking configuration sets out rules for matching players and forming teams. If you're also using GameLift hosting, it defines how to start game sessions for each match. Your matchmaking system can use multiple configurations to handle different game scenarios. All matchmaking requests (StartMatchmaking or StartMatchBackfill) identify the matchmaking configuration to use and provide player attributes consistent with that configuration.

To create a matchmaking configuration, you must provide the following: configuration name and FlexMatch mode (with or without GameLift hosting); a rule set that specifies how to evaluate players and find acceptable matches; whether player acceptance is required; and the maximum time allowed for a matchmaking attempt. When using FlexMatch with GameLift hosting, you also need to identify the game session queue to use when starting a game session for the match.

In addition, you must set up an Amazon Simple Notification Service (SNS) topic to receive matchmaking notifications. Provide the topic ARN in the matchmaking configuration. An alternative method, continuously polling ticket status with DescribeMatchmaking, is only suitable for games in development with low matchmaking usage.

Learn more

Design a FlexMatch matchmaker

Set up FlexMatch event notification

Related actions

CreateMatchmakingConfiguration | DescribeMatchmakingConfigurations | UpdateMatchmakingConfiguration | DeleteMatchmakingConfiguration | CreateMatchmakingRuleSet | DescribeMatchmakingRuleSets | ValidateMatchmakingRuleSet | DeleteMatchmakingRuleSet | All APIs by task
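
A hedged sketch of a standalone FlexMatch configuration in botocore (rule set name and SNS topic ARN are placeholders):

import botocore.session

client = botocore.session.get_session().create_client("gamelift", region_name="us-east-1")

# Standalone matchmaking (no GameLift hosting); use FlexMatchMode="WITH_QUEUE"
# plus GameSessionQueueArns when GameLift should also start game sessions.
response = client.create_matchmaking_configuration(
    Name="my-matchmaker",
    RequestTimeoutSeconds=120,
    AcceptanceRequired=False,
    RuleSetName="my-rule-set",
    FlexMatchMode="STANDALONE",
    NotificationTarget="arn:aws:sns:us-east-1:111122223333:matchmaking-events",
)
print(response["Configuration"]["ConfigurationArn"])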

" }, "CreateMatchmakingRuleSet":{ "name":"CreateMatchmakingRuleSet", @@ -188,7 +206,7 @@ {"shape":"UnsupportedRegionException"}, {"shape":"TaggingFailedException"} ], - "documentation":"

Creates a new rule set for FlexMatch matchmaking. A rule set describes the type of match to create, such as the number and size of teams. It also sets the parameters for acceptable player matches, such as minimum skill level or character type. A rule set is used by a MatchmakingConfiguration.

To create a matchmaking rule set, provide unique rule set name and the rule set body in JSON format. Rule sets must be defined in the same Region as the matchmaking configuration they are used with.

Since matchmaking rule sets cannot be edited, it is a good idea to check the rule set syntax using ValidateMatchmakingRuleSet before creating a new rule set.

Learn more

Related operations

" + "documentation":"

Creates a new rule set for FlexMatch matchmaking. A rule set describes the type of match to create, such as the number and size of teams. It also sets the parameters for acceptable player matches, such as minimum skill level or character type. A rule set is used by a MatchmakingConfiguration.

To create a matchmaking rule set, provide a unique rule set name and the rule set body in JSON format. Rule sets must be defined in the same Region as the matchmaking configuration they are used with.

Since matchmaking rule sets cannot be edited, it is a good idea to check the rule set syntax using ValidateMatchmakingRuleSet before creating a new rule set.

Learn more

Related actions

CreateMatchmakingConfiguration | DescribeMatchmakingConfigurations | UpdateMatchmakingConfiguration | DeleteMatchmakingConfiguration | CreateMatchmakingRuleSet | DescribeMatchmakingRuleSets | ValidateMatchmakingRuleSet | DeleteMatchmakingRuleSet | All APIs by task
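
A minimal sketch that validates and then creates a rule set with botocore (the rule set body is read from a placeholder local file):

import botocore.session

client = botocore.session.get_session().create_client("gamelift", region_name="us-east-1")

# Rule set bodies are JSON documents; validate the syntax before creating,
# since rule sets cannot be edited afterwards.
with open("my_rule_set.json") as f:
    rule_set_body = f.read()

client.validate_matchmaking_rule_set(RuleSetBody=rule_set_body)
response = client.create_matchmaking_rule_set(Name="my-rule-set", RuleSetBody=rule_set_body)
print(response["RuleSet"]["RuleSetArn"])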

" }, "CreatePlayerSession":{ "name":"CreatePlayerSession", @@ -207,7 +225,7 @@ {"shape":"InvalidRequestException"}, {"shape":"NotFoundException"} ], - "documentation":"

Reserves an open player slot in an active game session. Before a player can be added, a game session must have an ACTIVE status, have a creation policy of ALLOW_ALL, and have an open player slot. To add a group of players to a game session, use CreatePlayerSessions. When the player connects to the game server and references a player session ID, the game server contacts the Amazon GameLift service to validate the player reservation and accept the player.

To create a player session, specify a game session ID, player ID, and optionally a string of player data. If successful, a slot is reserved in the game session for the player and a new PlayerSession object is returned. Player sessions cannot be updated.

Available in Amazon GameLift Local.

" + "documentation":"

Reserves an open player slot in a game session for a player. New player sessions can be created in any game session with an open slot that is in ACTIVE status and has a player creation policy of ACCEPT_ALL. You can add a group of players to a game session with CreatePlayerSessions.

To create a player session, specify a game session ID, player ID, and optionally a set of player data.

If successful, a slot is reserved in the game session for the player and a new PlayerSession object is returned with a player session ID. The player references the player session ID when sending a connection request to the game session, and the game server can use it to validate the player reservation with the GameLift service. Player sessions cannot be updated.

Available in Amazon GameLift Local.

Related actions

CreatePlayerSession | CreatePlayerSessions | DescribePlayerSessions | StartGameSessionPlacement | DescribeGameSessionPlacement | All APIs by task
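
A minimal botocore sketch of reserving a slot for one player (the game session ID and player ID are placeholders):

import botocore.session

client = botocore.session.get_session().create_client("gamelift", region_name="us-east-1")

# Reserve a slot in an ACTIVE game session; PlayerData is optional and opaque to GameLift.
response = client.create_player_session(
    GameSessionId="arn:aws:gamelift:us-east-1::gamesession/fleet-1111aaaa/gsess-3333cccc",
    PlayerId="player-123",
    PlayerData="team=blue",
)
print(response["PlayerSession"]["PlayerSessionId"])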

" }, "CreatePlayerSessions":{ "name":"CreatePlayerSessions", @@ -226,7 +244,7 @@ {"shape":"InvalidRequestException"}, {"shape":"NotFoundException"} ], - "documentation":"

Reserves open slots in a game session for a group of players. Before players can be added, a game session must have an ACTIVE status, have a creation policy of ALLOW_ALL, and have an open player slot. To add a single player to a game session, use CreatePlayerSession. When a player connects to the game server and references a player session ID, the game server contacts the Amazon GameLift service to validate the player reservation and accept the player.

To create player sessions, specify a game session ID, a list of player IDs, and optionally a set of player data strings. If successful, a slot is reserved in the game session for each player and a set of new PlayerSession objects is returned. Player sessions cannot be updated.

Available in Amazon GameLift Local.

" + "documentation":"

Reserves open slots in a game session for a group of players. New player sessions can be created in any game session with an open slot that is in ACTIVE status and has a player creation policy of ACCEPT_ALL. To add a single player to a game session, use CreatePlayerSession.

To create player sessions, specify a game session ID and a list of player IDs. Optionally, provide a set of player data for each player ID.

If successful, a slot is reserved in the game session for each player, and new PlayerSession objects are returned with player session IDs. Each player references their player session ID when sending a connection request to the game session, and the game server can use it to validate the player reservation with the GameLift service. Player sessions cannot be updated.

Available in Amazon GameLift Local.

Related actions

CreatePlayerSession | CreatePlayerSessions | DescribePlayerSessions | StartGameSessionPlacement | DescribeGameSessionPlacement | All APIs by task

" }, "CreateScript":{ "name":"CreateScript", @@ -243,7 +261,7 @@ {"shape":"TaggingFailedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Creates a new script record for your Realtime Servers script. Realtime scripts are JavaScript that provide configuration settings and optional custom game logic for your game. The script is deployed when you create a Realtime Servers fleet to host your game sessions. Script logic is executed during an active game session.

To create a new script record, specify a script name and provide the script file(s). The script files and all dependencies must be zipped into a single file. You can pull the zip file from either of these locations:

If the call is successful, a new script record is created with a unique script ID. If the script file is provided as a local file, the file is uploaded to an Amazon GameLift-owned S3 bucket and the script record's storage location reflects this location. If the script file is provided as an S3 bucket, Amazon GameLift accesses the file at this storage location as needed for deployment.

Learn more

Amazon GameLift Realtime Servers

Set Up a Role for Amazon GameLift Access

Related operations

" + "documentation":"

Creates a new script record for your Realtime Servers script. Realtime scripts are JavaScript files that provide configuration settings and optional custom game logic for your game. The script is deployed when you create a Realtime Servers fleet to host your game sessions. Script logic is executed during an active game session.

To create a new script record, specify a script name and provide the script file(s). The script files and all dependencies must be zipped into a single file. You can pull the zip file from either of these locations:

If the call is successful, a new script record is created with a unique script ID. If the script file is provided as a local file, the file is uploaded to an Amazon GameLift-owned S3 bucket and the script record's storage location reflects this location. If the script file is provided as an S3 bucket, Amazon GameLift accesses the file at this storage location as needed for deployment.

Learn more

Amazon GameLift Realtime Servers

Set Up a Role for Amazon GameLift Access

Related actions

CreateScript | ListScripts | DescribeScript | UpdateScript | DeleteScript | All APIs by task

" }, "CreateVpcPeeringAuthorization":{ "name":"CreateVpcPeeringAuthorization", @@ -259,7 +277,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Requests authorization to create or delete a peer connection between the VPC for your Amazon GameLift fleet and a virtual private cloud (VPC) in your AWS account. VPC peering enables the game servers on your fleet to communicate directly with other AWS resources. Once you've received authorization, call CreateVpcPeeringConnection to establish the peering connection. For more information, see VPC Peering with Amazon GameLift Fleets.

You can peer with VPCs that are owned by any AWS account you have access to, including the account that you use to manage your Amazon GameLift fleets. You cannot peer with VPCs that are in different Regions.

To request authorization to create a connection, call this operation from the AWS account with the VPC that you want to peer to your Amazon GameLift fleet. For example, to enable your game servers to retrieve data from a DynamoDB table, use the account that manages that DynamoDB resource. Identify the following values: (1) The ID of the VPC that you want to peer with, and (2) the ID of the AWS account that you use to manage Amazon GameLift. If successful, VPC peering is authorized for the specified VPC.

To request authorization to delete a connection, call this operation from the AWS account with the VPC that is peered with your Amazon GameLift fleet. Identify the following values: (1) VPC ID that you want to delete the peering connection for, and (2) ID of the AWS account that you use to manage Amazon GameLift.

The authorization remains valid for 24 hours unless it is canceled by a call to DeleteVpcPeeringAuthorization. You must create or delete the peering connection while the authorization is valid.

" + "documentation":"

Requests authorization to create or delete a peer connection between the VPC for your Amazon GameLift fleet and a virtual private cloud (VPC) in your AWS account. VPC peering enables the game servers on your fleet to communicate directly with other AWS resources. Once you've received authorization, call CreateVpcPeeringConnection to establish the peering connection. For more information, see VPC Peering with Amazon GameLift Fleets.

You can peer with VPCs that are owned by any AWS account you have access to, including the account that you use to manage your Amazon GameLift fleets. You cannot peer with VPCs that are in different Regions.

To request authorization to create a connection, call this operation from the AWS account with the VPC that you want to peer to your Amazon GameLift fleet. For example, to enable your game servers to retrieve data from a DynamoDB table, use the account that manages that DynamoDB resource. Identify the following values: (1) The ID of the VPC that you want to peer with, and (2) the ID of the AWS account that you use to manage Amazon GameLift. If successful, VPC peering is authorized for the specified VPC.

To request authorization to delete a connection, call this operation from the AWS account with the VPC that is peered with your Amazon GameLift fleet. Identify the following values: (1) VPC ID that you want to delete the peering connection for, and (2) ID of the AWS account that you use to manage Amazon GameLift.

The authorization remains valid for 24 hours unless it is canceled by a call to DeleteVpcPeeringAuthorization. You must create or delete the peering connection while the authorization is valid.

Related actions

CreateVpcPeeringAuthorization | DescribeVpcPeeringAuthorizations | DeleteVpcPeeringAuthorization | CreateVpcPeeringConnection | DescribeVpcPeeringConnections | DeleteVpcPeeringConnection | All APIs by task
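
A hedged sketch of requesting a peering authorization with botocore, run from the account that owns the VPC to be peered (the account and VPC IDs are placeholders):

import botocore.session

client = botocore.session.get_session().create_client("gamelift", region_name="us-east-1")

# Authorize peering between this account's VPC and the fleets managed in another account.
response = client.create_vpc_peering_authorization(
    GameLiftAwsAccountId="111122223333",   # account that manages your GameLift fleets
    PeerVpcId="vpc-0abc1234def567890",
)
print(response["VpcPeeringAuthorization"]["ExpirationTime"])   # valid for 24 hours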

" }, "CreateVpcPeeringConnection":{ "name":"CreateVpcPeeringConnection", @@ -275,7 +293,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Establishes a VPC peering connection between a virtual private cloud (VPC) in an AWS account with the VPC for your Amazon GameLift fleet. VPC peering enables the game servers on your fleet to communicate directly with other AWS resources. You can peer with VPCs in any AWS account that you have access to, including the account that you use to manage your Amazon GameLift fleets. You cannot peer with VPCs that are in different Regions. For more information, see VPC Peering with Amazon GameLift Fleets.

Before calling this operation to establish the peering connection, you first need to call CreateVpcPeeringAuthorization and identify the VPC you want to peer with. Once the authorization for the specified VPC is issued, you have 24 hours to establish the connection. These two operations handle all tasks necessary to peer the two VPCs, including acceptance, updating routing tables, etc.

To establish the connection, call this operation from the AWS account that is used to manage the Amazon GameLift fleets. Identify the following values: (1) The ID of the fleet you want to be enable a VPC peering connection for; (2) The AWS account with the VPC that you want to peer with; and (3) The ID of the VPC you want to peer with. This operation is asynchronous. If successful, a VpcPeeringConnection request is created. You can use continuous polling to track the request's status using DescribeVpcPeeringConnections, or by monitoring fleet events for success or failure using DescribeFleetEvents.

" + "documentation":"

Establishes a VPC peering connection between a virtual private cloud (VPC) in an AWS account with the VPC for your Amazon GameLift fleet. VPC peering enables the game servers on your fleet to communicate directly with other AWS resources. You can peer with VPCs in any AWS account that you have access to, including the account that you use to manage your Amazon GameLift fleets. You cannot peer with VPCs that are in different Regions. For more information, see VPC Peering with Amazon GameLift Fleets.

Before calling this operation to establish the peering connection, you first need to call CreateVpcPeeringAuthorization and identify the VPC you want to peer with. Once the authorization for the specified VPC is issued, you have 24 hours to establish the connection. These two operations handle all tasks necessary to peer the two VPCs, including acceptance, updating routing tables, etc.

To establish the connection, call this operation from the AWS account that is used to manage the Amazon GameLift fleets. Identify the following values: (1) The ID of the fleet you want to enable a VPC peering connection for; (2) The AWS account with the VPC that you want to peer with; and (3) The ID of the VPC you want to peer with. This operation is asynchronous. If successful, a VpcPeeringConnection request is created. You can use continuous polling to track the request's status with DescribeVpcPeeringConnections, or monitor fleet events for success or failure with DescribeFleetEvents.

Related actions

CreateVpcPeeringAuthorization | DescribeVpcPeeringAuthorizations | DeleteVpcPeeringAuthorization | CreateVpcPeeringConnection | DescribeVpcPeeringConnections | DeleteVpcPeeringConnection | All APIs by task

" }, "DeleteAlias":{ "name":"DeleteAlias", @@ -291,7 +309,7 @@ {"shape":"TaggingFailedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Deletes an alias. This operation removes all record of the alias. Game clients attempting to access a server process using the deleted alias receive an error. To delete an alias, specify the alias ID to be deleted.

" + "documentation":"

Deletes an alias. This operation removes all record of the alias. Game clients attempting to access a server process using the deleted alias receive an error. To delete an alias, specify the alias ID to be deleted.

Related actions

CreateAlias | ListAliases | DescribeAlias | UpdateAlias | DeleteAlias | ResolveAlias | All APIs by task

" }, "DeleteBuild":{ "name":"DeleteBuild", @@ -307,7 +325,7 @@ {"shape":"TaggingFailedException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Deletes a build. This operation permanently deletes the build resource and any uploaded build files. Deleting a build does not affect the status of any active fleets using the build, but you can no longer create new fleets with the deleted build.

To delete a build, specify the build ID.

Learn more

Upload a Custom Server Build

Related operations

" + "documentation":"

Deletes a build. This operation permanently deletes the build resource and any uploaded build files. Deleting a build does not affect the status of any active fleets using the build, but you can no longer create new fleets with the deleted build.

To delete a build, specify the build ID.

Learn more

Upload a Custom Server Build

Related actions

CreateBuild | ListBuilds | DescribeBuild | UpdateBuild | DeleteBuild | All APIs by task

" }, "DeleteFleet":{ "name":"DeleteFleet", @@ -324,7 +342,23 @@ {"shape":"InvalidRequestException"}, {"shape":"TaggingFailedException"} ], - "documentation":"

Deletes everything related to a fleet. Before deleting a fleet, you must set the fleet's desired capacity to zero. See UpdateFleetCapacity.

If the fleet being deleted has a VPC peering connection, you first need to get a valid authorization (good for 24 hours) by calling CreateVpcPeeringAuthorization. You do not need to explicitly delete the VPC peering connection--this is done as part of the delete fleet process.

This operation removes the fleet and its resources. Once a fleet is deleted, you can no longer use any of the resource in that fleet.

Learn more

Setting up GameLift Fleets

Related operations

" + "documentation":"

Deletes all resources and information related to a fleet. Any current fleet instances, including those in remote locations, are shut down. You don't need to call DeleteFleetLocations separately.

If the fleet being deleted has a VPC peering connection, you first need to get a valid authorization (good for 24 hours) by calling CreateVpcPeeringAuthorization. You do not need to explicitly delete the VPC peering connection--this is done as part of the delete fleet process.

To delete a fleet, specify the fleet ID to be terminated. During the deletion process the fleet status is changed to DELETING. When completed, the status switches to TERMINATED and the fleet event FLEET_DELETED is sent.

Learn more

Setting up GameLift Fleets

Related actions

CreateFleetLocations | UpdateFleetAttributes | UpdateFleetCapacity | UpdateFleetPortSettings | UpdateRuntimeConfiguration | StopFleetActions | StartFleetActions | PutScalingPolicy | DeleteFleet | DeleteFleetLocations | DeleteScalingPolicy | All APIs by task

" + }, + "DeleteFleetLocations":{ + "name":"DeleteFleetLocations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteFleetLocationsInput"}, + "output":{"shape":"DeleteFleetLocationsOutput"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

Removes locations from a multi-location fleet. When deleting a location, all game server processes and all instances that are still active in the location are shut down.

To delete fleet locations, identify the fleet ID and provide a list of the locations to be deleted.

If successful, GameLift sets the location status to DELETING, and begins to shut down existing server processes and terminate instances in each location being deleted. When completed, the location status changes to TERMINATED.

Learn more

Setting up GameLift fleets

Related actions

CreateFleetLocations | DescribeFleetLocationAttributes | DescribeFleetLocationCapacity | DescribeFleetLocationUtilization | DescribeFleetAttributes | DescribeFleetCapacity | DescribeFleetUtilization | UpdateFleetCapacity | StopFleetActions | DeleteFleetLocations | All APIs by task
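
A minimal botocore sketch of removing locations from a multi-location fleet (fleet ID and locations are placeholders):

import botocore.session

client = botocore.session.get_session().create_client("gamelift", region_name="us-east-1")

# Remove two remote locations; each location's status moves to DELETING.
response = client.delete_fleet_locations(
    FleetId="fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff",
    Locations=["us-west-2", "eu-central-1"],
)
for state in response.get("LocationStates", []):
    print(state["Location"], state["Status"])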

" }, "DeleteGameServerGroup":{ "name":"DeleteGameServerGroup", @@ -340,7 +374,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

Terminates a game server group and permanently deletes the game server group record. You have several options for how these resources are impacted when deleting the game server group. Depending on the type of delete operation selected, this operation might affect these resources:

To delete a game server group, identify the game server group to delete and specify the type of delete operation to initiate. Game server groups can only be deleted if they are in ACTIVE or ERROR status.

If the delete request is successful, a series of operations are kicked off. The game server group status is changed to DELETE_SCHEDULED, which prevents new game servers from being registered and stops automatic scaling activity. Once all game servers in the game server group are deregistered, GameLift FleetIQ can begin deleting resources. If any of the delete operations fail, the game server group is placed in ERROR status.

GameLift FleetIQ emits delete events to Amazon CloudWatch.

Learn more

GameLift FleetIQ Guide

Related operations

" + "documentation":"

This operation is used with the GameLift FleetIQ solution and game server groups.

Terminates a game server group and permanently deletes the game server group record. You have several options for how these resources are impacted when deleting the game server group. Depending on the type of delete operation selected, this operation might affect these resources:

To delete a game server group, identify the game server group to delete and specify the type of delete operation to initiate. Game server groups can only be deleted if they are in ACTIVE or ERROR status.

If the delete request is successful, a series of operations are kicked off. The game server group status is changed to DELETE_SCHEDULED, which prevents new game servers from being registered and stops automatic scaling activity. Once all game servers in the game server group are deregistered, GameLift FleetIQ can begin deleting resources. If any of the delete operations fail, the game server group is placed in ERROR status.

GameLift FleetIQ emits delete events to Amazon CloudWatch.

Learn more

GameLift FleetIQ Guide

Related actions

CreateGameServerGroup | ListGameServerGroups | DescribeGameServerGroup | UpdateGameServerGroup | DeleteGameServerGroup | ResumeGameServerGroup | SuspendGameServerGroup | DescribeGameServerInstances | All APIs by task

" }, "DeleteGameSessionQueue":{ "name":"DeleteGameSessionQueue", @@ -357,7 +391,7 @@ {"shape":"UnauthorizedException"}, {"shape":"TaggingFailedException"} ], - "documentation":"

Deletes a game session queue. Once a queue is successfully deleted, unfulfilled StartGameSessionPlacement requests that reference the queue will fail. To delete a queue, specify the queue name.

Learn more

Using Multi-Region Queues

Related operations

" + "documentation":"

Deletes a game session queue. Once a queue is successfully deleted, unfulfilled StartGameSessionPlacement requests that reference the queue will fail. To delete a queue, specify the queue name.

Learn more

Using Multi-Region Queues

Related actions

CreateGameSessionQueue | DescribeGameSessionQueues | UpdateGameSessionQueue | DeleteGameSessionQueue | All APIs by task

" }, "DeleteMatchmakingConfiguration":{ "name":"DeleteMatchmakingConfiguration", @@ -374,7 +408,7 @@ {"shape":"UnsupportedRegionException"}, {"shape":"TaggingFailedException"} ], - "documentation":"

Permanently removes a FlexMatch matchmaking configuration. To delete, specify the configuration name. A matchmaking configuration cannot be deleted if it is being used in any active matchmaking tickets.

Related operations

" + "documentation":"

Permanently removes a FlexMatch matchmaking configuration. To delete, specify the configuration name. A matchmaking configuration cannot be deleted if it is being used in any active matchmaking tickets.

Related actions

CreateMatchmakingConfiguration | DescribeMatchmakingConfigurations | UpdateMatchmakingConfiguration | DeleteMatchmakingConfiguration | CreateMatchmakingRuleSet | DescribeMatchmakingRuleSets | ValidateMatchmakingRuleSet | DeleteMatchmakingRuleSet | All APIs by task

" }, "DeleteMatchmakingRuleSet":{ "name":"DeleteMatchmakingRuleSet", @@ -391,7 +425,7 @@ {"shape":"NotFoundException"}, {"shape":"TaggingFailedException"} ], - "documentation":"

Deletes an existing matchmaking rule set. To delete the rule set, provide the rule set name. Rule sets cannot be deleted if they are currently being used by a matchmaking configuration.

Learn more

Related operations

" + "documentation":"

Deletes an existing matchmaking rule set. To delete the rule set, provide the rule set name. Rule sets cannot be deleted if they are currently being used by a matchmaking configuration.

Learn more

Related actions

CreateMatchmakingConfiguration | DescribeMatchmakingConfigurations | UpdateMatchmakingConfiguration | DeleteMatchmakingConfiguration | CreateMatchmakingRuleSet | DescribeMatchmakingRuleSets | ValidateMatchmakingRuleSet | DeleteMatchmakingRuleSet | All APIs by task

" }, "DeleteScalingPolicy":{ "name":"DeleteScalingPolicy", @@ -406,7 +440,7 @@ {"shape":"UnauthorizedException"}, {"shape":"NotFoundException"} ], - "documentation":"

Deletes a fleet scaling policy. Once deleted, the policy is no longer in force and GameLift removes all record of it. To delete a scaling policy, specify both the scaling policy name and the fleet ID it is associated with.

To temporarily suspend scaling policies, call StopFleetActions. This operation suspends all policies for the fleet.

" + "documentation":"

Deletes a fleet scaling policy. Once deleted, the policy is no longer in force and GameLift removes all record of it. To delete a scaling policy, specify both the scaling policy name and the fleet ID it is associated with.

To temporarily suspend scaling policies, call StopFleetActions. This operation suspends all policies for the fleet.

Related actions

DescribeFleetCapacity | UpdateFleetCapacity | DescribeEC2InstanceLimits | PutScalingPolicy | DescribeScalingPolicies | DeleteScalingPolicy | StopFleetActions | StartFleetActions | All APIs by task

" }, "DeleteScript":{ "name":"DeleteScript", @@ -422,7 +456,7 @@ {"shape":"TaggingFailedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Deletes a Realtime script. This operation permanently deletes the script record. If script files were uploaded, they are also deleted (files stored in an S3 bucket are not deleted).

To delete a script, specify the script ID. Before deleting a script, be sure to terminate all fleets that are deployed with the script being deleted. Fleet instances periodically check for script updates, and if the script record no longer exists, the instance will go into an error state and be unable to host game sessions.

Learn more

Amazon GameLift Realtime Servers

Related operations

" + "documentation":"

Deletes a Realtime script. This operation permanently deletes the script record. If script files were uploaded, they are also deleted (files stored in an S3 bucket are not deleted).

To delete a script, specify the script ID. Before deleting a script, be sure to terminate all fleets that are deployed with the script being deleted. Fleet instances periodically check for script updates, and if the script record no longer exists, the instance will go into an error state and be unable to host game sessions.

Learn more

Amazon GameLift Realtime Servers

Related actions

CreateScript | ListScripts | DescribeScript | UpdateScript | DeleteScript | All APIs by task

" }, "DeleteVpcPeeringAuthorization":{ "name":"DeleteVpcPeeringAuthorization", @@ -438,7 +472,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Cancels a pending VPC peering authorization for the specified VPC. If you need to delete an existing VPC peering connection, call DeleteVpcPeeringConnection.

" + "documentation":"

Cancels a pending VPC peering authorization for the specified VPC. If you need to delete an existing VPC peering connection, call DeleteVpcPeeringConnection.

Related actions

CreateVpcPeeringAuthorization | DescribeVpcPeeringAuthorizations | DeleteVpcPeeringAuthorization | CreateVpcPeeringConnection | DescribeVpcPeeringConnections | DeleteVpcPeeringConnection | All APIs by task

" }, "DeleteVpcPeeringConnection":{ "name":"DeleteVpcPeeringConnection", @@ -454,7 +488,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Removes a VPC peering connection. To delete the connection, you must have a valid authorization for the VPC peering connection that you want to delete. You can check for an authorization by calling DescribeVpcPeeringAuthorizations or request a new one using CreateVpcPeeringAuthorization.

Once a valid authorization exists, call this operation from the AWS account that is used to manage the Amazon GameLift fleets. Identify the connection to delete by the connection ID and fleet ID. If successful, the connection is removed.

" + "documentation":"

Removes a VPC peering connection. To delete the connection, you must have a valid authorization for the VPC peering connection that you want to delete. You can check for an authorization by calling DescribeVpcPeeringAuthorizations or request a new one using CreateVpcPeeringAuthorization.

Once a valid authorization exists, call this operation from the AWS account that is used to manage the Amazon GameLift fleets. Identify the connection to delete by the connection ID and fleet ID. If successful, the connection is removed.

Related actions

CreateVpcPeeringAuthorization | DescribeVpcPeeringAuthorizations | DeleteVpcPeeringAuthorization | CreateVpcPeeringConnection | DescribeVpcPeeringConnections | DeleteVpcPeeringConnection | All APIs by task

" }, "DeregisterGameServer":{ "name":"DeregisterGameServer", @@ -469,7 +503,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

Removes the game server from a game server group. As a result of this operation, the deregistered game server can no longer be claimed and will not be returned in a list of active game servers.

To deregister a game server, specify the game server group and game server ID. If successful, this operation emits a CloudWatch event with termination timestamp and reason.

Learn more

GameLift FleetIQ Guide

Related operations

" + "documentation":"

This operation is used with the GameLift FleetIQ solution and game server groups.

Removes the game server from a game server group. As a result of this operation, the deregistered game server can no longer be claimed and will not be returned in a list of active game servers.

To deregister a game server, specify the game server group and game server ID. If successful, this operation emits a CloudWatch event with termination timestamp and reason.

Learn more

GameLift FleetIQ Guide

Related actions

RegisterGameServer | ListGameServers | ClaimGameServer | DescribeGameServer | UpdateGameServer | DeregisterGameServer | All APIs by task

" }, "DescribeAlias":{ "name":"DescribeAlias", @@ -485,7 +519,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves properties for an alias. This operation returns all alias metadata and settings. To get an alias's target fleet ID only, use ResolveAlias.

To get alias properties, specify the alias ID. If successful, the requested alias record is returned.

" + "documentation":"

Retrieves properties for an alias. This operation returns all alias metadata and settings. To get an alias's target fleet ID only, use ResolveAlias.

To get alias properties, specify the alias ID. If successful, the requested alias record is returned.

Related actions

CreateAlias | ListAliases | DescribeAlias | UpdateAlias | DeleteAlias | ResolveAlias | All APIs by task

" }, "DescribeBuild":{ "name":"DescribeBuild", @@ -501,7 +535,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves properties for a custom game build. To request a build resource, specify a build ID. If successful, an object containing the build properties is returned.

Learn more

Upload a Custom Server Build

Related operations

" + "documentation":"

Retrieves properties for a custom game build. To request a build resource, specify a build ID. If successful, an object containing the build properties is returned.

Learn more

Upload a Custom Server Build

Related actions

CreateBuild | ListBuilds | DescribeBuild | UpdateBuild | DeleteBuild | All APIs by task

" }, "DescribeEC2InstanceLimits":{ "name":"DescribeEC2InstanceLimits", @@ -516,7 +550,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves the following information for the specified EC2 instance type:

To learn more about the capabilities of each instance type, see Amazon EC2 Instance Types. Note that the instance types offered may vary depending on the region.

Learn more

Setting up GameLift Fleets

Related operations

" + "documentation":"

Retrieves the GameLift service limits and current utilization for an AWS Region or location. Instance limits control the number of instances, per instance type, per location, that your AWS account can use. Learn more at Amazon EC2 Instance Types. The information returned includes the maximum number of instances allowed and your account's current usage across all fleets. This information can affect your ability to scale your GameLift fleets. You can request a limit increase for your account by using the Service limits page in the GameLift console.

Instance limits differ based on whether the instances are deployed in a fleet's home Region or in a remote location. For remote locations, limits also differ based on the combination of home Region and remote location. All requests must specify an AWS Region (either explicitly or as your default settings). To get the limit for a remote location, you must also specify the location. For example, the following requests all return different results:

This operation can be used in the following ways:

If successful, an EC2InstanceLimits object is returned with limits and usage data for each requested instance type.

Learn more

Setting up GameLift fleets

Related actions

CreateFleet | UpdateFleetCapacity | PutScalingPolicy | DescribeEC2InstanceLimits | DescribeFleetAttributes | DescribeFleetLocationAttributes | UpdateFleetAttributes | StopFleetActions | DeleteFleet | All APIs by task
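
A hedged sketch comparing home-Region and remote-location limits with botocore (the instance type and location are examples only):

import botocore.session

client = botocore.session.get_session().create_client("gamelift", region_name="us-east-1")

# Limits in the current Region, then in a remote location of that Region.
home = client.describe_ec2_instance_limits(EC2InstanceType="c5.large")
remote = client.describe_ec2_instance_limits(EC2InstanceType="c5.large", Location="eu-central-1")
for limit in home["EC2InstanceLimits"] + remote["EC2InstanceLimits"]:
    print(limit["EC2InstanceType"], limit["CurrentInstances"], limit["InstanceLimit"])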

" }, "DescribeFleetAttributes":{ "name":"DescribeFleetAttributes", @@ -532,7 +566,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves core properties, including configuration, status, and metadata, for a fleet.

To get attributes for one or more fleets, provide a list of fleet IDs or fleet ARNs. To get attributes for all fleets, do not specify a fleet identifier. When requesting attributes for multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetAttributes object is returned for each fleet requested, unless the fleet identifier is not found.

Some API operations may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed number.

Learn more

Setting up GameLift Fleets

Related operations

" + "documentation":"

Retrieves core fleet-wide properties, including the computing hardware and deployment configuration for all instances in the fleet.

This operation can be used in the following ways:

When requesting attributes for multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages.

If successful, a FleetAttributes object is returned for each fleet requested, unless the fleet identifier is not found.

Some API operations limit the number of fleet IDs that are allowed in one request. If a request exceeds this limit, the request fails and the error message contains the maximum allowed number.

Learn more

Setting up GameLift fleets

Related actions

ListFleets | DescribeEC2InstanceLimits | DescribeFleetAttributes | DescribeFleetCapacity | DescribeFleetEvents | DescribeFleetLocationAttributes | DescribeFleetPortSettings | DescribeFleetUtilization | DescribeRuntimeConfiguration | DescribeScalingPolicies | All APIs by task
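
A minimal sketch of paging through attributes for all fleets with botocore (omitting FleetIds returns every fleet in the Region):

import botocore.session

client = botocore.session.get_session().create_client("gamelift", region_name="us-east-1")

# Page through all fleets using the Limit/NextToken pagination parameters.
next_token = None
while True:
    kwargs = {"Limit": 10}
    if next_token:
        kwargs["NextToken"] = next_token
    page = client.describe_fleet_attributes(**kwargs)
    for fleet in page["FleetAttributes"]:
        print(fleet["FleetId"], fleet["Status"])
    next_token = page.get("NextToken")
    if not next_token:
        break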

" }, "DescribeFleetCapacity":{ "name":"DescribeFleetCapacity", @@ -548,7 +582,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves the current capacity statistics for one or more fleets. These statistics present a snapshot of the fleet's instances and provide insight on current or imminent scaling activity. To get statistics on game hosting activity in the fleet, see DescribeFleetUtilization.

You can request capacity for all fleets or specify a list of one or more fleet identifiers. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetCapacity object is returned for each requested fleet ID. When a list of fleet IDs is provided, attribute objects are returned only for fleets that currently exist.

Some API operations may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

Learn more

Setting up GameLift Fleets

GameLift Metrics for Fleets

Related operations

" + "documentation":"

Retrieves the resource capacity settings for one or more fleets. The data returned includes the current fleet capacity (number of EC2 instances), and settings that control how capacity scaling is managed. For fleets with remote locations, this operation retrieves data for the fleet's home Region only. See DescribeFleetLocationCapacity to get capacity settings for a fleet's remote locations.

This operation can be used in the following ways:

When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages.

If successful, a FleetCapacity object is returned for each requested fleet ID. Each FleetCapacity object includes a Location property, which is set to the fleet's home Region. When a list of fleet IDs is provided, attribute objects are returned only for fleets that currently exist.

Some API operations may limit the number of fleet IDs that are allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

Learn more

Setting up GameLift fleets

GameLift metrics for fleets

Related actions

ListFleets | DescribeEC2InstanceLimits | DescribeFleetAttributes | DescribeFleetCapacity | DescribeFleetEvents | DescribeFleetLocationAttributes | DescribeFleetPortSettings | DescribeFleetUtilization | DescribeRuntimeConfiguration | DescribeScalingPolicies | All APIs by task

" }, "DescribeFleetEvents":{ "name":"DescribeFleetEvents", @@ -564,7 +598,55 @@ {"shape":"UnauthorizedException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Retrieves entries from the specified fleet's event log. You can specify a time range to limit the result set. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a collection of event log entries matching the request are returned.

Learn more

Setting up GameLift Fleets

Related operations

" + "documentation":"

Retrieves entries from a fleet's event log. Fleet events are initiated by changes in status, such as during fleet creation and termination, changes in capacity, etc. If a fleet has multiple locations, events are also initiated by changes to status and capacity in remote locations.

You can specify a time range to limit the result set. Use the pagination parameters to retrieve results as a set of sequential pages.

If successful, a collection of event log entries matching the request are returned.

Learn more

Setting up GameLift fleets

Related actions

ListFleets | DescribeEC2InstanceLimits | DescribeFleetAttributes | DescribeFleetCapacity | DescribeFleetEvents | DescribeFleetLocationAttributes | DescribeFleetPortSettings | DescribeFleetUtilization | DescribeRuntimeConfiguration | DescribeScalingPolicies | All APIs by task

" + }, + "DescribeFleetLocationAttributes":{ + "name":"DescribeFleetLocationAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFleetLocationAttributesInput"}, + "output":{"shape":"DescribeFleetLocationAttributesOutput"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

Retrieves information on a fleet's remote locations, including life-cycle status and any suspended fleet activity.

This operation can be used in the following ways:

When requesting attributes for multiple locations, use the pagination parameters to retrieve results as a set of sequential pages.

If successful, a LocationAttributes object is returned for each requested location. If the fleet does not have a requested location, no information is returned. This operation does not return the home Region. To get information on a fleet's home Region, call DescribeFleetAttributes.

Learn more

Setting up GameLift fleets

Related actions

CreateFleetLocations | DescribeFleetLocationAttributes | DescribeFleetLocationCapacity | DescribeFleetLocationUtilization | DescribeFleetAttributes | DescribeFleetCapacity | DescribeFleetUtilization | UpdateFleetCapacity | StopFleetActions | DeleteFleetLocations | All APIs by task

" + }, + "DescribeFleetLocationCapacity":{ + "name":"DescribeFleetLocationCapacity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFleetLocationCapacityInput"}, + "output":{"shape":"DescribeFleetLocationCapacityOutput"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

Retrieves the resource capacity settings for a fleet location. The data returned includes the current capacity (number of EC2 instances) and some scaling settings for the requested fleet location. Use this operation to retrieve capacity information for a fleet's remote location or home Region (you can also retrieve home Region capacity by calling DescribeFleetCapacity).

To retrieve capacity data, identify a fleet and location.

If successful, a FleetCapacity object is returned for the requested fleet location.

Learn more

Setting up GameLift fleets

GameLift metrics for fleets

Related actions

CreateFleetLocations | DescribeFleetLocationAttributes | DescribeFleetLocationCapacity | DescribeFleetLocationUtilization | DescribeFleetAttributes | DescribeFleetCapacity | DescribeFleetUtilization | UpdateFleetCapacity | StopFleetActions | DeleteFleetLocations | All APIs by task
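
A minimal botocore sketch of reading capacity for one remote location (fleet ID and location are placeholders; the instance-count keys are assumed from the EC2InstanceCounts shape):

import botocore.session

client = botocore.session.get_session().create_client("gamelift", region_name="us-east-1")

# Capacity snapshot for a single remote location of a multi-location fleet.
response = client.describe_fleet_location_capacity(
    FleetId="fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff",
    Location="eu-central-1",
)
counts = response["FleetCapacity"]["InstanceCounts"]
print(counts["DESIRED"], counts["ACTIVE"], counts["IDLE"])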

" + }, + "DescribeFleetLocationUtilization":{ + "name":"DescribeFleetLocationUtilization", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFleetLocationUtilizationInput"}, + "output":{"shape":"DescribeFleetLocationUtilizationOutput"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

Retrieves current usage data for a fleet location. Utilization data provides a snapshot of current game hosting activity at the requested location. Use this operation to retrieve utilization information for a fleet's remote location or home Region (you can also retrieve home Region utilization by calling DescribeFleetUtilization).

To retrieve utilization data, identify a fleet and location.

If successful, a FleetUtilization object is returned for the requested fleet location.

Learn more

Setting up GameLift fleets

GameLift metrics for fleets

Related actions

CreateFleetLocations | DescribeFleetLocationAttributes | DescribeFleetLocationCapacity | DescribeFleetLocationUtilization | DescribeFleetAttributes | DescribeFleetCapacity | DescribeFleetUtilization | UpdateFleetCapacity | StopFleetActions | DeleteFleetLocations | All APIs by task

" }, "DescribeFleetPortSettings":{ "name":"DescribeFleetPortSettings", @@ -580,7 +662,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves a fleet's inbound connection permissions. Connection permissions specify the range of IP addresses and port settings that incoming traffic can use to access server processes in the fleet. Game sessions that are running on instances in the fleet use connections that fall in this range.

To get a fleet's inbound connection permissions, specify the fleet's unique identifier. If successful, a collection of IpPermission objects is returned for the requested fleet ID. If the requested fleet has been deleted, the result set is empty.

Learn more

Setting up GameLift Fleets

Related operations

" + "documentation":"

Retrieves a fleet's inbound connection permissions. Connection permissions specify the range of IP addresses and port settings that incoming traffic can use to access server processes in the fleet. Game sessions that are running on instances in the fleet must use connections that fall in this range.

This operation can be used in the following ways:

If successful, a set of IpPermission objects is returned for the requested fleet ID. When a location is specified, a pending status is included. If the requested fleet has been deleted, the result set is empty.

Learn more

Setting up GameLift fleets

Related actions

ListFleets | DescribeEC2InstanceLimits | DescribeFleetAttributes | DescribeFleetCapacity | DescribeFleetEvents | DescribeFleetLocationAttributes | DescribeFleetPortSettings | DescribeFleetUtilization | DescribeRuntimeConfiguration | DescribeScalingPolicies | All APIs by task

" }, "DescribeFleetUtilization":{ "name":"DescribeFleetUtilization", @@ -596,7 +678,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves utilization statistics for one or more fleets. These statistics provide insight into how available hosting resources are currently being used. To get statistics on available hosting resources, see DescribeFleetCapacity.

You can request utilization data for all fleets, or specify a list of one or more fleet IDs. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetUtilization object is returned for each requested fleet ID, unless the fleet identifier is not found.

Some API operations may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

Learn more

Setting up GameLift Fleets

GameLift Metrics for Fleets

Related operations

" + "documentation":"

Retrieves utilization statistics for one or more fleets. Utilization data provides a snapshot of how the fleet's hosting resources are currently being used. For fleets with remote locations, this operation retrieves data for the fleet's home Region only. See DescribeFleetLocationUtilization to get utilization statistics for a fleet's remote locations.

This operation can be used in the following ways:

When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages.

If successful, a FleetUtilization object is returned for each requested fleet ID, unless the fleet identifier is not found. Each fleet utilization object includes a Location property, which is set to the fleet's home Region.

Some API operations may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

Learn more

Setting up GameLift Fleets

GameLift Metrics for Fleets

Related actions

ListFleets | DescribeEC2InstanceLimits | DescribeFleetAttributes | DescribeFleetCapacity | DescribeFleetEvents | DescribeFleetLocationAttributes | DescribeFleetPortSettings | DescribeFleetUtilization | DescribeRuntimeConfiguration | DescribeScalingPolicies | All APIs by task
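As a sketch of the pagination pattern this operation describes, the loop below requests utilization for every fleet in the Region and walks the NextToken pages; the Region name is a placeholder:

```python
import boto3

gamelift = boto3.client("gamelift", region_name="us-west-2")

# Request utilization for all fleets, one page at a time.
utilization, next_token = [], None
while True:
    kwargs = {"Limit": 25}
    if next_token:
        kwargs["NextToken"] = next_token
    page = gamelift.describe_fleet_utilization(**kwargs)
    utilization.extend(page.get("FleetUtilization", []))
    next_token = page.get("NextToken")
    if not next_token:
        break

for entry in utilization:
    # Location is the fleet's home Region, per the note above.
    print(entry["FleetId"], entry.get("ActiveGameSessionCount"), entry.get("Location"))
```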

" }, "DescribeGameServer":{ "name":"DescribeGameServer", @@ -612,7 +694,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

Retrieves information for a registered game server. Information includes game server status, health check info, and the instance that the game server is running on.

To retrieve game server information, specify the game server ID. If successful, the requested game server object is returned.

Learn more

GameLift FleetIQ Guide

Related operations

" + "documentation":"

This operation is used with the GameLift FleetIQ solution and game server groups.

Retrieves information for a registered game server. Information includes game server status, health check info, and the instance that the game server is running on.

To retrieve game server information, specify the game server ID. If successful, the requested game server object is returned.

Learn more

GameLift FleetIQ Guide

Related actions

RegisterGameServer | ListGameServers | ClaimGameServer | DescribeGameServer | UpdateGameServer | DeregisterGameServer | All APIs by task

" }, "DescribeGameServerGroup":{ "name":"DescribeGameServerGroup", @@ -628,7 +710,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

Retrieves information on a game server group. This operation returns only properties related to GameLift FleetIQ. To view or update properties for the corresponding Auto Scaling group, such as launch template, auto scaling policies, and maximum/minimum group size, access the Auto Scaling group directly.

To get attributes for a game server group, provide a group name or ARN value. If successful, a GameServerGroup object is returned.

Learn more

GameLift FleetIQ Guide

Related operations

" + "documentation":"

This operation is used with the GameLift FleetIQ solution and game server groups.

Retrieves information on a game server group. This operation returns only properties related to GameLift FleetIQ. To view or update properties for the corresponding Auto Scaling group, such as launch template, auto scaling policies, and maximum/minimum group size, access the Auto Scaling group directly.

To get attributes for a game server group, provide a group name or ARN value. If successful, a GameServerGroup object is returned.

Learn more

GameLift FleetIQ Guide

Related actions

CreateGameServerGroup | ListGameServerGroups | DescribeGameServerGroup | UpdateGameServerGroup | DeleteGameServerGroup | ResumeGameServerGroup | SuspendGameServerGroup | DescribeGameServerInstances | All APIs by task

" }, "DescribeGameServerInstances":{ "name":"DescribeGameServerInstances", @@ -644,7 +726,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

Retrieves status information about the Amazon EC2 instances associated with a GameLift FleetIQ game server group. Use this operation to detect when instances are active or not available to host new game servers. If you are looking for instance configuration information, call DescribeGameServerGroup or access the corresponding Auto Scaling group properties.

To request status for all instances in the game server group, provide a game server group ID only. To request status for specific instances, provide the game server group ID and one or more instance IDs. Use the pagination parameters to retrieve results in sequential segments. If successful, a collection of GameServerInstance objects is returned.

This operation is not designed to be called with every game server claim request; this practice can cause you to exceed your API limit, which results in errors. Instead, as a best practice, cache the results and refresh your cache no more than once every 10 seconds.

Learn more

GameLift FleetIQ Guide

Related operations

" + "documentation":"

This operation is used with the GameLift FleetIQ solution and game server groups.

Retrieves status information about the Amazon EC2 instances associated with a GameLift FleetIQ game server group. Use this operation to detect when instances are active or not available to host new game servers. If you are looking for instance configuration information, call DescribeGameServerGroup or access the corresponding Auto Scaling group properties.

To request status for all instances in the game server group, provide a game server group ID only. To request status for specific instances, provide the game server group ID and one or more instance IDs. Use the pagination parameters to retrieve results in sequential segments. If successful, a collection of GameServerInstance objects is returned.

This operation is not designed to be called with every game server claim request; this practice can cause you to exceed your API limit, which results in errors. Instead, as a best practice, cache the results and refresh your cache no more than once every 10 seconds.

Learn more

GameLift FleetIQ Guide

Related actions

CreateGameServerGroup | ListGameServerGroups | DescribeGameServerGroup | UpdateGameServerGroup | DeleteGameServerGroup | ResumeGameServerGroup | SuspendGameServerGroup | DescribeGameServerInstances | All APIs by task
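Because the documentation above recommends caching results and refreshing no more than once every 10 seconds, a simple time-based cache around the call is one way to follow that guidance. This is a sketch with a hypothetical game server group name, not an official pattern:

```python
import time
import boto3

gamelift = boto3.client("gamelift", region_name="us-west-2")

_CACHE = {"expires": 0.0, "instances": []}

def get_game_server_instances(group_name, ttl=10.0):
    """Return cached GameServerInstance records, refreshing at most every `ttl` seconds."""
    now = time.monotonic()
    if now < _CACHE["expires"]:
        return _CACHE["instances"]

    instances, next_token = [], None
    while True:
        kwargs = {"GameServerGroupName": group_name}
        if next_token:
            kwargs["NextToken"] = next_token
        page = gamelift.describe_game_server_instances(**kwargs)
        instances.extend(page.get("GameServerInstances", []))
        next_token = page.get("NextToken")
        if not next_token:
            break

    _CACHE.update(expires=now + ttl, instances=instances)
    return instances

# "my-gsg" is a hypothetical group name.
for inst in get_game_server_instances("my-gsg"):
    print(inst["InstanceId"], inst["InstanceStatus"])
```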

" }, "DescribeGameSessionDetails":{ "name":"DescribeGameSessionDetails", @@ -661,7 +743,7 @@ {"shape":"UnauthorizedException"}, {"shape":"TerminalRoutingStrategyException"} ], - "documentation":"

Retrieves properties, including the protection policy in force, for one or more game sessions. This operation can be used in several ways: (1) provide a GameSessionId or GameSessionArn to request details for a specific game session; (2) provide either a FleetId or an AliasId to request properties for all game sessions running on a fleet.

To get game session record(s), specify just one of the following: game session ID, fleet ID, or alias ID. You can filter this request by game session status. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a GameSessionDetail object is returned for each session matching the request.

" + "documentation":"

Retrieves additional game session properties, including the game session protection policy in force, for a set of one or more game sessions in a specific fleet location. You can optionally filter the results by current game session status. Alternatively, use SearchGameSessions to request a set of active game sessions that are filtered by certain criteria. To retrieve all game session properties, use DescribeGameSessions.

This operation can be used in the following ways:

Use the pagination parameters to retrieve results as a set of sequential pages.

If successful, a GameSessionDetail object is returned for each game session that matches the request.

Learn more

Find a game session

Related actions

CreateGameSession | DescribeGameSessions | DescribeGameSessionDetails | SearchGameSessions | UpdateGameSession | GetGameSessionLogUrl | StartGameSessionPlacement | DescribeGameSessionPlacement | StopGameSessionPlacement | All APIs by task

" }, "DescribeGameSessionPlacement":{ "name":"DescribeGameSessionPlacement", @@ -677,7 +759,7 @@ {"shape":"NotFoundException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves properties and current status of a game session placement request. To get game session placement details, specify the placement ID. If successful, a GameSessionPlacement object is returned.

" + "documentation":"

Retrieves information, including current status, about a game session placement request.

To get game session placement details, specify the placement ID.

If successful, a GameSessionPlacement object is returned.

Related actions

CreateGameSession | DescribeGameSessions | DescribeGameSessionDetails | SearchGameSessions | UpdateGameSession | GetGameSessionLogUrl | StartGameSessionPlacement | DescribeGameSessionPlacement | StopGameSessionPlacement | All APIs by task
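A small boto3 sketch of checking a placement by its ID; the placement ID and the polling cadence are assumptions for illustration, not part of the service model:

```python
import time
import boto3

gamelift = boto3.client("gamelift", region_name="us-west-2")

# Hypothetical placement ID supplied to an earlier StartGameSessionPlacement call.
PLACEMENT_ID = "my-placement-0001"

while True:
    placement = gamelift.describe_game_session_placement(
        PlacementId=PLACEMENT_ID
    )["GameSessionPlacement"]
    status = placement["Status"]
    if status != "PENDING":
        break
    time.sleep(2)  # simple fixed delay; tune for your own workload

print(status, placement.get("GameSessionArn"))
```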

" }, "DescribeGameSessionQueues":{ "name":"DescribeGameSessionQueues", @@ -693,7 +775,7 @@ {"shape":"NotFoundException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves the properties for one or more game session queues. When requesting multiple queues, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a GameSessionQueue object is returned for each requested queue. When specifying a list of queues, objects are returned only for queues that currently exist in the Region.

Learn more

View Your Queues

Related operations

" + "documentation":"

Retrieves the properties for one or more game session queues. When requesting multiple queues, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a GameSessionQueue object is returned for each requested queue. When specifying a list of queues, objects are returned only for queues that currently exist in the Region.

Learn more

View Your Queues

Related actions

CreateGameSessionQueue | DescribeGameSessionQueues | UpdateGameSessionQueue | DeleteGameSessionQueue | All APIs by task

" }, "DescribeGameSessions":{ "name":"DescribeGameSessions", @@ -710,7 +792,7 @@ {"shape":"UnauthorizedException"}, {"shape":"TerminalRoutingStrategyException"} ], - "documentation":"

Retrieves a set of one or more game sessions. Request a specific game session or request all game sessions on a fleet. Alternatively, use SearchGameSessions to request a set of active game sessions that are filtered by certain criteria. To retrieve protection policy settings for game sessions, use DescribeGameSessionDetails.

To get game sessions, specify one of the following: game session ID, fleet ID, or alias ID. You can filter this request by game session status. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a GameSession object is returned for each game session matching the request.

Available in Amazon GameLift Local.

" + "documentation":"

Retrieves a set of one or more game sessions in a specific fleet location. You can optionally filter the results by current game session status. Alternatively, use SearchGameSessions to request a set of active game sessions that are filtered by certain criteria. To retrieve the protection policy for game sessions, use DescribeGameSessionDetails.

This operation can be used in the following ways:

Use the pagination parameters to retrieve results as a set of sequential pages.

If successful, a GameSession object is returned for each game session that matches the request.

Available in GameLift Local.

Learn more

Find a game session

Related actions

CreateGameSession | DescribeGameSessions | DescribeGameSessionDetails | SearchGameSessions | UpdateGameSession | GetGameSessionLogUrl | StartGameSessionPlacement | DescribeGameSessionPlacement | StopGameSessionPlacement | All APIs by task
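A minimal boto3 sketch of the fleet-scoped variant with a status filter and pagination; the fleet ID is a placeholder, and AliasId or GameSessionId could be passed instead:

```python
import boto3

gamelift = boto3.client("gamelift", region_name="us-west-2")

FLEET_ID = "fleet-1111aaaa-22bb-33cc-44dd-5555eeee6666"  # hypothetical

sessions, next_token = [], None
while True:
    kwargs = {"FleetId": FLEET_ID, "StatusFilter": "ACTIVE", "Limit": 20}
    if next_token:
        kwargs["NextToken"] = next_token
    page = gamelift.describe_game_sessions(**kwargs)
    sessions.extend(page.get("GameSessions", []))
    next_token = page.get("NextToken")
    if not next_token:
        break

for s in sessions:
    print(s["GameSessionId"], s["Status"], s.get("CurrentPlayerSessionCount"))
```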

" }, "DescribeInstances":{ "name":"DescribeInstances", @@ -726,7 +808,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves information about a fleet's instances, including instance IDs. Use this operation to get details on all instances in the fleet or get details on one specific instance.

To get a specific instance, specify fleet ID and instance ID. To get all instances in a fleet, specify a fleet ID only. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, an Instance object is returned for each result.

Learn more

Remotely Access Fleet Instances

Debug Fleet Issues

Related operations

" + "documentation":"

Retrieves information about a fleet's instances, including instance IDs, connection data, and status.

This operation can be used in the following ways:

Use the pagination parameters to retrieve results as a set of sequential pages.

If successful, an Instance object is returned for each requested instance. Instances are not returned in any particular order.

Learn more

Remotely Access Fleet Instances

Debug Fleet Issues

Related actions

DescribeInstances | GetInstanceAccess | DescribeEC2InstanceLimits | All APIs by task

" }, "DescribeMatchmaking":{ "name":"DescribeMatchmaking", @@ -741,7 +823,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Retrieves one or more matchmaking tickets. Use this operation to retrieve ticket information, including--after a successful match is made--connection information for the resulting new game session.

To request matchmaking tickets, provide a list of up to 10 ticket IDs. If the request is successful, a ticket object is returned for each requested ID that currently exists.

This operation is not designed to be continually called to track matchmaking ticket status. This practice can cause you to exceed your API limit, which results in errors. Instead, as a best practice, set up an Amazon Simple Notification Service (SNS) to receive notifications, and provide the topic ARN in the matchmaking configuration. Continuously polling ticket status with DescribeMatchmaking should only be used for games in development with low matchmaking usage.

Learn more

Add FlexMatch to a Game Client

Set Up FlexMatch Event Notification

Related operations

" + "documentation":"

Retrieves one or more matchmaking tickets. Use this operation to retrieve ticket information, including--after a successful match is made--connection information for the resulting new game session.

To request matchmaking tickets, provide a list of up to 10 ticket IDs. If the request is successful, a ticket object is returned for each requested ID that currently exists.

This operation is not designed to be continually called to track matchmaking ticket status. This practice can cause you to exceed your API limit, which results in errors. Instead, as a best practice, set up an Amazon Simple Notification Service (SNS) to receive notifications, and provide the topic ARN in the matchmaking configuration. Continuously polling ticket status with DescribeMatchmaking should only be used for games in development with low matchmaking usage.

Learn more

Add FlexMatch to a game client

Set Up FlexMatch event notification

Related actions

StartMatchmaking | DescribeMatchmaking | StopMatchmaking | AcceptMatch | StartMatchBackfill | All APIs by task
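A brief boto3 sketch of fetching ticket status for a batch of tickets; the ticket IDs are hypothetical values returned by earlier StartMatchmaking calls:

```python
import boto3

gamelift = boto3.client("gamelift", region_name="us-west-2")

# Up to 10 ticket IDs per request.
ticket_ids = ["ticket-abc-123", "ticket-def-456"]

tickets = gamelift.describe_matchmaking(TicketIds=ticket_ids)["TicketList"]
for ticket in tickets:
    print(ticket["TicketId"], ticket["Status"])
    if ticket["Status"] == "COMPLETED":
        # Connection details for the resulting game session ride along on the ticket.
        info = ticket["GameSessionConnectionInfo"]
        print(info["IpAddress"], info["Port"])
```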

" }, "DescribeMatchmakingConfigurations":{ "name":"DescribeMatchmakingConfigurations", @@ -756,7 +838,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Retrieves the details of FlexMatch matchmaking configurations.

This operation offers the following options: (1) retrieve all matchmaking configurations, (2) retrieve configurations for a specified list, or (3) retrieve all configurations that use a specified rule set name. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages.

If successful, a configuration is returned for each requested name. When specifying a list of names, only configurations that currently exist are returned.

Learn more

Setting Up FlexMatch Matchmakers

Related operations

" + "documentation":"

Retrieves the details of FlexMatch matchmaking configurations.

This operation offers the following options: (1) retrieve all matchmaking configurations, (2) retrieve configurations for a specified list, or (3) retrieve all configurations that use a specified rule set name. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages.

If successful, a configuration is returned for each requested name. When specifying a list of names, only configurations that currently exist are returned.

Learn more

Setting up FlexMatch matchmakers

Related actions

CreateMatchmakingConfiguration | DescribeMatchmakingConfigurations | UpdateMatchmakingConfiguration | DeleteMatchmakingConfiguration | CreateMatchmakingRuleSet | DescribeMatchmakingRuleSets | ValidateMatchmakingRuleSet | DeleteMatchmakingRuleSet | All APIs by task

" }, "DescribeMatchmakingRuleSets":{ "name":"DescribeMatchmakingRuleSets", @@ -772,7 +854,7 @@ {"shape":"NotFoundException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Retrieves the details for FlexMatch matchmaking rule sets. You can request all existing rule sets for the Region, or provide a list of one or more rule set names. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a rule set is returned for each requested name.

Learn more

Related operations

" + "documentation":"

Retrieves the details for FlexMatch matchmaking rule sets. You can request all existing rule sets for the Region, or provide a list of one or more rule set names. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a rule set is returned for each requested name.

Learn more

Related actions

CreateMatchmakingConfiguration | DescribeMatchmakingConfigurations | UpdateMatchmakingConfiguration | DeleteMatchmakingConfiguration | CreateMatchmakingRuleSet | DescribeMatchmakingRuleSets | ValidateMatchmakingRuleSet | DeleteMatchmakingRuleSet | All APIs by task

" }, "DescribePlayerSessions":{ "name":"DescribePlayerSessions", @@ -788,7 +870,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves properties for one or more player sessions. This operation can be used in several ways: (1) provide a PlayerSessionId to request properties for a specific player session; (2) provide a GameSessionId to request properties for all player sessions in the specified game session; (3) provide a PlayerId to request properties for all player sessions of a specified player.

To get game session record(s), specify only one of the following: a player session ID, a game session ID, or a player ID. You can filter this request by player session status. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a PlayerSession object is returned for each session matching the request.

Available in Amazon GameLift Local.

" + "documentation":"

Retrieves properties for one or more player sessions.

This action can be used in the following ways:

To request player sessions, specify either a player session ID, game session ID, or player ID. You can filter this request by player session status. Use the pagination parameters to retrieve results as a set of sequential pages.

If successful, a PlayerSession object is returned for each session that matches the request.

Available in Amazon GameLift Local.

Related actions

CreatePlayerSession | CreatePlayerSessions | DescribePlayerSessions | StartGameSessionPlacement | DescribeGameSessionPlacement | All APIs by task

" }, "DescribeRuntimeConfiguration":{ "name":"DescribeRuntimeConfiguration", @@ -804,7 +886,7 @@ {"shape":"InternalServiceException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Retrieves a fleet's runtime configuration settings. The runtime configuration tells Amazon GameLift which server processes to run (and how) on each instance in the fleet.

To get a runtime configuration, specify the fleet's unique identifier. If successful, a RuntimeConfiguration object is returned for the requested fleet. If the requested fleet has been deleted, the result set is empty.

Learn more

Setting up GameLift Fleets

Running Multiple Processes on a Fleet

Related operations

" + "documentation":"

Retrieves a fleet's runtime configuration settings. The runtime configuration tells GameLift which server processes to run (and how) on each instance in the fleet.

To get the runtime configuration that is currently in force for a fleet, provide the fleet ID.

If successful, a RuntimeConfiguration object is returned for the requested fleet. If the requested fleet has been deleted, the result set is empty.

Learn more

Setting up GameLift fleets

Running multiple processes on a fleet

Related actions

ListFleets | DescribeEC2InstanceLimits | DescribeFleetAttributes | DescribeFleetCapacity | DescribeFleetEvents | DescribeFleetLocationAttributes | DescribeFleetPortSettings | DescribeFleetUtilization | DescribeRuntimeConfiguration | DescribeScalingPolicies | All APIs by task

" }, "DescribeScalingPolicies":{ "name":"DescribeScalingPolicies", @@ -820,7 +902,7 @@ {"shape":"UnauthorizedException"}, {"shape":"NotFoundException"} ], - "documentation":"

Retrieves all scaling policies applied to a fleet.

To get a fleet's scaling policies, specify the fleet ID. You can filter this request by policy status, such as to retrieve only active scaling policies. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a set of ScalingPolicy objects is returned for the fleet.

A fleet may have all of its scaling policies suspended (StopFleetActions). This operation does not affect the status of the scaling policies, which remains ACTIVE. To see whether a fleet's scaling policies are in force or suspended, call DescribeFleetAttributes and check the stopped actions.

" + "documentation":"

Retrieves all scaling policies applied to a fleet.

To get a fleet's scaling policies, specify the fleet ID. You can filter this request by policy status, such as to retrieve only active scaling policies. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a set of ScalingPolicy objects is returned for the fleet.

A fleet may have all of its scaling policies suspended (StopFleetActions). This operation does not affect the status of the scaling policies, which remains ACTIVE. To see whether a fleet's scaling policies are in force or suspended, call DescribeFleetAttributes and check the stopped actions.

Related actions

DescribeFleetCapacity | UpdateFleetCapacity | DescribeEC2InstanceLimits | PutScalingPolicy | DescribeScalingPolicies | DeleteScalingPolicy | StopFleetActions | StartFleetActions | All APIs by task
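A minimal sketch of retrieving only the active policies for one fleet; the fleet ID is a placeholder:

```python
import boto3

gamelift = boto3.client("gamelift", region_name="us-west-2")

response = gamelift.describe_scaling_policies(
    FleetId="fleet-1111aaaa-22bb-33cc-44dd-5555eeee6666",  # hypothetical
    StatusFilter="ACTIVE",
)

for policy in response.get("ScalingPolicies", []):
    print(policy["Name"], policy["PolicyType"], policy["Status"])
```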

" }, "DescribeScript":{ "name":"DescribeScript", @@ -836,7 +918,7 @@ {"shape":"InternalServiceException"}, {"shape":"NotFoundException"} ], - "documentation":"

Retrieves properties for a Realtime script.

To request a script record, specify the script ID. If successful, an object containing the script properties is returned.

Learn more

Amazon GameLift Realtime Servers

Related operations

" + "documentation":"

Retrieves properties for a Realtime script.

To request a script record, specify the script ID. If successful, an object containing the script properties is returned.

Learn more

Amazon GameLift Realtime Servers

Related actions

CreateScript | ListScripts | DescribeScript | UpdateScript | DeleteScript | All APIs by task

" }, "DescribeVpcPeeringAuthorizations":{ "name":"DescribeVpcPeeringAuthorizations", @@ -851,7 +933,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves valid VPC peering authorizations that are pending for the AWS account. This operation returns all VPC peering authorizations and requests for peering. This includes those initiated and received by this account.

" + "documentation":"

Retrieves valid VPC peering authorizations that are pending for the AWS account. This operation returns all VPC peering authorizations and requests for peering. This includes those initiated and received by this account.

Related actions

CreateVpcPeeringAuthorization | DescribeVpcPeeringAuthorizations | DeleteVpcPeeringAuthorization | CreateVpcPeeringConnection | DescribeVpcPeeringConnections | DeleteVpcPeeringConnection | All APIs by task

" }, "DescribeVpcPeeringConnections":{ "name":"DescribeVpcPeeringConnections", @@ -867,7 +949,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves information on VPC peering connections. Use this operation to get peering information for all fleets or for one specific fleet ID.

To retrieve connection information, call this operation from the AWS account that is used to manage the Amazon GameLift fleets. Specify a fleet ID or leave the parameter empty to retrieve all connection records. If successful, the retrieved information includes both active and pending connections. Active connections identify the IpV4 CIDR block that the VPC uses to connect.

" + "documentation":"

Retrieves information on VPC peering connections. Use this operation to get peering information for all fleets or for one specific fleet ID.

To retrieve connection information, call this operation from the AWS account that is used to manage the Amazon GameLift fleets. Specify a fleet ID or leave the parameter empty to retrieve all connection records. If successful, the retrieved information includes both active and pending connections. Active connections identify the IPv4 CIDR block that the VPC uses to connect.

Related actions

CreateVpcPeeringAuthorization | DescribeVpcPeeringAuthorizations | DeleteVpcPeeringAuthorization | CreateVpcPeeringConnection | DescribeVpcPeeringConnections | DeleteVpcPeeringConnection | All APIs by task

" }, "GetGameSessionLogUrl":{ "name":"GetGameSessionLogUrl", @@ -883,7 +965,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Retrieves the location of stored game session logs for a specified game session. When a game session is terminated, Amazon GameLift automatically stores the logs in Amazon S3 and retains them for 14 days. Use this URL to download the logs.

See the AWS Service Limits page for maximum log file sizes. Log files that exceed this limit are not saved.

" + "documentation":"

Retrieves the location of stored game session logs for a specified game session. When a game session is terminated, GameLift automatically stores the logs in Amazon S3 and retains them for 14 days. Use this URL to download the logs.

See the AWS Service Limits page for maximum log file sizes. Log files that exceed this limit are not saved.

Related actions

CreateGameSession | DescribeGameSessions | DescribeGameSessionDetails | SearchGameSessions | UpdateGameSession | GetGameSessionLogUrl | StartGameSessionPlacement | DescribeGameSessionPlacement | StopGameSessionPlacement | All APIs by task
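A short boto3 sketch of fetching the pre-signed log URL and downloading it; the game session identifier and the local file name are assumptions for illustration:

```python
import urllib.request
import boto3

gamelift = boto3.client("gamelift", region_name="us-west-2")

# Hypothetical ID for a game session that has already terminated.
GAME_SESSION_ID = "arn:aws:gamelift:us-west-2::gamesession/fleet-1111aaaa/my-session-0001"

# GameLift returns a pre-signed S3 URL that is valid for a limited time.
log_url = gamelift.get_game_session_log_url(GameSessionId=GAME_SESSION_ID)["PreSignedUrl"]

# Save the archived logs locally; the file name is arbitrary.
urllib.request.urlretrieve(log_url, "game-session-logs.zip")
```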

" }, "GetInstanceAccess":{ "name":"GetInstanceAccess", @@ -899,7 +981,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Requests remote access to a fleet instance. Remote access is useful for debugging, gathering benchmarking data, or observing activity in real time.

To remotely access an instance, you need credentials that match the operating system of the instance. For a Windows instance, Amazon GameLift returns a user name and password as strings for use with a Windows Remote Desktop client. For a Linux instance, Amazon GameLift returns a user name and RSA private key, also as strings, for use with an SSH client. The private key must be saved in the proper format to a .pem file before using. If you're making this request using the AWS CLI, saving the secret can be handled as part of the GetInstanceAccess request, as shown in one of the examples for this operation.

To request access to a specific instance, specify the IDs of both the instance and the fleet it belongs to. You can retrieve a fleet's instance IDs by calling DescribeInstances. If successful, an InstanceAccess object is returned that contains the instance's IP address and a set of credentials.

Learn more

Remotely Access Fleet Instances

Debug Fleet Issues

Related operations

" + "documentation":"

Requests remote access to a fleet instance. Remote access is useful for debugging, gathering benchmarking data, or observing activity in real time.

To remotely access an instance, you need credentials that match the operating system of the instance. For a Windows instance, GameLift returns a user name and password as strings for use with a Windows Remote Desktop client. For a Linux instance, GameLift returns a user name and RSA private key, also as strings, for use with an SSH client. The private key must be saved in the proper format to a .pem file before use. If you're making this request using the AWS CLI, saving the secret can be handled as part of the GetInstanceAccess request, as shown in one of the examples for this operation.

To request access to a specific instance, specify the IDs of both the instance and the fleet it belongs to. You can retrieve a fleet's instance IDs by calling DescribeInstances. If successful, an InstanceAccess object is returned that contains the instance's IP address and a set of credentials.

Learn more

Remotely Access Fleet Instances

Debug Fleet Issues

Related actions

DescribeInstances | GetInstanceAccess | DescribeEC2InstanceLimits | All APIs by task
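The .pem handling described above can also be done from Python rather than the AWS CLI. A sketch with hypothetical fleet and instance IDs:

```python
import os
import boto3

gamelift = boto3.client("gamelift", region_name="us-west-2")

# Instance IDs come from DescribeInstances; both IDs here are placeholders.
access = gamelift.get_instance_access(
    FleetId="fleet-1111aaaa-22bb-33cc-44dd-5555eeee6666",
    InstanceId="i-0123456789abcdef0",
)["InstanceAccess"]

print("Connect to", access["IpAddress"], "as", access["Credentials"]["UserName"])

# For Linux fleets the Secret field holds an RSA private key; save it as a .pem
# file with owner-only permissions before handing it to an SSH client.
if access["OperatingSystem"].startswith("AMAZON_LINUX"):
    with open("gamelift-instance.pem", "w") as fh:
        fh.write(access["Credentials"]["Secret"])
    os.chmod("gamelift-instance.pem", 0o600)
```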

" }, "ListAliases":{ "name":"ListAliases", @@ -914,7 +996,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves all aliases for this AWS account. You can filter the result set by alias name and/or routing strategy type. Use the pagination parameters to retrieve results in sequential pages.

Returned aliases are not listed in any particular order.

" + "documentation":"

Retrieves all aliases for this AWS account. You can filter the result set by alias name and/or routing strategy type. Use the pagination parameters to retrieve results in sequential pages.

Returned aliases are not listed in any particular order.

Related actions

CreateAlias | ListAliases | DescribeAlias | UpdateAlias | DeleteAlias | ResolveAlias | All APIs by task

" }, "ListBuilds":{ "name":"ListBuilds", @@ -929,7 +1011,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves build resources for all builds associated with the AWS account in use. You can limit results to builds that are in a specific status by using the Status parameter. Use the pagination parameters to retrieve results in a set of sequential pages.

Build resources are not listed in any particular order.

Learn more

Upload a Custom Server Build

Related operations

" + "documentation":"

Retrieves build resources for all builds associated with the AWS account in use. You can limit results to builds that are in a specific status by using the Status parameter. Use the pagination parameters to retrieve results in a set of sequential pages.

Build resources are not listed in any particular order.

Learn more

Upload a Custom Server Build

Related actions

CreateBuild | ListBuilds | DescribeBuild | UpdateBuild | DeleteBuild | All APIs by task

" }, "ListFleets":{ "name":"ListFleets", @@ -945,7 +1027,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves a collection of fleet resources for this AWS account. You can filter the result set to find only those fleets that are deployed with a specific build or script. Use the pagination parameters to retrieve results in sequential pages.

Fleet resources are not listed in a particular order.

Learn more

Setting up GameLift Fleets

Related operations

" + "documentation":"

Retrieves a collection of fleet resources in an AWS Region. You can call this operation to get fleets in a previously selected default Region (see https://docs.aws.amazon.com/credref/latest/refdocs/setting-global-region.html) or specify a Region in your request. You can filter the result set to find only those fleets that are deployed with a specific build or script. For fleets that have multiple locations, this operation retrieves fleets based on their home Region only.

This operation can be used in the following ways:

Use the pagination parameters to retrieve results as a set of sequential pages.

If successful, a list of fleet IDs that match the request parameters is returned. A NextToken value is also returned if there are more result pages to retrieve.

Fleet resources are not listed in a particular order.

Learn more

Setting up GameLift fleets

Related actions

CreateFleet | UpdateFleetCapacity | PutScalingPolicy | DescribeEC2InstanceLimits | DescribeFleetAttributes | DescribeFleetLocationAttributes | UpdateFleetAttributes | StopFleetActions | DeleteFleet | All APIs by task
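A minimal boto3 sketch of collecting every fleet ID in the client's Region by following NextToken, as described above; BuildId or ScriptId could be added to filter:

```python
import boto3

gamelift = boto3.client("gamelift", region_name="us-west-2")

fleet_ids, next_token = [], None
while True:
    kwargs = {"Limit": 50}
    if next_token:
        kwargs["NextToken"] = next_token
    page = gamelift.list_fleets(**kwargs)
    fleet_ids.extend(page.get("FleetIds", []))
    next_token = page.get("NextToken")
    if not next_token:
        break

print(f"{len(fleet_ids)} fleets in this Region:", fleet_ids)
```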

" }, "ListGameServerGroups":{ "name":"ListGameServerGroups", @@ -960,7 +1042,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

Retrieves information on all game servers groups that exist in the current AWS account for the selected Region. Use the pagination parameters to retrieve results in a set of sequential segments.

Learn more

GameLift FleetIQ Guide

Related operations

" + "documentation":"

This operation is used with the GameLift FleetIQ solution and game server groups.

Retrieves information on all game servers groups that exist in the current AWS account for the selected Region. Use the pagination parameters to retrieve results in a set of sequential segments.

Learn more

GameLift FleetIQ Guide

Related actions

CreateGameServerGroup | ListGameServerGroups | DescribeGameServerGroup | UpdateGameServerGroup | DeleteGameServerGroup | ResumeGameServerGroup | SuspendGameServerGroup | DescribeGameServerInstances | All APIs by task

" }, "ListGameServers":{ "name":"ListGameServers", @@ -975,7 +1057,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

Retrieves information on all game servers that are currently active in a specified game server group. You can opt to sort the list by game server age. Use the pagination parameters to retrieve results in a set of sequential segments.

Learn more

GameLift FleetIQ Guide

Related operations

" + "documentation":"

This operation is used with the GameLift FleetIQ solution and game server groups.

Retrieves information on all game servers that are currently active in a specified game server group. You can opt to sort the list by game server age. Use the pagination parameters to retrieve results in a set of sequential segments.

Learn more

GameLift FleetIQ Guide

Related actions

RegisterGameServer | ListGameServers | ClaimGameServer | DescribeGameServer | UpdateGameServer | DeregisterGameServer | All APIs by task

" }, "ListScripts":{ "name":"ListScripts", @@ -990,7 +1072,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves script records for all Realtime scripts that are associated with the AWS account in use.

Learn more

Amazon GameLift Realtime Servers

Related operations

" + "documentation":"

Retrieves script records for all Realtime scripts that are associated with the AWS account in use.

Learn more

Amazon GameLift Realtime Servers

Related actions

CreateScript | ListScripts | DescribeScript | UpdateScript | DeleteScript | All APIs by task

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -1006,7 +1088,7 @@ {"shape":"TaggingFailedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves all tags that are assigned to a GameLift resource. Resource tags are used to organize AWS resources for a range of purposes. This operation handles the permissions necessary to manage tags for the following GameLift resource types:

To list tags for a resource, specify the unique ARN value for the resource.

Learn more

Tagging AWS Resources in the AWS General Reference

AWS Tagging Strategies

Related operations

" + "documentation":"

Retrieves all tags that are assigned to a GameLift resource. Resource tags are used to organize AWS resources for a range of purposes. This operation handles the permissions necessary to manage tags for the following GameLift resource types:

To list tags for a resource, specify the unique ARN value for the resource.

Learn more

Tagging AWS Resources in the AWS General Reference

AWS Tagging Strategies

Related actions

TagResource | UntagResource | ListTagsForResource | All APIs by task

" }, "PutScalingPolicy":{ "name":"PutScalingPolicy", @@ -1022,7 +1104,7 @@ {"shape":"UnauthorizedException"}, {"shape":"NotFoundException"} ], - "documentation":"

Creates or updates a scaling policy for a fleet. Scaling policies are used to automatically scale a fleet's hosting capacity to meet player demand. An active scaling policy instructs Amazon GameLift to track a fleet metric and automatically change the fleet's capacity when a certain threshold is reached. There are two types of scaling policies: target-based and rule-based. Use a target-based policy to quickly and efficiently manage fleet scaling; this option is the most commonly used. Use rule-based policies when you need to exert fine-grained control over auto-scaling.

Fleets can have multiple scaling policies of each type in force at the same time; you can have one target-based policy, one or multiple rule-based scaling policies, or both. We recommend caution, however, because multiple auto-scaling policies can have unintended consequences.

You can temporarily suspend all scaling policies for a fleet by calling StopFleetActions with the fleet action AUTO_SCALING. To resume scaling policies, call StartFleetActions with the same fleet action. To stop just one scaling policy--or to permanently remove it, you must delete the policy with DeleteScalingPolicy.

Learn more about how to work with auto-scaling in Set Up Fleet Automatic Scaling.

Target-based policy

A target-based policy tracks a single metric: PercentAvailableGameSessions. This metric tells us how much of a fleet's hosting capacity is ready to host game sessions but is not currently in use. This is the fleet's buffer; it measures the additional player demand that the fleet could handle at current capacity. With a target-based policy, you set your ideal buffer size and leave it to Amazon GameLift to take whatever action is needed to maintain that target.

For example, you might choose to maintain a 10% buffer for a fleet that has the capacity to host 100 simultaneous game sessions. This policy tells Amazon GameLift to take action whenever the fleet's available capacity falls below or rises above 10 game sessions. Amazon GameLift will start new instances or stop unused instances in order to return to the 10% buffer.

To create or update a target-based policy, specify a fleet ID and name, and set the policy type to \"TargetBased\". Specify the metric to track (PercentAvailableGameSessions) and reference a TargetConfiguration object with your desired buffer value. Exclude all other parameters. On a successful request, the policy name is returned. The scaling policy is automatically in force as soon as it's successfully created. If the fleet's auto-scaling actions are temporarily suspended, the new policy will be in force once the fleet actions are restarted.

Rule-based policy

A rule-based policy tracks a specified fleet metric, sets a threshold value, and specifies the type of action to initiate when triggered. With a rule-based policy, you can select from several available fleet metrics. Each policy specifies whether to scale up or scale down (and by how much), so you need one policy for each type of action.

For example, a policy may make the following statement: \"If the percentage of idle instances is greater than 20% for more than 15 minutes, then reduce the fleet capacity by 10%.\"

A policy's rule statement has the following structure:

If [MetricName] is [ComparisonOperator] [Threshold] for [EvaluationPeriods] minutes, then [ScalingAdjustmentType] to/by [ScalingAdjustment].

To implement the example, the rule statement would look like this:

If [PercentIdleInstances] is [GreaterThanThreshold] [20] for [15] minutes, then [PercentChangeInCapacity] to/by [10].

To create or update a scaling policy, specify a unique combination of name and fleet ID, and set the policy type to \"RuleBased\". Specify the parameter values for a policy rule statement. On a successful request, the policy name is returned. Scaling policies are automatically in force as soon as they're successfully created. If the fleet's auto-scaling actions are temporarily suspended, the new policy will be in force once the fleet actions are restarted.

" + "documentation":"

Creates or updates a scaling policy for a fleet. Scaling policies are used to automatically scale a fleet's hosting capacity to meet player demand. An active scaling policy instructs Amazon GameLift to track a fleet metric and automatically change the fleet's capacity when a certain threshold is reached. There are two types of scaling policies: target-based and rule-based. Use a target-based policy to quickly and efficiently manage fleet scaling; this option is the most commonly used. Use rule-based policies when you need to exert fine-grained control over auto-scaling.

Fleets can have multiple scaling policies of each type in force at the same time; you can have one target-based policy, one or multiple rule-based scaling policies, or both. We recommend caution, however, because multiple auto-scaling policies can have unintended consequences.

You can temporarily suspend all scaling policies for a fleet by calling StopFleetActions with the fleet action AUTO_SCALING. To resume scaling policies, call StartFleetActions with the same fleet action. To stop just one scaling policy, or to permanently remove it, you must delete the policy with DeleteScalingPolicy.

Learn more about how to work with auto-scaling in Set Up Fleet Automatic Scaling.

Target-based policy

A target-based policy tracks a single metric: PercentAvailableGameSessions. This metric tells us how much of a fleet's hosting capacity is ready to host game sessions but is not currently in use. This is the fleet's buffer; it measures the additional player demand that the fleet could handle at current capacity. With a target-based policy, you set your ideal buffer size and leave it to Amazon GameLift to take whatever action is needed to maintain that target.

For example, you might choose to maintain a 10% buffer for a fleet that has the capacity to host 100 simultaneous game sessions. This policy tells Amazon GameLift to take action whenever the fleet's available capacity falls below or rises above 10 game sessions. Amazon GameLift will start new instances or stop unused instances in order to return to the 10% buffer.

To create or update a target-based policy, specify a fleet ID and name, and set the policy type to \"TargetBased\". Specify the metric to track (PercentAvailableGameSessions) and reference a TargetConfiguration object with your desired buffer value. Exclude all other parameters. On a successful request, the policy name is returned. The scaling policy is automatically in force as soon as it's successfully created. If the fleet's auto-scaling actions are temporarily suspended, the new policy will be in force once the fleet actions are restarted.

Rule-based policy

A rule-based policy tracks a specified fleet metric, sets a threshold value, and specifies the type of action to initiate when triggered. With a rule-based policy, you can select from several available fleet metrics. Each policy specifies whether to scale up or scale down (and by how much), so you need one policy for each type of action.

For example, a policy may make the following statement: \"If the percentage of idle instances is greater than 20% for more than 15 minutes, then reduce the fleet capacity by 10%.\"

A policy's rule statement has the following structure:

If [MetricName] is [ComparisonOperator] [Threshold] for [EvaluationPeriods] minutes, then [ScalingAdjustmentType] to/by [ScalingAdjustment].

To implement the example, the rule statement would look like this:

If [PercentIdleInstances] is [GreaterThanThreshold] [20] for [15] minutes, then [PercentChangeInCapacity] to/by [10].

To create or update a scaling policy, specify a unique combination of name and fleet ID, and set the policy type to \"RuleBased\". Specify the parameter values for a policy rule statement. On a successful request, the policy name is returned. Scaling policies are automatically in force as soon as they're successfully created. If the fleet's auto-scaling actions are temporarily suspended, the new policy will be in force once the fleet actions are restarted.

Related actions

DescribeFleetCapacity | UpdateFleetCapacity | DescribeEC2InstanceLimits | PutScalingPolicy | DescribeScalingPolicies | DeleteScalingPolicy | StopFleetActions | StartFleetActions | All APIs by task
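The target-based case described above translates into a single call: set the policy type to "TargetBased", track PercentAvailableGameSessions, and supply a TargetConfiguration with the desired buffer. A sketch with a hypothetical fleet ID and policy name:

```python
import boto3

gamelift = boto3.client("gamelift", region_name="us-west-2")

# Keep a 10% buffer of available game sessions on this (hypothetical) fleet.
response = gamelift.put_scaling_policy(
    FleetId="fleet-1111aaaa-22bb-33cc-44dd-5555eeee6666",
    Name="maintain-10-percent-buffer",
    PolicyType="TargetBased",
    MetricName="PercentAvailableGameSessions",
    TargetConfiguration={"TargetValue": 10.0},
)
print("Policy in force:", response["Name"])
```

A rule-based policy would instead set PolicyType to "RuleBased" and supply the rule-statement parameters (Threshold, ComparisonOperator, EvaluationPeriods, ScalingAdjustmentType, ScalingAdjustment).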

" }, "RegisterGameServer":{ "name":"RegisterGameServer", @@ -1039,7 +1121,7 @@ {"shape":"InternalServiceException"}, {"shape":"LimitExceededException"} ], - "documentation":"

This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

Creates a new game server resource and notifies GameLift FleetIQ that the game server is ready to host gameplay and players. This operation is called by a game server process that is running on an instance in a game server group. Registering game servers enables GameLift FleetIQ to track available game servers and enables game clients and services to claim a game server for a new game session.

To register a game server, identify the game server group and instance where the game server is running, and provide a unique identifier for the game server. You can also include connection and game server data. When a game client or service requests a game server by calling ClaimGameServer, this information is returned in the response.

Once a game server is successfully registered, it is put in status AVAILABLE. A request to register a game server may fail if the instance it is running on is in the process of shutting down as part of instance balancing or scale-down activity.

Learn more

GameLift FleetIQ Guide

Related operations

" + "documentation":"

This operation is used with the GameLift FleetIQ solution and game server groups.

Creates a new game server resource and notifies GameLift FleetIQ that the game server is ready to host gameplay and players. This operation is called by a game server process that is running on an instance in a game server group. Registering game servers enables GameLift FleetIQ to track available game servers and enables game clients and services to claim a game server for a new game session.

To register a game server, identify the game server group and instance where the game server is running, and provide a unique identifier for the game server. You can also include connection and game server data. When a game client or service requests a game server by calling ClaimGameServer, this information is returned in the response.

Once a game server is successfully registered, it is put in status AVAILABLE. A request to register a game server may fail if the instance it is running on is in the process of shutting down as part of instance balancing or scale-down activity.

Learn more

GameLift FleetIQ Guide

Related actions

RegisterGameServer | ListGameServers | ClaimGameServer | DescribeGameServer | UpdateGameServer | DeregisterGameServer | All APIs by task
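A sketch of the registration call as a game server process might make it; every identifier and the connection string below are hypothetical:

```python
import boto3

gamelift = boto3.client("gamelift", region_name="us-west-2")

# In practice this call is made by the game server process itself,
# from the instance it is running on.
response = gamelift.register_game_server(
    GameServerGroupName="my-gsg",
    GameServerId="gs-0001",
    InstanceId="i-0123456789abcdef0",
    ConnectionInfo="10.0.1.15:7777",
    GameServerData="map=harbor;mode=ranked",
)
print("Registered:", response["GameServer"]["GameServerId"])
```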

" }, "RequestUploadCredentials":{ "name":"RequestUploadCredentials", @@ -1055,7 +1137,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves a fresh set of credentials for use when uploading a new set of game build files to Amazon GameLift's Amazon S3. This is done as part of the build creation process; see CreateBuild.

To request new credentials, specify the build ID as returned with an initial CreateBuild request. If successful, a new set of credentials are returned, along with the S3 storage location associated with the build ID.

Learn more

Create a Build with Files in S3

Related operations

" + "documentation":"

Retrieves a fresh set of credentials for use when uploading a new set of game build files to Amazon GameLift's Amazon S3. This is done as part of the build creation process; see CreateBuild.

To request new credentials, specify the build ID as returned with an initial CreateBuild request. If successful, a new set of credentials is returned, along with the S3 storage location associated with the build ID.

Learn more

Create a Build with Files in S3

Related actions

CreateBuild | ListBuilds | DescribeBuild | UpdateBuild | DeleteBuild | All APIs by task

" }, "ResolveAlias":{ "name":"ResolveAlias", @@ -1072,7 +1154,7 @@ {"shape":"TerminalRoutingStrategyException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves the fleet ID that an alias is currently pointing to.

" + "documentation":"

Retrieves the fleet ID that an alias is currently pointing to.

Related actions

CreateAlias | ListAliases | DescribeAlias | UpdateAlias | DeleteAlias | ResolveAlias | All APIs by task

" }, "ResumeGameServerGroup":{ "name":"ResumeGameServerGroup", @@ -1088,7 +1170,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

Reinstates activity on a game server group after it has been suspended. A game server group might be suspended by the SuspendGameServerGroup operation, or it might be suspended involuntarily due to a configuration problem. In the second case, you can manually resume activity on the group once the configuration problem has been resolved. Refer to the game server group status and status reason for more information on why group activity is suspended.

To resume activity, specify a game server group ARN and the type of activity to be resumed. If successful, a GameServerGroup object is returned showing that the resumed activity is no longer listed in SuspendedActions.

Learn more

GameLift FleetIQ Guide

Related operations

" + "documentation":"

This operation is used with the GameLift FleetIQ solution and game server groups.

Reinstates activity on a game server group after it has been suspended. A game server group might be suspended by the SuspendGameServerGroup operation, or it might be suspended involuntarily due to a configuration problem. In the second case, you can manually resume activity on the group once the configuration problem has been resolved. Refer to the game server group status and status reason for more information on why group activity is suspended.

To resume activity, specify a game server group ARN and the type of activity to be resumed. If successful, a GameServerGroup object is returned showing that the resumed activity is no longer listed in SuspendedActions.

Learn more

GameLift FleetIQ Guide

Related actions

CreateGameServerGroup | ListGameServerGroups | DescribeGameServerGroup | UpdateGameServerGroup | DeleteGameServerGroup | ResumeGameServerGroup | SuspendGameServerGroup | DescribeGameServerInstances | All APIs by task

" }, "SearchGameSessions":{ "name":"SearchGameSessions", @@ -1105,7 +1187,7 @@ {"shape":"UnauthorizedException"}, {"shape":"TerminalRoutingStrategyException"} ], - "documentation":"

Retrieves all active game sessions that match a set of search criteria and sorts them in a specified order. You can search or sort by the following game session attributes:

Returned values for playerSessionCount and hasAvailablePlayerSessions change quickly as players join sessions and others drop out. Results should be considered a snapshot in time. Be sure to refresh search results often, and handle sessions that fill up before a player can join.

To search or sort, specify either a fleet ID or an alias ID, and provide a search filter expression, a sort expression, or both. If successful, a collection of GameSession objects matching the request is returned. Use the pagination parameters to retrieve results as a set of sequential pages.

You can search for game sessions one fleet at a time only. To find game sessions across multiple fleets, you must search each fleet separately and combine the results. This search feature finds only game sessions that are in ACTIVE status. To locate games in statuses other than active, use DescribeGameSessionDetails.

" + "documentation":"

Retrieves all active game sessions that match a set of search criteria and sorts them into a specified order.

When searching for game sessions, you specify exactly where you want to search and provide a search filter expression, a sort expression, or both. A search request can search only one fleet, but it can search all of a fleet's locations.

This operation can be used in the following ways:

Use the pagination parameters to retrieve results as a set of sequential pages.

If successful, a GameSession object is returned for each game session that matches the request. Search finds game sessions that are in ACTIVE status only. To retrieve information on game sessions in other statuses, use DescribeGameSessions.

You can search or sort by the following game session attributes:

Returned values for playerSessionCount and hasAvailablePlayerSessions change quickly as players join sessions and others drop out. Results should be considered a snapshot in time. Be sure to refresh search results often, and handle sessions that fill up before a player can join.

Related actions

CreateGameSession | DescribeGameSessions | DescribeGameSessionDetails | SearchGameSessions | UpdateGameSession | GetGameSessionLogUrl | StartGameSessionPlacement | DescribeGameSessionPlacement | StopGameSessionPlacement | All APIs by task
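A hedged sketch of one search call through the generated Python client, assuming a placeholder fleet ID; the filter and sort expressions are illustrative examples of the attribute syntax described above.

```python
import botocore.session

gamelift = botocore.session.get_session().create_client("gamelift", region_name="us-west-2")

# Find joinable ACTIVE game sessions on one fleet, oldest first.
response = gamelift.search_game_sessions(
    FleetId="fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912",  # placeholder fleet ID
    FilterExpression="hasAvailablePlayerSessions=true",
    SortExpression="creationTimeMillis ASC",
    Limit=20,
)
for game_session in response["GameSessions"]:
    print(game_session["GameSessionId"], game_session["CurrentPlayerSessionCount"])
```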

" }, "StartFleetActions":{ "name":"StartFleetActions", @@ -1121,7 +1203,7 @@ {"shape":"UnauthorizedException"}, {"shape":"NotFoundException"} ], - "documentation":"

Resumes activity on a fleet that was suspended with StopFleetActions. Currently, this operation is used to restart a fleet's auto-scaling activity.

To start fleet actions, specify the fleet ID and the type of actions to restart. When auto-scaling fleet actions are restarted, Amazon GameLift once again initiates scaling events as triggered by the fleet's scaling policies. If actions on the fleet were never stopped, this operation will have no effect. You can view a fleet's stopped actions using DescribeFleetAttributes.

Learn more

Setting up GameLift Fleets

Related operations

" + "documentation":"

Resumes certain types of activity on fleet instances that were suspended with StopFleetActions. For multi-location fleets, fleet actions are managed separately for each location. Currently, this operation is used to restart a fleet's auto-scaling activity.

This operation can be used in the following ways:

If successful, GameLift once again initiates scaling events as triggered by the fleet's scaling policies. If actions on the fleet location were never stopped, this operation will have no effect. You can view a fleet's stopped actions using DescribeFleetAttributes or DescribeFleetLocationAttributes.

Learn more

Setting up GameLift fleets

Related actions

CreateFleet | UpdateFleetCapacity | PutScalingPolicy | DescribeEC2InstanceLimits | DescribeFleetAttributes | DescribeFleetLocationAttributes | UpdateFleetAttributes | StopFleetActions | DeleteFleet | All APIs by task
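As an illustrative sketch (fleet ID and location are placeholders), restarting auto-scaling for a single fleet location might look like this through the generated Python client.

```python
import botocore.session

gamelift = botocore.session.get_session().create_client("gamelift", region_name="us-west-2")

# Restart auto-scaling for one location of a multi-location fleet.
gamelift.start_fleet_actions(
    FleetId="fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912",  # placeholder fleet ID
    Actions=["AUTO_SCALING"],  # the auto-scaling activity described above
    Location="us-east-1",      # omit to target the fleet's home Region
)
```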

" }, "StartGameSessionPlacement":{ "name":"StartGameSessionPlacement", @@ -1137,7 +1219,7 @@ {"shape":"NotFoundException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Places a request for a new game session in a queue (see CreateGameSessionQueue). When processing a placement request, Amazon GameLift searches for available resources on the queue's destinations, scanning each until it finds resources or the placement request times out.

A game session placement request can also request player sessions. When a new game session is successfully created, Amazon GameLift creates a player session for each player included in the request.

When placing a game session, by default Amazon GameLift tries each fleet in the order they are listed in the queue configuration. Ideally, a queue's destinations are listed in preference order.

Alternatively, when requesting a game session with players, you can also provide latency data for each player in relevant Regions. Latency data indicates the performance lag a player experiences when connected to a fleet in the Region. Amazon GameLift uses latency data to reorder the list of destinations to place the game session in a Region with minimal lag. If latency data is provided for multiple players, Amazon GameLift calculates each Region's average lag for all players and reorders to get the best game play across all players.

To place a new game session request, specify the following:

If successful, a new game session placement is created.

To track the status of a placement request, call DescribeGameSessionPlacement and check the request's status. If the status is FULFILLED, a new game session has been created and a game session ARN and Region are referenced. If the placement request times out, you can resubmit the request or retry it with a different queue.

" + "documentation":"

Places a request for a new game session in a queue (see CreateGameSessionQueue). When processing a placement request, Amazon GameLift searches for available resources on the queue's destinations, scanning each until it finds resources or the placement request times out.

A game session placement request can also request player sessions. When a new game session is successfully created, Amazon GameLift creates a player session for each player included in the request.

When placing a game session, by default Amazon GameLift tries each fleet in the order they are listed in the queue configuration. Ideally, a queue's destinations are listed in preference order.

Alternatively, when requesting a game session with players, you can also provide latency data for each player in relevant Regions. Latency data indicates the performance lag a player experiences when connected to a fleet in the Region. Amazon GameLift uses latency data to reorder the list of destinations to place the game session in a Region with minimal lag. If latency data is provided for multiple players, Amazon GameLift calculates each Region's average lag for all players and reorders to get the best game play across all players.

To place a new game session request, specify the following:

If successful, a new game session placement is created.

To track the status of a placement request, call DescribeGameSessionPlacement and check the request's status. If the status is FULFILLED, a new game session has been created and a game session ARN and Region are referenced. If the placement request times out, you can resubmit the request or retry it with a different queue.

Related actions

CreateGameSession | DescribeGameSessions | DescribeGameSessionDetails | SearchGameSessions | UpdateGameSession | GetGameSessionLogUrl | StartGameSessionPlacement | DescribeGameSessionPlacement | StopGameSessionPlacement | All APIs by task
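A minimal sketch of a placement request via the generated Python client; the queue name, player ID, and latency figure are placeholders.

```python
import uuid
import botocore.session

gamelift = botocore.session.get_session().create_client("gamelift", region_name="us-west-2")

# Queue a placement request; latency data lets GameLift prefer low-lag Regions.
response = gamelift.start_game_session_placement(
    PlacementId=str(uuid.uuid4()),            # caller-supplied unique placement ID
    GameSessionQueueName="my-session-queue",  # placeholder queue name
    MaximumPlayerSessionCount=8,
    PlayerLatencies=[
        {"PlayerId": "player-1", "RegionIdentifier": "us-west-2", "LatencyInMilliseconds": 40.0},
    ],
)
print(response["GameSessionPlacement"]["Status"])  # e.g. PENDING
```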

" }, "StartMatchBackfill":{ "name":"StartMatchBackfill", @@ -1153,7 +1235,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Finds new players to fill open slots in an existing game session. This operation can be used to add players to matched games that start with fewer than the maximum number of players or to replace players when they drop out. By backfilling with the same matchmaker used to create the original match, you ensure that new players meet the match criteria and maintain a consistent experience throughout the game session. You can backfill a match anytime after a game session has been created.

To request a match backfill, specify a unique ticket ID, the existing game session's ARN, a matchmaking configuration, and a set of data that describes all current players in the game session. If successful, a match backfill ticket is created and returned with status set to QUEUED. The ticket is placed in the matchmaker's ticket pool and processed. Track the status of the ticket to respond as needed.

The process of finding backfill matches is essentially identical to the initial matchmaking process. The matchmaker searches the pool and groups tickets together to form potential matches, allowing only one backfill ticket per potential match. Once a match is formed, the matchmaker creates player sessions for the new players. All tickets in the match are updated with the game session's connection information, and the GameSession object is updated to include matchmaker data on the new players. For more detail on how match backfill requests are processed, see How Amazon GameLift FlexMatch Works.

Learn more

Backfill Existing Games with FlexMatch

How GameLift FlexMatch Works

Related operations

" + "documentation":"

Finds new players to fill open slots in currently running game sessions. The backfill match process is essentially identical to the process of forming new matches. Backfill requests use the same matchmaker that was used to make the original match, and they provide matchmaking data for all players currently in the game session. FlexMatch uses this information to select new players so that the backfilled match continues to meet the original match requirements.

When using FlexMatch with GameLift managed hosting, you can request a backfill match from a client service by calling this operation with a GameSession identifier. You also have the option of making backfill requests directly from your game server. In response to a request, FlexMatch creates player sessions for the new players, updates the GameSession resource, and sends updated matchmaking data to the game server. You can request a backfill match at any point after a game session is started. Each game session can have only one active backfill request at a time; a subsequent request automatically replaces the earlier request.

When using FlexMatch as a standalone component, request a backfill match by calling this operation without a game session identifier. As with newly formed matches, matchmaking results are returned in a matchmaking event so that your game can update the game session that is being backfilled.

To request a backfill match, specify a unique ticket ID, the original matchmaking configuration, and matchmaking data for all current players in the game session being backfilled. Optionally, specify the GameSession ARN. If successful, a match backfill ticket is created and returned with status set to QUEUED. Track the status of backfill tickets using the same method for tracking tickets for new matches.

Learn more

Backfill existing games with FlexMatch

Matchmaking events (reference)

How GameLift FlexMatch works

Related actions

StartMatchmaking | DescribeMatchmaking | StopMatchmaking | AcceptMatch | StartMatchBackfill | All APIs by task

" }, "StartMatchmaking":{ "name":"StartMatchmaking", @@ -1169,7 +1251,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Uses FlexMatch to create a game match for a group of players based on custom matchmaking rules. If you're also using GameLift hosting, a new game session is started for the matched players. Each matchmaking request identifies one or more players to find a match for, and specifies the type of match to build, including the team configuration and the rules for an acceptable match. When a matchmaking request identifies a group of players who want to play together, FlexMatch finds additional players to fill the match. Match type, rules, and other features are defined in a MatchmakingConfiguration.

To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, and include the players to be matched. For each player, you must also include the player attribute values that are required by the matchmaking configuration (in the rule set). If successful, a matchmaking ticket is returned with status set to QUEUED.

Track the status of the ticket to respond as needed. If you're also using GameLift hosting, a successfully completed ticket contains game session connection information. Ticket status updates are tracked using event notification through Amazon Simple Notification Service (SNS), which is defined in the matchmaking configuration.

Learn more

Add FlexMatch to a Game Client

Set Up FlexMatch Event Notification

FlexMatch Integration Roadmap

How GameLift FlexMatch Works

Related operations

" + "documentation":"

Uses FlexMatch to create a game match for a group of players based on custom matchmaking rules. With games that use GameLift managed hosting, this operation also triggers GameLift to find hosting resources and start a new game session for the new match. Each matchmaking request includes information on one or more players and specifies the FlexMatch matchmaker to use. When a request is for multiple players, FlexMatch attempts to build a match that includes all players in the request, placing them in the same team and finding additional players as needed to fill the match.

To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, and include the players to be matched. You must also include any player attributes that are required by the matchmaking configuration's rule set. If successful, a matchmaking ticket is returned with status set to QUEUED.

Track matchmaking events to respond as needed and acquire game session connection information for successfully completed matches. Ticket status updates are tracked using event notification through Amazon Simple Notification Service (SNS), which is defined in the matchmaking configuration.

Learn more

Add FlexMatch to a game client

Set Up FlexMatch event notification

How GameLift FlexMatch works

Related actions

StartMatchmaking | DescribeMatchmaking | StopMatchmaking | AcceptMatch | StartMatchBackfill | All APIs by task
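A hedged sketch of a matchmaking request through the generated Python client; the configuration name, player IDs, and the skill attribute are placeholders and must match your own rule set.

```python
import botocore.session

gamelift = botocore.session.get_session().create_client("gamelift", region_name="us-west-2")

# Submit two players to a matchmaker; attribute names must match the rule set.
response = gamelift.start_matchmaking(
    TicketId="ticket-0001",                     # optional caller-supplied ticket ID
    ConfigurationName="my-matchmaking-config",  # placeholder configuration name
    Players=[
        {"PlayerId": "player-1", "PlayerAttributes": {"skill": {"N": 23}}},
        {"PlayerId": "player-2", "PlayerAttributes": {"skill": {"N": 31}}},
    ],
)
print(response["MatchmakingTicket"]["Status"])  # QUEUED on success
```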

" }, "StopFleetActions":{ "name":"StopFleetActions", @@ -1185,7 +1267,7 @@ {"shape":"UnauthorizedException"}, {"shape":"NotFoundException"} ], - "documentation":"

Suspends activity on a fleet. Currently, this operation is used to stop a fleet's auto-scaling activity. It is used to temporarily stop triggering scaling events. The policies can be retained and auto-scaling activity can be restarted using StartFleetActions. You can view a fleet's stopped actions using DescribeFleetAttributes.

To stop fleet actions, specify the fleet ID and the type of actions to suspend. When auto-scaling fleet actions are stopped, Amazon GameLift no longer initiates scaling events except in response to manual changes using UpdateFleetCapacity.

Learn more

Setting up GameLift Fleets

Related operations

" + "documentation":"

Suspends certain types of activity in a fleet location. Currently, this operation is used to stop auto-scaling activity. For multi-location fleets, fleet actions are managed separately for each location.

Stopping fleet actions has several potential purposes. It allows you to temporarily stop auto-scaling activity but retain your scaling policies for use in the future. For multi-location fleets, you can set up fleet-wide auto-scaling, and then opt out of it for certain locations.

This operation can be used in the following ways:

If successful, GameLift no longer initiates scaling events except in response to manual changes using UpdateFleetCapacity. You can view a fleet's stopped actions using DescribeFleetAttributes or DescribeFleetLocationAttributes. Suspended activity can be restarted using StartFleetActions.

Learn more

Setting up GameLift Fleets

Related actions

CreateFleet | UpdateFleetCapacity | PutScalingPolicy | DescribeEC2InstanceLimits | DescribeFleetAttributes | DescribeFleetLocationAttributes | UpdateFleetAttributes | StopFleetActions | DeleteFleet | All APIs by task

" }, "StopGameSessionPlacement":{ "name":"StopGameSessionPlacement", @@ -1201,7 +1283,7 @@ {"shape":"NotFoundException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Cancels a game session placement that is in PENDING status. To stop a placement, provide the placement ID values. If successful, the placement is moved to CANCELLED status.

" + "documentation":"

Cancels a game session placement that is in PENDING status. To stop a placement, provide the placement ID values. If successful, the placement is moved to CANCELLED status.

Related actions

CreateGameSession | DescribeGameSessions | DescribeGameSessionDetails | SearchGameSessions | UpdateGameSession | GetGameSessionLogUrl | StartGameSessionPlacement | DescribeGameSessionPlacement | StopGameSessionPlacement | All APIs by task

" }, "StopMatchmaking":{ "name":"StopMatchmaking", @@ -1217,7 +1299,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Cancels a matchmaking ticket or match backfill ticket that is currently being processed. To stop the matchmaking operation, specify the ticket ID. If successful, work on the ticket is stopped, and the ticket status is changed to CANCELLED.

This call is also used to turn off automatic backfill for an individual game session. This is for game sessions that are created with a matchmaking configuration that has automatic backfill enabled. The ticket ID is included in the MatchmakerData of an updated game session object, which is provided to the game server.

If the operation is successful, the service sends back an empty JSON struct with the HTTP 200 response (not an empty HTTP body).

Learn more

Add FlexMatch to a Game Client

Related operations

" + "documentation":"

Cancels a matchmaking ticket or match backfill ticket that is currently being processed. To stop the matchmaking operation, specify the ticket ID. If successful, work on the ticket is stopped, and the ticket status is changed to CANCELLED.

This call is also used to turn off automatic backfill for an individual game session. This is for game sessions that are created with a matchmaking configuration that has automatic backfill enabled. The ticket ID is included in the MatchmakerData of an updated game session object, which is provided to the game server.

If the operation is successful, the service sends back an empty JSON struct with the HTTP 200 response (not an empty HTTP body).

Learn more

Add FlexMatch to a game client

Related actions

StartMatchmaking | DescribeMatchmaking | StopMatchmaking | AcceptMatch | StartMatchBackfill | All APIs by task
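For completeness, cancelling a ticket is a single call through the generated Python client; the ticket ID is a placeholder.

```python
import botocore.session

gamelift = botocore.session.get_session().create_client("gamelift", region_name="us-west-2")

# Cancel an in-progress matchmaking or backfill ticket by its ID.
gamelift.stop_matchmaking(TicketId="ticket-0001")  # placeholder ticket ID
```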

" }, "SuspendGameServerGroup":{ "name":"SuspendGameServerGroup", @@ -1233,7 +1315,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

Temporarily stops activity on a game server group without terminating instances or the game server group. You can restart activity by calling ResumeGameServerGroup. You can suspend the following activity:

To suspend activity, specify a game server group ARN and the type of activity to be suspended. If successful, a GameServerGroup object is returned showing that the activity is listed in SuspendedActions.

Learn more

GameLift FleetIQ Guide

Related operations

" + "documentation":"

This operation is used with the GameLift FleetIQ solution and game server groups.

Temporarily stops activity on a game server group without terminating instances or the game server group. You can restart activity by calling ResumeGameServerGroup. You can suspend the following activity:

To suspend activity, specify a game server group ARN and the type of activity to be suspended. If successful, a GameServerGroup object is returned showing that the activity is listed in SuspendedActions.

Learn more

GameLift FleetIQ Guide

Related actions

CreateGameServerGroup | ListGameServerGroups | DescribeGameServerGroup | UpdateGameServerGroup | DeleteGameServerGroup | ResumeGameServerGroup | SuspendGameServerGroup | DescribeGameServerInstances | All APIs by task

" }, "TagResource":{ "name":"TagResource", @@ -1249,7 +1331,7 @@ {"shape":"TaggingFailedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Assigns a tag to a GameLift resource. AWS resource tags provide an additional management tool set. You can use tags to organize resources, create IAM permissions policies to manage access to groups of resources, customize AWS cost breakdowns, etc. This operation handles the permissions necessary to manage tags for the following GameLift resource types:

To add a tag to a resource, specify the unique ARN value for the resource and provide a tag list containing one or more tags. The operation succeeds even if the list includes tags that are already assigned to the specified resource.

Learn more

Tagging AWS Resources in the AWS General Reference

AWS Tagging Strategies

Related operations

" + "documentation":"

Assigns a tag to a GameLift resource. AWS resource tags provide an additional management tool set. You can use tags to organize resources, create IAM permissions policies to manage access to groups of resources, customize AWS cost breakdowns, etc. This operation handles the permissions necessary to manage tags for the following GameLift resource types:

To add a tag to a resource, specify the unique ARN value for the resource and provide a tag list containing one or more tags. The operation succeeds even if the list includes tags that are already assigned to the specified resource.

Learn more

Tagging AWS Resources in the AWS General Reference

AWS Tagging Strategies

Related actions

TagResource | UntagResource | ListTagsForResource | All APIs by task
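A minimal sketch of tagging a fleet through the generated Python client; the ARN and tag values are placeholders, and the same call shape applies to the other taggable resource types.

```python
import botocore.session

gamelift = botocore.session.get_session().create_client("gamelift", region_name="us-west-2")

# Assign a tag to a fleet, identified by its ARN.
gamelift.tag_resource(
    ResourceARN="arn:aws:gamelift:us-west-2:123456789012:fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912",
    Tags=[{"Key": "team", "Value": "game-services"}],  # placeholder tag
)
```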

" }, "UntagResource":{ "name":"UntagResource", @@ -1265,7 +1347,7 @@ {"shape":"TaggingFailedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Removes a tag that is assigned to a GameLift resource. Resource tags are used to organize AWS resources for a range of purposes. This operation handles the permissions necessary to manage tags for the following GameLift resource types:

To remove a tag from a resource, specify the unique ARN value for the resource and provide a string list containing one or more tags to be removed. This operation succeeds even if the list includes tags that are not currently assigned to the specified resource.

Learn more

Tagging AWS Resources in the AWS General Reference

AWS Tagging Strategies

Related operations

" + "documentation":"

Removes a tag that is assigned to a GameLift resource. Resource tags are used to organize AWS resources for a range of purposes. This operation handles the permissions necessary to manage tags for the following GameLift resource types:

To remove a tag from a resource, specify the unique ARN value for the resource and provide a string list containing one or more tags to be removed. This operation succeeds even if the list includes tags that are not currently assigned to the specified resource.

Learn more

Tagging AWS Resources in the AWS General Reference

AWS Tagging Strategies

Related actions

TagResource | UntagResource | ListTagsForResource | All APIs by task

" }, "UpdateAlias":{ "name":"UpdateAlias", @@ -1281,7 +1363,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Updates properties for an alias. To update properties, specify the alias ID to be updated and provide the information to be changed. To reassign an alias to another fleet, provide an updated routing strategy. If successful, the updated alias record is returned.

" + "documentation":"

Updates properties for an alias. To update properties, specify the alias ID to be updated and provide the information to be changed. To reassign an alias to another fleet, provide an updated routing strategy. If successful, the updated alias record is returned.

Related actions

CreateAlias | ListAliases | DescribeAlias | UpdateAlias | DeleteAlias | ResolveAlias | All APIs by task

" }, "UpdateBuild":{ "name":"UpdateBuild", @@ -1297,7 +1379,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Updates metadata in a build resource, including the build name and version. To update the metadata, specify the build ID to update and provide the new values. If successful, a build object containing the updated metadata is returned.

Learn more

Upload a Custom Server Build

Related operations

" + "documentation":"

Updates metadata in a build resource, including the build name and version. To update the metadata, specify the build ID to update and provide the new values. If successful, a build object containing the updated metadata is returned.

Learn more

Upload a Custom Server Build

Related actions

CreateBuild | ListBuilds | DescribeBuild | UpdateBuild | DeleteBuild | All APIs by task

" }, "UpdateFleetAttributes":{ "name":"UpdateFleetAttributes", @@ -1316,7 +1398,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Updates fleet properties, including name and description, for a fleet. To update metadata, specify the fleet ID and the property values that you want to change. If successful, the fleet ID for the updated fleet is returned.

Learn more

Setting up GameLift Fleets

Related operations

" + "documentation":"

Updates a fleet's mutable attributes, including game session protection and resource creation limits.

To update fleet attributes, specify the fleet ID and the property values that you want to change.

If successful, an updated FleetAttributes object is returned.

Learn more

Setting up GameLift fleets

Related actions

CreateFleetLocations | UpdateFleetAttributes | UpdateFleetCapacity | UpdateFleetPortSettings | UpdateRuntimeConfiguration | StopFleetActions | StartFleetActions | PutScalingPolicy | DeleteFleet | DeleteFleetLocations | DeleteScalingPolicy | All APIs by task

" }, "UpdateFleetCapacity":{ "name":"UpdateFleetCapacity", @@ -1335,7 +1417,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Updates capacity settings for a fleet. Use this operation to specify the number of EC2 instances (hosts) that you want this fleet to contain. Before calling this operation, you may want to call DescribeEC2InstanceLimits to get the maximum capacity based on the fleet's EC2 instance type.

Specify the minimum and maximum number of instances. Amazon GameLift will not change fleet capacity to values that fall outside of this range. This is particularly important when using auto-scaling (see PutScalingPolicy) to allow capacity to adjust based on player demand while imposing limits on automatic adjustments.

To update fleet capacity, specify the fleet ID and the number of instances you want the fleet to host. If successful, Amazon GameLift starts or terminates instances so that the fleet's active instance count matches the desired instance count. You can view a fleet's current capacity information by calling DescribeFleetCapacity. If the desired instance count is higher than the instance type's limit, the \"Limit Exceeded\" exception occurs.

Learn more

Setting up GameLift Fleets

Related operations

" + "documentation":"

Updates capacity settings for a fleet. For fleets with multiple locations, use this operation to manage capacity settings in each location individually. Fleet capacity determines the number of game sessions and players that can be hosted based on the fleet configuration. Use this operation to set the following fleet capacity properties:

This operation can be used in the following ways:

If successful, capacity settings are updated immediately. In response to a change in desired capacity, GameLift initiates steps to start new instances or terminate existing instances in the requested fleet location. This continues until the location's active instance count matches the new desired instance count. You can track a fleet's current capacity by calling DescribeFleetCapacity or DescribeFleetLocationCapacity. If the requested desired instance count is higher than the instance type's limit, the LimitExceeded exception occurs.

Learn more

Scaling fleet capacity

Related actions

CreateFleetLocations | UpdateFleetAttributes | UpdateFleetCapacity | UpdateFleetPortSettings | UpdateRuntimeConfiguration | StopFleetActions | StartFleetActions | PutScalingPolicy | DeleteFleet | DeleteFleetLocations | DeleteScalingPolicy | All APIs by task
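A hedged sketch of updating capacity for one fleet location through the generated Python client; the fleet ID, location, and instance counts are placeholders.

```python
import botocore.session

gamelift = botocore.session.get_session().create_client("gamelift", region_name="us-west-2")

# Set desired capacity and scaling bounds for one location of a fleet.
response = gamelift.update_fleet_capacity(
    FleetId="fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912",  # placeholder fleet ID
    DesiredInstances=3,
    MinSize=1,
    MaxSize=10,
    Location="us-east-1",  # omit for single-location fleets
)
print(response["FleetId"], response.get("Location"))
```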

" }, "UpdateFleetPortSettings":{ "name":"UpdateFleetPortSettings", @@ -1354,7 +1436,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Updates port settings for a fleet. To update settings, specify the fleet ID to be updated and list the permissions you want to update. List the permissions you want to add in InboundPermissionAuthorizations, and permissions you want to remove in InboundPermissionRevocations. Permissions to be removed must match existing fleet permissions. If successful, the fleet ID for the updated fleet is returned.

Learn more

Setting up GameLift Fleets

Related operations

" + "documentation":"

Updates permissions that allow inbound traffic to connect to game sessions that are being hosted on instances in the fleet.

To update settings, specify the fleet ID to be updated and specify the changes to be made. List the permissions you want to add in InboundPermissionAuthorizations, and permissions you want to remove in InboundPermissionRevocations. Permissions to be removed must match existing fleet permissions.

If successful, the fleet ID for the updated fleet is returned. For fleets with remote locations, port setting updates can take time to propagate across all locations. You can check the status of updates in each location by calling DescribeFleetPortSettings with a location name.

Learn more

Setting up GameLift fleets

Related actions

CreateFleetLocations | UpdateFleetAttributes | UpdateFleetCapacity | UpdateFleetPortSettings | UpdateRuntimeConfiguration | StopFleetActions | StartFleetActions | PutScalingPolicy | DeleteFleet | DeleteFleetLocations | DeleteScalingPolicy | All APIs by task

" }, "UpdateGameServer":{ "name":"UpdateGameServer", @@ -1370,7 +1452,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

Updates information about a registered game server to help GameLift FleetIQ to track game server availability. This operation is called by a game server process that is running on an instance in a game server group.

Use this operation to update the following types of game server information. You can make all three types of updates in the same request:

Once a game server is successfully updated, the relevant statuses and timestamps are updated.

Learn more

GameLift FleetIQ Guide

Related operations

" + "documentation":"

This operation is used with the GameLift FleetIQ solution and game server groups.

Updates information about a registered game server to help GameLift FleetIQ to track game server availability. This operation is called by a game server process that is running on an instance in a game server group.

Use this operation to update the following types of game server information. You can make all three types of updates in the same request:

Once a game server is successfully updated, the relevant statuses and timestamps are updated.

Learn more

GameLift FleetIQ Guide

Related actions

RegisterGameServer | ListGameServers | ClaimGameServer | DescribeGameServer | UpdateGameServer | DeregisterGameServer | All APIs by task

" }, "UpdateGameServerGroup":{ "name":"UpdateGameServerGroup", @@ -1386,7 +1468,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

Updates GameLift FleetIQ-specific properties for a game server group. Many Auto Scaling group properties are updated on the Auto Scaling group directly, including the launch template, Auto Scaling policies, and maximum/minimum/desired instance counts.

To update the game server group, specify the game server group ID and provide the updated values. Before applying the updates, the new values are validated to ensure that GameLift FleetIQ can continue to perform instance balancing activity. If successful, a GameServerGroup object is returned.

Learn more

GameLift FleetIQ Guide

Related operations

" + "documentation":"

This operation is used with the GameLift FleetIQ solution and game server groups.

Updates GameLift FleetIQ-specific properties for a game server group. Many Auto Scaling group properties are updated on the Auto Scaling group directly, including the launch template, Auto Scaling policies, and maximum/minimum/desired instance counts.

To update the game server group, specify the game server group ID and provide the updated values. Before applying the updates, the new values are validated to ensure that GameLift FleetIQ can continue to perform instance balancing activity. If successful, a GameServerGroup object is returned.

Learn more

GameLift FleetIQ Guide

Related actions

CreateGameServerGroup | ListGameServerGroups | DescribeGameServerGroup | UpdateGameServerGroup | DeleteGameServerGroup | ResumeGameServerGroup | SuspendGameServerGroup | DescribeGameServerInstances | All APIs by task

" }, "UpdateGameSession":{ "name":"UpdateGameSession", @@ -1404,7 +1486,7 @@ {"shape":"InvalidGameSessionStatusException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Updates game session properties. This includes the session name, maximum player count, protection policy, which controls whether or not an active game session can be terminated during a scale-down event, and the player session creation policy, which controls whether or not new players can join the session. To update a game session, specify the game session ID and the values you want to change. If successful, an updated GameSession object is returned.

" + "documentation":"

Updates the mutable properties of a game session.

To update a game session, specify the game session ID and the values you want to change.

If successful, the updated GameSession object is returned.

Related actions

CreateGameSession | DescribeGameSessions | DescribeGameSessionDetails | SearchGameSessions | UpdateGameSession | GetGameSessionLogUrl | StartGameSessionPlacement | DescribeGameSessionPlacement | StopGameSessionPlacement | All APIs by task
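A minimal sketch of updating a game session through the generated Python client; the game session ARN is a placeholder.

```python
import botocore.session

gamelift = botocore.session.get_session().create_client("gamelift", region_name="us-west-2")

# Close a session to new players and protect it from scale-down termination.
response = gamelift.update_game_session(
    GameSessionId="arn:aws:gamelift:us-west-2::gamesession/fleet-a1234567/gsess-abc123",  # placeholder
    PlayerSessionCreationPolicy="DENY_ALL",
    ProtectionPolicy="FullProtection",
)
print(response["GameSession"]["Status"])
```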

" }, "UpdateGameSessionQueue":{ "name":"UpdateGameSessionQueue", @@ -1420,7 +1502,7 @@ {"shape":"NotFoundException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Updates settings for a game session queue, which determines how new game session requests in the queue are processed. To update settings, specify the queue name to be updated and provide the new settings. When updating destinations, provide a complete list of destinations.

Learn more

Using Multi-Region Queues

Related operations

" + "documentation":"

Updates the configuration of a game session queue, which determines how the queue processes new game session requests. To update settings, specify the queue name to be updated and provide the new settings. When updating destinations, provide a complete list of destinations.

Learn more

Using Multi-Region Queues

Related actions

CreateGameSessionQueue | DescribeGameSessionQueues | UpdateGameSessionQueue | DeleteGameSessionQueue | All APIs by task

" }, "UpdateMatchmakingConfiguration":{ "name":"UpdateMatchmakingConfiguration", @@ -1436,7 +1518,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Updates settings for a FlexMatch matchmaking configuration. These changes affect all matches and game sessions that are created after the update. To update settings, specify the configuration name to be updated and provide the new settings.

Learn more

Design a FlexMatch Matchmaker

Related operations

" + "documentation":"

Updates settings for a FlexMatch matchmaking configuration. These changes affect all matches and game sessions that are created after the update. To update settings, specify the configuration name to be updated and provide the new settings.

Learn more

Design a FlexMatch matchmaker

Related actions

CreateMatchmakingConfiguration | DescribeMatchmakingConfigurations | UpdateMatchmakingConfiguration | DeleteMatchmakingConfiguration | CreateMatchmakingRuleSet | DescribeMatchmakingRuleSets | ValidateMatchmakingRuleSet | DeleteMatchmakingRuleSet | All APIs by task

" }, "UpdateRuntimeConfiguration":{ "name":"UpdateRuntimeConfiguration", @@ -1453,7 +1535,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InvalidFleetStatusException"} ], - "documentation":"

Updates the current runtime configuration for the specified fleet, which tells Amazon GameLift how to launch server processes on instances in the fleet. You can update a fleet's runtime configuration at any time after the fleet is created; it does not need to be in an ACTIVE status.

To update runtime configuration, specify the fleet ID and provide a RuntimeConfiguration object with an updated set of server process configurations.

Each instance in an Amazon GameLift fleet checks regularly for an updated runtime configuration and changes how it launches server processes to comply with the latest version. Existing server processes are not affected by the update; runtime configuration changes are applied gradually as existing processes shut down and new processes are launched during Amazon GameLift's normal process recycling activity.

Learn more

Setting up GameLift Fleets

Related operations

" + "documentation":"

Updates the current runtime configuration for the specified fleet, which tells GameLift how to launch server processes on all instances in the fleet. You can update a fleet's runtime configuration at any time after the fleet is created; it does not need to be in ACTIVE status.

To update runtime configuration, specify the fleet ID and provide a RuntimeConfiguration with an updated set of server process configurations.

If successful, the fleet's runtime configuration settings are updated. Each instance in the fleet regularly checks for and retrieves updated runtime configurations. Instances immediately begin complying with the new configuration by launching new server processes or not replacing existing processes when they shut down. Updating a fleet's runtime configuration never affects existing server processes.

Learn more

Setting up GameLift fleets

Related actions

CreateFleetLocations | UpdateFleetAttributes | UpdateFleetCapacity | UpdateFleetPortSettings | UpdateRuntimeConfiguration | StopFleetActions | StartFleetActions | PutScalingPolicy | DeleteFleet | DeleteFleetLocations | DeleteScalingPolicy | All APIs by task
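A hedged sketch of replacing a fleet's runtime configuration through the generated Python client; the fleet ID, launch path, and launch parameters are placeholders.

```python
import botocore.session

gamelift = botocore.session.get_session().create_client("gamelift", region_name="us-west-2")

# Run ten concurrent server processes per instance from one executable.
gamelift.update_runtime_configuration(
    FleetId="fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912",  # placeholder fleet ID
    RuntimeConfiguration={
        "ServerProcesses": [
            {
                "LaunchPath": "/local/game/MyServer",  # placeholder build executable
                "Parameters": "-port 7777",            # placeholder launch parameters
                "ConcurrentExecutions": 10,
            }
        ],
        "GameSessionActivationTimeoutSeconds": 300,
    },
)
```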

" }, "UpdateScript":{ "name":"UpdateScript", @@ -1469,7 +1551,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Updates Realtime script metadata and content.

To update script metadata, specify the script ID and provide updated name and/or version values.

To update script content, provide an updated zip file by pointing to either a local file or an Amazon S3 bucket location. You can use either method regardless of how the original script was uploaded. Use the Version parameter to track updates to the script.

If the call is successful, the updated metadata is stored in the script record and a revised script is uploaded to the Amazon GameLift service. Once the script is updated and acquired by a fleet instance, the new version is used for all new game sessions.

Learn more

Amazon GameLift Realtime Servers

Related operations

" + "documentation":"

Updates Realtime script metadata and content.

To update script metadata, specify the script ID and provide updated name and/or version values.

To update script content, provide an updated zip file by pointing to either a local file or an Amazon S3 bucket location. You can use either method regardless of how the original script was uploaded. Use the Version parameter to track updates to the script.

If the call is successful, the updated metadata is stored in the script record and a revised script is uploaded to the Amazon GameLift service. Once the script is updated and acquired by a fleet instance, the new version is used for all new game sessions.

Learn more

Amazon GameLift Realtime Servers

Related actions

CreateScript | ListScripts | DescribeScript | UpdateScript | DeleteScript | All APIs by task

" }, "ValidateMatchmakingRuleSet":{ "name":"ValidateMatchmakingRuleSet", @@ -1484,7 +1566,7 @@ {"shape":"UnsupportedRegionException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Validates the syntax of a matchmaking rule or rule set. This operation checks that the rule set is using syntactically correct JSON and that it conforms to allowed property expressions. To validate syntax, provide a rule set JSON string.

Learn more

Related operations

" + "documentation":"

Validates the syntax of a matchmaking rule or rule set. This operation checks that the rule set is using syntactically correct JSON and that it conforms to allowed property expressions. To validate syntax, provide a rule set JSON string.

Learn more

Related actions

CreateMatchmakingConfiguration | DescribeMatchmakingConfigurations | UpdateMatchmakingConfiguration | DeleteMatchmakingConfiguration | CreateMatchmakingRuleSet | DescribeMatchmakingRuleSets | ValidateMatchmakingRuleSet | DeleteMatchmakingRuleSet | All APIs by task
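A hedged sketch of validating a rule set through the generated Python client; the rule set body shown is an illustrative minimal example, not a production rule set.

```python
import json
import botocore.session

gamelift = botocore.session.get_session().create_client("gamelift", region_name="us-west-2")

# Illustrative minimal rule set body; real rule sets usually add rules and expansions.
rule_set = {
    "name": "simple-rule-set",
    "playerAttributes": [{"name": "skill", "type": "number", "default": 10}],
    "teams": [{"name": "players", "minPlayers": 2, "maxPlayers": 8}],
}

response = gamelift.validate_matchmaking_rule_set(RuleSetBody=json.dumps(rule_set))
print(response["Valid"])
```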

" } }, "shapes":{ @@ -1528,7 +1610,7 @@ "members":{ "AliasId":{ "shape":"AliasId", - "documentation":"

A unique identifier for an alias. Alias IDs are unique within a Region.

" + "documentation":"

A unique identifier for the alias. Alias IDs are unique within a Region.

" }, "Name":{ "shape":"NonBlankAndLengthConstraintString", @@ -1536,7 +1618,7 @@ }, "AliasArn":{ "shape":"AliasArn", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift alias resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift alias ARN, the resource ID matches the alias ID value.

" + "documentation":"

The Amazon Resource Name (ARN) that is assigned to a GameLift alias resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::alias/alias-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. In a GameLift alias ARN, the resource ID matches the alias ID value.

" }, "Description":{ "shape":"FreeText", @@ -1548,14 +1630,14 @@ }, "CreationTime":{ "shape":"Timestamp", - "documentation":"

A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + "documentation":"

A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" }, "LastUpdatedTime":{ "shape":"Timestamp", - "documentation":"

The time that this data object was last modified. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + "documentation":"

The time that this data object was last modified. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" } }, - "documentation":"

Properties that describe an alias resource.

" + "documentation":"

Properties that describe an alias resource.

Related actions

CreateAlias | ListAliases | DescribeAlias | UpdateAlias | DeleteAlias | ResolveAlias | All APIs by task

" }, "AliasArn":{ "type":"string", @@ -1652,11 +1734,11 @@ "members":{ "BuildId":{ "shape":"BuildId", - "documentation":"

A unique identifier for a build.

" + "documentation":"

A unique identifier for the build.

" }, "BuildArn":{ "shape":"BuildArn", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift build resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift build ARN, the resource ID matches the BuildId value.

" + "documentation":"

The Amazon Resource Name (ARN) that is assigned to a GameLift build resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::build/build-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. In a GameLift build ARN, the resource ID matches the BuildId value.

" }, "Name":{ "shape":"FreeText", @@ -1680,10 +1762,10 @@ }, "CreationTime":{ "shape":"Timestamp", - "documentation":"

Time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + "documentation":"

A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" } }, - "documentation":"

Properties describing a custom game build.

Related operations

" + "documentation":"

Properties describing a custom game build.

Related actions

CreateBuild | ListBuilds | DescribeBuild | UpdateBuild | DeleteBuild | All APIs by task

" }, "BuildArn":{ "type":"string", @@ -1715,10 +1797,10 @@ "members":{ "CertificateType":{ "shape":"CertificateType", - "documentation":"

Indicates whether a TLS/SSL certificate was generated for a fleet.

" + "documentation":"

Indicates whether a TLS/SSL certificate is generated for a fleet.

Valid values include:

" } }, - "documentation":"

Information about the use of a TLS/SSL certificate for a fleet. TLS certificate generation is enabled at the fleet level, with one certificate generated for the fleet. When this feature is enabled, the certificate can be retrieved using the GameLift Server SDK call GetInstanceCertificate. All instances in a fleet share the same certificate.

" + "documentation":"

Determines whether a TLS/SSL certificate is generated for a fleet. This feature must be enabled when creating the fleet. All instances in a fleet share the same certificate. The certificate can be retrieved by calling the GameLift Server SDK operation GetInstanceCertificate.

A fleet's certificate configuration is part of FleetAttributes.

" }, "CertificateType":{ "type":"string", @@ -1733,7 +1815,7 @@ "members":{ "GameServerGroupName":{ "shape":"GameServerGroupNameOrArn", - "documentation":"

A unique identifier for the game server group where the game server is running. Use either the GameServerGroup name or ARN value.. If you are not specifying a game server to claim, this value identifies where you want GameLift FleetIQ to look for an available game server to claim.

" + "documentation":"

A unique identifier for the game server group where the game server is running. Use either the GameServerGroup name or ARN value. If you are not specifying a game server to claim, this value identifies where you want GameLift FleetIQ to look for an available game server to claim.

" }, "GameServerId":{ "shape":"GameServerId", @@ -1820,7 +1902,7 @@ }, "StorageLocation":{ "shape":"S3Location", - "documentation":"

The location where your game build files are stored. Use this parameter only when creating a build using files that are stored in an S3 bucket that you own. Identify an S3 bucket name and key, which must be in the same Region where you're creating a build. This parameter must also specify the ARN for an IAM role that you've set up to give Amazon GameLift access to your S3 bucket. To call this operation with a storage location, you must have IAM PassRole permission. For more details on IAM roles and PassRole permissions, see Set up a role for GameLift access.

" + "documentation":"

Information indicating where your game build files are stored. Use this parameter only when creating a build with files stored in an Amazon S3 bucket that you own. The storage location must specify an Amazon S3 bucket name and key. The location must also specify a role ARN that you set up to allow Amazon GameLift to access your Amazon S3 bucket. The S3 bucket and your new build must be in the same Region.

" }, "OperatingSystem":{ "shape":"OperatingSystem", @@ -1842,7 +1924,7 @@ }, "UploadCredentials":{ "shape":"AwsCredentials", - "documentation":"

This element is returned only when the operation is called without a storage location. It contains credentials to use when you are uploading a build file to an S3 bucket that is owned by Amazon GameLift. Credentials have a limited life span. To refresh these credentials, call RequestUploadCredentials.

" + "documentation":"

This element is returned only when the operation is called without a storage location. It contains credentials to use when you are uploading a build file to an Amazon S3 bucket that is owned by Amazon GameLift. Credentials have a limited life span. To refresh these credentials, call RequestUploadCredentials.

" }, "StorageLocation":{ "shape":"S3Location", @@ -1864,85 +1946,129 @@ }, "Description":{ "shape":"NonZeroAndMaxString", - "documentation":"

A human-readable description of a fleet.

" + "documentation":"

A human-readable description of the fleet.

" }, "BuildId":{ "shape":"BuildIdOrArn", - "documentation":"

A unique identifier for a build to be deployed on the new fleet. You can use either the build ID or ARN value. The custom game server build must have been successfully uploaded to Amazon GameLift and be in a READY status. This fleet setting cannot be changed once the fleet is created.

" + "documentation":"

The unique identifier for a custom game server build to be deployed on fleet instances. You can use either the build ID or ARN. The build must be uploaded to GameLift and in READY status. This fleet property cannot be changed later.

" }, "ScriptId":{ "shape":"ScriptIdOrArn", - "documentation":"

A unique identifier for a Realtime script to be deployed on the new fleet. You can use either the script ID or ARN value. The Realtime script must have been successfully uploaded to Amazon GameLift. This fleet setting cannot be changed once the fleet is created.

" + "documentation":"

The unique identifier for a Realtime configuration script to be deployed on fleet instances. You can use either the script ID or ARN. Scripts must be uploaded to GameLift prior to creating the fleet. This fleet property cannot be changed later.

" }, "ServerLaunchPath":{ "shape":"NonZeroAndMaxString", - "documentation":"

This parameter is no longer used. Instead, specify a server launch path using the RuntimeConfiguration parameter. Requests that specify a server launch path and launch parameters instead of a runtime configuration will continue to work.

" + "documentation":"

This parameter is no longer used. Specify a server launch path using the RuntimeConfiguration parameter. Requests that use this parameter instead continue to be valid.

" }, "ServerLaunchParameters":{ "shape":"NonZeroAndMaxString", - "documentation":"

This parameter is no longer used. Instead, specify server launch parameters in the RuntimeConfiguration parameter. (Requests that specify a server launch path and launch parameters instead of a runtime configuration will continue to work.)

" + "documentation":"

This parameter is no longer used. Specify server launch parameters using the RuntimeConfiguration parameter. Requests that use this parameter instead continue to be valid.

" }, "LogPaths":{ "shape":"StringList", - "documentation":"

This parameter is no longer used. Instead, to specify where Amazon GameLift should store log files once a server process shuts down, use the Amazon GameLift server API ProcessReady() and specify one or more directory paths in logParameters. See more information in the Server API Reference.

" + "documentation":"

This parameter is no longer used. To specify where GameLift should store log files once a server process shuts down, use the GameLift server API ProcessReady() and specify one or more directory paths in logParameters. See more information in the Server API Reference.

" }, "EC2InstanceType":{ "shape":"EC2InstanceType", - "documentation":"

The name of an EC2 instance type that is supported in Amazon GameLift. A fleet instance type determines the computing resources of each instance in the fleet, including CPU, memory, storage, and networking capacity. Amazon GameLift supports the following EC2 instance types. See Amazon EC2 Instance Types for detailed descriptions.

" + "documentation":"

The GameLift-supported EC2 instance type to use for all fleet instances. Instance type determines the computing resources that will be used to host your game servers, including CPU, memory, storage, and networking capacity. See Amazon EC2 Instance Types for detailed descriptions of EC2 instance types.

" }, "EC2InboundPermissions":{ "shape":"IpPermissionsList", - "documentation":"

Range of IP addresses and port settings that permit inbound traffic to access game sessions that are running on the fleet. For fleets using a custom game build, this parameter is required before game sessions running on the fleet can accept connections. For Realtime Servers fleets, Amazon GameLift automatically sets TCP and UDP ranges for use by the Realtime servers. You can specify multiple permission settings or add more by updating the fleet.

" + "documentation":"

The allowed IP address ranges and port settings that allow inbound traffic to access game sessions on this fleet. If the fleet is hosting a custom game build, this property must be set before players can connect to game sessions. For Realtime Servers fleets, GameLift automatically sets TCP and UDP ranges.

" }, "NewGameSessionProtectionPolicy":{ "shape":"ProtectionPolicy", - "documentation":"

A game session protection policy to apply to all instances in this fleet. If this parameter is not set, instances in this fleet default to no protection. You can change a fleet's protection policy using UpdateFleetAttributes, but this change will only affect sessions created after the policy change. You can also set protection for individual instances using UpdateGameSession.

" + "documentation":"

The status of termination protection for active game sessions on the fleet. By default, this property is set to NoProtection. You can also set game session protection for an individual game session by calling UpdateGameSession.

" }, "RuntimeConfiguration":{ "shape":"RuntimeConfiguration", - "documentation":"

Instructions for launching server processes on each instance in the fleet. Server processes run either a custom game build executable or a Realtime script. The runtime configuration defines the server executables or launch script file, launch parameters, and the number of processes to run concurrently on each instance. When creating a fleet, the runtime configuration must have at least one server process configuration; otherwise the request fails with an invalid request exception. (This parameter replaces the parameters ServerLaunchPath and ServerLaunchParameters, although requests that contain values for these parameters instead of a runtime configuration will continue to work.) This parameter is required unless the parameters ServerLaunchPath and ServerLaunchParameters are defined. Runtime configuration replaced these parameters, but fleets that use them will continue to work.

" + "documentation":"

Instructions for how to launch and maintain server processes on instances in the fleet. The runtime configuration defines one or more server process configurations, each identifying a build executable or Realtime script file and the number of processes of that type to run concurrently.

The RuntimeConfiguration parameter is required unless the fleet is being configured using the older parameters ServerLaunchPath and ServerLaunchParameters, which are still supported for backward compatibility.

" }, "ResourceCreationLimitPolicy":{ "shape":"ResourceCreationLimitPolicy", - "documentation":"

A policy that limits the number of game sessions an individual player can create over a span of time for this fleet.

" + "documentation":"

A policy that limits the number of game sessions that an individual player can create on instances in this fleet within a specified span of time.

" }, "MetricGroups":{ "shape":"MetricGroupList", - "documentation":"

The name of an Amazon CloudWatch metric group to add this fleet to. A metric group aggregates the metrics for all fleets in the group. Specify an existing metric group name, or provide a new name to create a new metric group. A fleet can only be included in one metric group at a time.

" + "documentation":"

The name of an AWS CloudWatch metric group to add this fleet to. A metric group is used to aggregate the metrics for multiple fleets. You can specify an existing metric group name or set a new name to create a new metric group. A fleet can be included in only one metric group at a time.

" }, "PeerVpcAwsAccountId":{ "shape":"NonZeroAndMaxString", - "documentation":"

A unique identifier for the AWS account with the VPC that you want to peer your Amazon GameLift fleet with. You can find your account ID in the AWS Management Console under account settings.

" + "documentation":"

When peering your GameLift fleet with a VPC, this is the unique identifier for the AWS account that owns the VPC. You can find your account ID in the AWS Management Console under account settings.

" }, "PeerVpcId":{ "shape":"NonZeroAndMaxString", - "documentation":"

A unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same Region as your fleet. To look up a VPC ID, use the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

" + "documentation":"

A unique identifier for a VPC with resources to be accessed by your GameLift fleet. The VPC must be in the same Region as your fleet. To look up a VPC ID, use the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with GameLift Fleets.

" }, "FleetType":{ "shape":"FleetType", - "documentation":"

Indicates whether to use On-Demand instances or Spot instances for this fleet. If empty, the default is ON_DEMAND. Both categories of instances use identical hardware and configurations based on the instance type selected for this fleet. Learn more about On-Demand versus Spot Instances.

" + "documentation":"

Indicates whether to use On-Demand or Spot instances for this fleet. By default, this property is set to ON_DEMAND. Learn more about when to use On-Demand versus Spot Instances. This property cannot be changed after the fleet is created.

" }, "InstanceRoleArn":{ "shape":"NonEmptyString", - "documentation":"

A unique identifier for an AWS IAM role that manages access to your AWS services. Fleets with an instance role ARN allow applications that are running on the fleet's instances to assume the role. Learn more about using on-box credentials for your game servers at Access external resources from a game server. To call this operation with instance role ARN, you must have IAM PassRole permissions. See IAM policy examples for GameLift.

" + "documentation":"

A unique identifier for an AWS IAM role that manages access to your AWS services. With an instance role ARN set, any application that runs on an instance in this fleet can assume the role, including install scripts, server processes, and daemons (background processes). Create a role or look up a role's ARN by using the IAM dashboard in the AWS Management Console. Learn more about using on-box credentials for your game servers at Access external resources from a game server. This property cannot be changed after the fleet is created.

" }, "CertificateConfiguration":{ "shape":"CertificateConfiguration", - "documentation":"

Indicates whether to generate a TLS/SSL certificate for the new fleet. TLS certificates are used for encrypting traffic between game clients and game servers running on GameLift. If this parameter is not specified, the default value, DISABLED, is used. This fleet setting cannot be changed once the fleet is created. Learn more at Securing Client/Server Communication.

Note: This feature requires the AWS Certificate Manager (ACM) service, which is available in the AWS global partition but not in all other partitions. When working in a partition that does not support this feature, a request for a new fleet with certificate generation results fails with a 4xx unsupported Region error.

Valid values include:

" + "documentation":"

Prompts GameLift to generate a TLS/SSL certificate for the fleet. TLS certificates are used for encrypting traffic between game clients and the game servers that are running on GameLift. By default, the CertificateConfiguration is set to DISABLED. Learn more at Securing Client/Server Communication. This property cannot be changed after the fleet is created.

Note: This feature requires the AWS Certificate Manager (ACM) service, which is not available in all AWS Regions. When working in a Region that does not support this feature, a fleet creation request with certificate generation fails with a 4xx error.

" + }, + "Locations":{ + "shape":"LocationConfigurationList", + "documentation":"

A set of remote locations to deploy additional instances to and manage as part of the fleet. This parameter can only be used when creating fleets in AWS Regions that support multiple locations. You can add any GameLift-supported AWS Region as a remote location, in the form of an AWS Region code such as us-west-2. To create a fleet with instances in the home Region only, omit this parameter.

" }, "Tags":{ "shape":"TagList", - "documentation":"

A list of labels to assign to the new fleet resource. Tags are developer-defined key-value pairs. Tagging AWS resources are useful for resource management, access management and cost allocation. For more information, see Tagging AWS Resources in the AWS General Reference. Once the resource is created, you can use TagResource, UntagResource, and ListTagsForResource to add, remove, and view tags. The maximum tag limit may be lower than stated. See the AWS General Reference for actual tagging limits.

" + "documentation":"

A list of labels to assign to the new fleet resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management, and cost allocation. For more information, see Tagging AWS Resources in the AWS General Reference. Once the fleet is created, you can use TagResource, UntagResource, and ListTagsForResource to add, remove, and view tags. The maximum tag limit may be lower than stated. See the AWS General Reference for actual tagging limits.

" } }, "documentation":"

Represents the input for a request operation.

" }, + "CreateFleetLocationsInput":{ + "type":"structure", + "required":[ + "FleetId", + "Locations" + ], + "members":{ + "FleetId":{ + "shape":"FleetIdOrArn", + "documentation":"

A unique identifier for the fleet to add locations to. You can use either the fleet ID or ARN value.

" + }, + "Locations":{ + "shape":"LocationConfigurationList", + "documentation":"

A list of locations to deploy additional instances to and manage as part of the fleet. You can add any GameLift-supported AWS Region as a remote location, in the form of an AWS Region code such as us-west-2.

" + } + }, + "documentation":"

Represents the input for a request operation.

" + }, + "CreateFleetLocationsOutput":{ + "type":"structure", + "members":{ + "FleetId":{ + "shape":"FleetIdOrArn", + "documentation":"

A unique identifier for the fleet that was updated with new locations.

" + }, + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The Amazon Resource Name (ARN) that is assigned to a GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

" + }, + "LocationStates":{ + "shape":"LocationStateList", + "documentation":"

The remote locations that are being added to the fleet, and the life-cycle status of each location. For new locations, the status is set to NEW. During location creation, GameLift updates each location's status as instances are deployed there and prepared for game hosting. This list does not include the fleet home Region or any remote locations that were already added to the fleet.

" + } + }, + "documentation":"

Represents the returned data in response to a request operation.

" + }, "CreateFleetOutput":{ "type":"structure", "members":{ "FleetAttributes":{ "shape":"FleetAttributes", - "documentation":"

Properties for the newly created fleet.

" + "documentation":"

The properties for the new fleet, including the current status. All fleets are placed in NEW status on creation.

" + }, + "LocationStates":{ + "shape":"LocationStateList", + "documentation":"

The fleet's locations and the life-cycle status of each location. For new fleets, the status of all locations is set to NEW. During fleet creation, GameLift updates each location's status as instances are deployed there and prepared for game hosting. This list includes an entry for the fleet's home Region. For fleets with no remote locations, only one entry, representing the home Region, is returned.

" } }, "documentation":"

Represents the returned data in response to a request operation.

" @@ -1976,7 +2102,7 @@ }, "LaunchTemplate":{ "shape":"LaunchTemplateSpecification", - "documentation":"

The EC2 launch template that contains configuration settings and game server code to be deployed to all instances in the game server group. You can specify the template using either the template name or ID. For help with creating a launch template, see Creating a Launch Template for an Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide. After the Auto Scaling group is created, update this value directly in the Auto Scaling group using the AWS console or APIs.

" + "documentation":"

The EC2 launch template that contains configuration settings and game server code to be deployed to all instances in the game server group. You can specify the template using either the template name or ID. For help with creating a launch template, see Creating a Launch Template for an Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide. After the Auto Scaling group is created, update this value directly in the Auto Scaling group using the AWS console or APIs.

If you specify network interfaces in your launch template, you must explicitly set the property AssociatePublicIpAddress to \"true\". If no network interface is specified in the launch template, GameLift FleetIQ uses your account's default VPC.

" }, "InstanceDefinitions":{ "shape":"InstanceDefinitions", @@ -2019,11 +2145,11 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

A unique identifier for a fleet to create a game session in. You can use either the fleet ID or ARN value. Each request must reference either a fleet ID or alias ID, but not both.

" + "documentation":"

A unique identifier for the fleet to create a game session in. You can use either the fleet ID or ARN value. Each request must reference either a fleet ID or alias ID, but not both.

" }, "AliasId":{ "shape":"AliasIdOrArn", - "documentation":"

A unique identifier for an alias associated with the fleet to create a game session in. You can use either the alias ID or ARN value. Each request must reference either a fleet ID or alias ID, but not both.

" + "documentation":"

A unique identifier for the alias associated with the fleet to create a game session in. You can use either the alias ID or ARN value. Each request must reference either a fleet ID or alias ID, but not both.

" }, "MaximumPlayerSessionCount":{ "shape":"WholeNumber", @@ -2035,23 +2161,27 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" + "documentation":"

A set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session.

" }, "CreatorId":{ "shape":"NonZeroAndMaxString", - "documentation":"

A unique identifier for a player or entity creating the game session. This ID is used to enforce a resource protection policy (if one exists) that limits the number of concurrent active game sessions one player can have.

" + "documentation":"

A unique identifier for a player or entity creating the game session. This parameter is required when requesting a new game session on a fleet with a resource creation limit policy. This type of policy limits the number of concurrent active game sessions that one player can create within a certain time span. GameLift uses the CreatorId to evaluate the new request against the policy.

" }, "GameSessionId":{ "shape":"IdStringModel", - "documentation":"

This parameter is no longer preferred. Please use IdempotencyToken instead. Custom string that uniquely identifies a request for a new game session. Maximum token length is 48 characters. If provided, this string is included in the new game session's ID. (A game session ARN has the following format: arn:aws:gamelift:<region>::gamesession/<fleet ID>/<custom ID string or idempotency token>.)

" + "documentation":"

This parameter is no longer preferred. Please use IdempotencyToken instead. Custom string that uniquely identifies a request for a new game session. Maximum token length is 48 characters. If provided, this string is included in the new game session's ID.

" }, "IdempotencyToken":{ "shape":"IdStringModel", - "documentation":"

Custom string that uniquely identifies a request for a new game session. Maximum token length is 48 characters. If provided, this string is included in the new game session's ID. (A game session ARN has the following format: arn:aws:gamelift:<region>::gamesession/<fleet ID>/<custom ID string or idempotency token>.) Idempotency tokens remain in use for 30 days after a game session has ended; game session objects are retained for this time period and then deleted.

" + "documentation":"

Custom string that uniquely identifies the new game session request. This is useful for ensuring that game session requests with the same idempotency token are processed only once. Subsequent requests with the same string return the original GameSession object, with an updated status. Maximum token length is 48 characters. If provided, this string is included in the new game session's ID. A game session ARN has the following format: arn:aws:gamelift:<region>::gamesession/<fleet ID>/<custom ID string or idempotency token>. Idempotency tokens remain in use for 30 days after a game session has ended; game session objects are retained for this time period and then deleted.

" }, "GameSessionData":{ - "shape":"GameSessionData", - "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" + "shape":"LargeGameSessionData", + "documentation":"

A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session.

" + }, + "Location":{ + "shape":"LocationStringModel", + "documentation":"

A fleet's remote location to place the new game session in. If this parameter is not set, the new game session is placed in the fleet's home Region. Specify a remote location with an AWS Region code such as us-west-2.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -2080,11 +2210,27 @@ }, "PlayerLatencyPolicies":{ "shape":"PlayerLatencyPolicyList", - "documentation":"

A collection of latency policies to apply when processing game sessions placement requests with player latency information. Multiple policies are evaluated in order of the maximum latency value, starting with the lowest latency values. With just one policy, the policy is enforced at the start of the game session placement for the duration period. With multiple policies, each policy is enforced consecutively for its duration period. For example, a queue might enforce a 60-second policy followed by a 120-second policy, and then no policy for the remainder of the placement. A player latency policy must set a value for MaximumIndividualPlayerLatencyMilliseconds. If none is set, this API request fails.

" + "documentation":"

A set of policies that act as a sliding cap on player latency. FleetIQ works to deliver low latency for most players in a game session. These policies ensure that no individual player can be placed into a game with unreasonably high latency. Use multiple policies to gradually relax latency requirements a step at a time. Multiple policies are applied based on their maximum allowed latency, starting with the lowest value.

" }, "Destinations":{ "shape":"GameSessionQueueDestinationList", - "documentation":"

A list of fleets that can be used to fulfill game session placement requests in the queue. Fleets are identified by either a fleet ARN or a fleet alias ARN. Destinations are listed in default preference order.

" + "documentation":"

A list of fleets and/or fleet aliases that can be used to fulfill game session placement requests in the queue. Destinations are identified by either a fleet ARN or a fleet alias ARN, and are listed in order of placement preference.

" + }, + "FilterConfiguration":{ + "shape":"FilterConfiguration", + "documentation":"

A list of locations where a queue is allowed to place new game sessions. Locations are specified in the form of AWS Region codes, such as us-west-2. If this parameter is not set, game sessions can be placed in any queue location.

" + }, + "PriorityConfiguration":{ + "shape":"PriorityConfiguration", + "documentation":"

Custom settings to use when prioritizing destinations and locations for game session placements. This configuration replaces the FleetIQ default prioritization process. Priority types that are not explicitly named will be automatically applied at the end of the prioritization process.

" + }, + "CustomEventData":{ + "shape":"QueueCustomEventData", + "documentation":"

Information to be added to all events that are related to this game session queue.

" + }, + "NotificationTarget":{ + "shape":"QueueSnsArnStringModel", + "documentation":"

An SNS topic ARN that is set up to receive game session placement notifications. See Setting up notifications for game session placement.

" }, "Tags":{ "shape":"TagList", @@ -2114,7 +2260,7 @@ "members":{ "Name":{ "shape":"MatchmakingIdStringModel", - "documentation":"

A unique identifier for a matchmaking configuration. This name is used to identify the configuration associated with a matchmaking request or ticket.

" + "documentation":"

A unique identifier for the matchmaking configuration. This name is used to identify the configuration associated with a matchmaking request or ticket.

" }, "Description":{ "shape":"NonZeroAndMaxString", @@ -2122,7 +2268,7 @@ }, "GameSessionQueueArns":{ "shape":"QueueArnsList", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Queues can be located in any Region. Queues are used to start new GameLift-hosted game sessions for matches that are created with this matchmaking configuration. If FlexMatchMode is set to STANDALONE, do not set this parameter.

" + "documentation":"

The Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::gamesessionqueue/<queue name>. Queues can be located in any Region. Queues are used to start new GameLift-hosted game sessions for matches that are created with this matchmaking configuration. If FlexMatchMode is set to STANDALONE, do not set this parameter.

" }, "RequestTimeoutSeconds":{ "shape":"MatchmakingRequestTimeoutInteger", @@ -2130,7 +2276,7 @@ }, "AcceptanceTimeoutSeconds":{ "shape":"MatchmakingAcceptanceTimeoutInteger", - "documentation":"

The length of time (in seconds) to wait for players to accept a proposed match, if acceptance is required. If any player rejects the match or fails to accept before the timeout, the tickets are returned to the ticket pool and continue to be evaluated for an acceptable match.

" + "documentation":"

The length of time (in seconds) to wait for players to accept a proposed match, if acceptance is required.

" }, "AcceptanceRequired":{ "shape":"BooleanModel", @@ -2138,15 +2284,15 @@ }, "RuleSetName":{ "shape":"MatchmakingRuleSetName", - "documentation":"

A unique identifier for a matchmaking rule set to use with this configuration. You can use either the rule set name or ARN value. A matchmaking configuration can only use rule sets that are defined in the same Region.

" + "documentation":"

A unique identifier for the matchmaking rule set to use with this configuration. You can use either the rule set name or ARN value. A matchmaking configuration can only use rule sets that are defined in the same Region.

" }, "NotificationTarget":{ "shape":"SnsArnStringModel", - "documentation":"

An SNS topic ARN that is set up to receive matchmaking notifications.

" + "documentation":"

An SNS topic ARN that is set up to receive matchmaking notifications. See Setting up notifications for matchmaking for more information.

" }, "AdditionalPlayerCount":{ "shape":"WholeNumber", - "documentation":"

The number of player slots in a match to keep open for future players. For example, assume that the configuration's rule set specifies a match for a single 12-person team. If the additional player count is set to 2, only 10 players are initially selected for the match. This parameter is not used if FlexMatchMode is set to STANDALONE.

" + "documentation":"

The number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single 12-person team, and the additional player count is set to 2, only 10 players are selected for the match. This parameter is not used if FlexMatchMode is set to STANDALONE.

" }, "CustomEventData":{ "shape":"CustomEventData", @@ -2154,7 +2300,7 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

A set of custom properties for a game session, formatted as key-value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match. This parameter is not used if FlexMatchMode is set to STANDALONE.

" + "documentation":"

A set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match. This parameter is not used if FlexMatchMode is set to STANDALONE.

" }, "GameSessionData":{ "shape":"GameSessionData", @@ -2194,7 +2340,7 @@ "members":{ "Name":{ "shape":"MatchmakingIdStringModel", - "documentation":"

A unique identifier for a matchmaking rule set. A matchmaking configuration identifies the rule set it uses by this name value. Note that the rule set name is different from the optional name field in the rule set body.

" + "documentation":"

A unique identifier for the matchmaking rule set. A matchmaking configuration identifies the rule set it uses by this name value. Note that the rule set name is different from the optional name field in the rule set body.

" }, "RuleSetBody":{ "shape":"RuleSetBody", @@ -2235,7 +2381,7 @@ }, "PlayerData":{ "shape":"PlayerData", - "documentation":"

Developer-defined information related to a player. Amazon GameLift does not use this data, so it can be formatted as needed for use in the game.

" + "documentation":"

Developer-defined information related to a player. GameLift does not use this data, so it can be formatted as needed for use in the game.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -2267,7 +2413,7 @@ }, "PlayerDataMap":{ "shape":"PlayerDataMap", - "documentation":"

Map of string pairs, each specifying a player ID and a set of developer-defined information related to the player. Amazon GameLift does not use this data, so it can be formatted as needed for use in the game. Player data strings for player IDs not included in the PlayerIds parameter are ignored.

" + "documentation":"

Map of string pairs, each specifying a player ID and a set of developer-defined information related to the player. Amazon GameLift does not use this data, so it can be formatted as needed for use in the game. Any player data strings for player IDs that are not included in the PlayerIds parameter are ignored.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -2291,11 +2437,11 @@ }, "Version":{ "shape":"NonZeroAndMaxString", - "documentation":"

The version that is associated with a build or script. Version strings do not need to be unique. You can use UpdateScript to change this value later.

" + "documentation":"

Version information that is associated with a build or script. Version strings do not need to be unique. You can use UpdateScript to change this value later.

" }, "StorageLocation":{ "shape":"S3Location", - "documentation":"

The Amazon S3 location of your Realtime scripts. The storage location must specify the S3 bucket name, the zip file name (the \"key\"), and an IAM role ARN that allows Amazon GameLift to access the S3 storage location. The S3 bucket must be in the same Region where you are creating a new script. By default, Amazon GameLift uploads the latest version of the zip file; if you have S3 object versioning turned on, you can use the ObjectVersion parameter to specify an earlier version. To call this operation with a storage location, you must have IAM PassRole permission. For more details on IAM roles and PassRole permissions, see Set up a role for GameLift access.

" + "documentation":"

The location of the Amazon S3 bucket where a zipped file containing your Realtime scripts is stored. The storage location must specify the Amazon S3 bucket name, the zip file name (the \"key\"), and a role ARN that allows Amazon GameLift to access the Amazon S3 storage location. The S3 bucket must be in the same Region where you want to create a new script. By default, Amazon GameLift uploads the latest version of the zip file; if you have S3 object versioning turned on, you can use the ObjectVersion parameter to specify an earlier version.

" }, "ZipFile":{ "shape":"ZipBlob", @@ -2325,11 +2471,11 @@ "members":{ "GameLiftAwsAccountId":{ "shape":"NonZeroAndMaxString", - "documentation":"

A unique identifier for the AWS account that you use to manage your Amazon GameLift fleet. You can find your Account ID in the AWS Management Console under account settings.

" + "documentation":"

A unique identifier for the AWS account that you use to manage your GameLift fleet. You can find your Account ID in the AWS Management Console under account settings.

" }, "PeerVpcId":{ "shape":"NonZeroAndMaxString", - "documentation":"

A unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same Region where your fleet is deployed. Look up a VPC ID using the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

" + "documentation":"

A unique identifier for a VPC with resources to be accessed by your GameLift fleet. The VPC must be in the same Region as your fleet. To look up a VPC ID, use the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with GameLift Fleets.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -2354,7 +2500,7 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

A unique identifier for a fleet. You can use either the fleet ID or ARN value. This tells Amazon GameLift which GameLift VPC to peer with.

" + "documentation":"

A unique identifier for the fleet. You can use either the fleet ID or ARN value. This tells Amazon GameLift which GameLift VPC to peer with.

" }, "PeerVpcAwsAccountId":{ "shape":"NonZeroAndMaxString", @@ -2362,7 +2508,7 @@ }, "PeerVpcId":{ "shape":"NonZeroAndMaxString", - "documentation":"

A unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same Region where your fleet is deployed. Look up a VPC ID using the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

" + "documentation":"

A unique identifier for a VPC with resources to be accessed by your GameLift fleet. The VPC must be in the same Region as your fleet. To look up a VPC ID, use the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with GameLift Fleets.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -2394,7 +2540,7 @@ "members":{ "BuildId":{ "shape":"BuildIdOrArn", - "documentation":"

A unique identifier for a build to delete. You can use either the build ID or ARN value.

" + "documentation":"

A unique identifier for the build to delete. You can use either the build ID or ARN value.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -2405,11 +2551,47 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

A unique identifier for a fleet to be deleted. You can use either the fleet ID or ARN value.

" + "documentation":"

A unique identifier for the fleet to be deleted. You can use either the fleet ID or ARN value.

" } }, "documentation":"

Represents the input for a request operation.

" }, + "DeleteFleetLocationsInput":{ + "type":"structure", + "required":[ + "FleetId", + "Locations" + ], + "members":{ + "FleetId":{ + "shape":"FleetIdOrArn", + "documentation":"

A unique identifier for the fleet to delete locations for. You can use either the fleet ID or ARN value.

" + }, + "Locations":{ + "shape":"LocationList", + "documentation":"

The list of fleet locations to delete. Specify locations in the form of an AWS Region code, such as us-west-2.

" + } + }, + "documentation":"

Represents the input for a request operation.

" + }, + "DeleteFleetLocationsOutput":{ + "type":"structure", + "members":{ + "FleetId":{ + "shape":"FleetIdOrArn", + "documentation":"

A unique identifier for the fleet that location attributes are being deleted for.

" + }, + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The Amazon Resource Name (ARN) that is assigned to a GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

" + }, + "LocationStates":{ + "shape":"LocationStateList", + "documentation":"

The remote locations that are being deleted, with each location status set to DELETING.

" + } + }, + "documentation":"

Represents the returned data in response to a request operation.

" + }, "DeleteGameServerGroupInput":{ "type":"structure", "required":["GameServerGroupName"], @@ -2455,7 +2637,7 @@ "members":{ "Name":{ "shape":"MatchmakingConfigurationName", - "documentation":"

A unique identifier for a matchmaking configuration. You can use either the configuration name or ARN value.

" + "documentation":"

A unique identifier for the matchmaking configuration. You can use either the configuration name or ARN value.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -2471,7 +2653,7 @@ "members":{ "Name":{ "shape":"MatchmakingRuleSetName", - "documentation":"

A unique identifier for a matchmaking rule set to be deleted. (Note: The rule set name is different from the optional \"name\" field in the rule set body.) You can use either the rule set name or ARN value.

" + "documentation":"

A unique identifier for the matchmaking rule set to be deleted. (Note: The rule set name is different from the optional \"name\" field in the rule set body.) You can use either the rule set name or ARN value.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -2491,11 +2673,11 @@ "members":{ "Name":{ "shape":"NonZeroAndMaxString", - "documentation":"

A descriptive label that is associated with a scaling policy. Policy names do not need to be unique.

" + "documentation":"

A descriptive label that is associated with a fleet's scaling policy. Policy names do not need to be unique.

" }, "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

A unique identifier for a fleet to be deleted. You can use either the fleet ID or ARN value.

" + "documentation":"

A unique identifier for the fleet to be deleted. You can use either the fleet ID or ARN value.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -2506,7 +2688,7 @@ "members":{ "ScriptId":{ "shape":"ScriptIdOrArn", - "documentation":"

A unique identifier for a Realtime script to delete. You can use either the script ID or ARN value.

" + "documentation":"

A unique identifier for the Realtime script to delete. You can use either the script ID or ARN value.

" } } }, @@ -2519,11 +2701,11 @@ "members":{ "GameLiftAwsAccountId":{ "shape":"NonZeroAndMaxString", - "documentation":"

A unique identifier for the AWS account that you use to manage your Amazon GameLift fleet. You can find your Account ID in the AWS Management Console under account settings.

" + "documentation":"

A unique identifier for the AWS account that you use to manage your GameLift fleet. You can find your Account ID in the AWS Management Console under account settings.

" }, "PeerVpcId":{ "shape":"NonZeroAndMaxString", - "documentation":"

A unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same Region where your fleet is deployed. Look up a VPC ID using the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

" + "documentation":"

A unique identifier for a VPC with resources to be accessed by your GameLift fleet. The VPC must be in the same Region as your fleet. To look up a VPC ID, use the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with GameLift Fleets.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -2542,7 +2724,7 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

A unique identifier for a fleet. This fleet specified must match the fleet referenced in the VPC peering connection record. You can use either the fleet ID or ARN value.

" + "documentation":"

A unique identifier for the fleet. The fleet specified must match the fleet referenced in the VPC peering connection record. You can use either the fleet ID or ARN value.

" }, "VpcPeeringConnectionId":{ "shape":"NonZeroAndMaxString", @@ -2600,7 +2782,7 @@ "members":{ "BuildId":{ "shape":"BuildIdOrArn", - "documentation":"

A unique identifier for a build to retrieve properties for. You can use either the build ID or ARN value.

" + "documentation":"

A unique identifier for the build to retrieve properties for. You can use either the build ID or ARN value.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -2620,7 +2802,11 @@ "members":{ "EC2InstanceType":{ "shape":"EC2InstanceType", - "documentation":"

Name of an EC2 instance type that is supported in Amazon GameLift. A fleet instance type determines the computing resources of each instance in the fleet, including CPU, memory, storage, and networking capacity. Amazon GameLift supports the following EC2 instance types. See Amazon EC2 Instance Types for detailed descriptions. Leave this parameter blank to retrieve limits for all types.

" + "documentation":"

Name of an EC2 instance type that is supported in GameLift. A fleet instance type determines the computing resources of each instance in the fleet, including CPU, memory, storage, and networking capacity. Do not specify a value for this parameter to retrieve limits for all instance types.

" + }, + "Location":{ + "shape":"LocationStringModel", + "documentation":"

The name of a remote location to request instance limits for, in the form of an AWS Region code such as us-west-2.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -2640,7 +2826,7 @@ "members":{ "FleetIds":{ "shape":"FleetIdOrArnList", - "documentation":"

A list of unique fleet identifiers to retrieve attributes for. You can use either the fleet ID or ARN value. To retrieve attributes for all current fleets, do not include this parameter. If the list of fleet identifiers includes fleets that don't currently exist, the request succeeds but no attributes for that fleet are returned.

" + "documentation":"

A list of unique fleet identifiers to retrieve attributes for. You can use either the fleet ID or ARN value. To retrieve attributes for all current fleets, do not include this parameter.

" }, "Limit":{ "shape":"PositiveInteger", @@ -2648,7 +2834,7 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value. This parameter is ignored when the request specifies one or a list of fleet IDs.

" + "documentation":"

A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value. This parameter is ignored when the request specifies one or a list of fleet IDs.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -2662,7 +2848,7 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates where to resume retrieving results on the next call to this operation. If no token is returned, these results represent the end of the list.

" + "documentation":"

A token that indicates where to resume retrieving results on the next call to this operation. If no token is returned, these results represent the end of the list.

" } }, "documentation":"

Represents the returned data in response to a request operation.

" @@ -2672,7 +2858,7 @@ "members":{ "FleetIds":{ "shape":"FleetIdOrArnList", - "documentation":"

A unique identifier for a fleet(s) to retrieve capacity information for. You can use either the fleet ID or ARN value.

" + "documentation":"

A unique identifier for the fleet(s) to retrieve capacity information for. You can use either the fleet ID or ARN value. Leave this parameter empty to retrieve capacity information for all fleets.

" }, "Limit":{ "shape":"PositiveInteger", @@ -2680,7 +2866,7 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value. This parameter is ignored when the request specifies one or a list of fleet IDs.

" + "documentation":"

A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value. This parameter is ignored when the request specifies one or a list of fleet IDs.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -2690,11 +2876,11 @@ "members":{ "FleetCapacity":{ "shape":"FleetCapacityList", - "documentation":"

A collection of objects containing capacity information for each requested fleet ID. Leave this parameter empty to retrieve capacity information for all fleets.

" + "documentation":"

A collection of objects that contains capacity information for each requested fleet ID. Capacity objects are returned only for fleets that currently exist.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates where to resume retrieving results on the next call to this operation. If no token is returned, these results represent the end of the list.

" + "documentation":"

A token that indicates where to resume retrieving results on the next call to this operation. If no token is returned, these results represent the end of the list.

" } }, "documentation":"

Represents the returned data in response to a request operation.

" @@ -2705,15 +2891,15 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

A unique identifier for a fleet to get event logs for. You can use either the fleet ID or ARN value.

" + "documentation":"

A unique identifier for the fleet to get event logs for. You can use either the fleet ID or ARN value.

" }, "StartTime":{ "shape":"Timestamp", - "documentation":"

Earliest date to retrieve event logs for. If no start time is specified, this call returns entries starting from when the fleet was created to the specified end time. Format is a number expressed in Unix time as milliseconds (ex: \"1469498468.057\").

" + "documentation":"

The earliest date to retrieve event logs for. If no start time is specified, this call returns entries starting from when the fleet was created to the specified end time. Format is a number expressed in Unix time as milliseconds (ex: \"1469498468.057\").

" }, "EndTime":{ "shape":"Timestamp", - "documentation":"

Most recent date to retrieve event logs for. If no end time is specified, this call returns entries from the specified start time up to the present. Format is a number expressed in Unix time as milliseconds (ex: \"1469498468.057\").

" + "documentation":"

The most recent date to retrieve event logs for. If no end time is specified, this call returns entries from the specified start time up to the present. Format is a number expressed in Unix time as milliseconds (ex: \"1469498468.057\").

" }, "Limit":{ "shape":"PositiveInteger", @@ -2721,7 +2907,7 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value.

" + "documentation":"

A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -2735,7 +2921,108 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates where to resume retrieving results on the next call to this operation. If no token is returned, these results represent the end of the list.

" + "documentation":"

A token that indicates where to resume retrieving results on the next call to this operation. If no token is returned, these results represent the end of the list.

" + } + }, + "documentation":"

Represents the returned data in response to a request operation.

" + }, + "DescribeFleetLocationAttributesInput":{ + "type":"structure", + "required":["FleetId"], + "members":{ + "FleetId":{ + "shape":"FleetIdOrArn", + "documentation":"

A unique identifier for the fleet to retrieve remote locations for. You can use either the fleet ID or ARN value.

" + }, + "Locations":{ + "shape":"LocationList", + "documentation":"

A list of fleet locations to retrieve information for. Specify locations in the form of an AWS Region code, such as us-west-2.

" + }, + "Limit":{ + "shape":"PositiveInteger", + "documentation":"

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages. This limit is not currently enforced.

" + }, + "NextToken":{ + "shape":"NonZeroAndMaxString", + "documentation":"

A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value.

" + } + }, + "documentation":"

Represents the input for a request operation.

" + }, + "DescribeFleetLocationAttributesOutput":{ + "type":"structure", + "members":{ + "FleetId":{ + "shape":"FleetIdOrArn", + "documentation":"

A unique identifier for the fleet that location attributes were requested for.

" + }, + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The Amazon Resource Name (ARN) that is assigned to a GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

" + }, + "LocationAttributes":{ + "shape":"LocationAttributesList", + "documentation":"

Location-specific information on the requested fleet's remote locations.

" + }, + "NextToken":{ + "shape":"NonZeroAndMaxString", + "documentation":"

A token that indicates where to resume retrieving results on the next call to this operation. If no token is returned, these results represent the end of the list.

" + } + }, + "documentation":"

Represents the returned data in response to a request operation.

" + }, + "DescribeFleetLocationCapacityInput":{ + "type":"structure", + "required":[ + "FleetId", + "Location" + ], + "members":{ + "FleetId":{ + "shape":"FleetIdOrArn", + "documentation":"

A unique identifier for the fleet to request location capacity for. You can use either the fleet ID or ARN value.

" + }, + "Location":{ + "shape":"LocationStringModel", + "documentation":"

The fleet location to retrieve capacity information for. Specify a location in the form of an AWS Region code, such as us-west-2.

" + } + }, + "documentation":"

Represents the input for a request operation.

" + }, + "DescribeFleetLocationCapacityOutput":{ + "type":"structure", + "members":{ + "FleetCapacity":{ + "shape":"FleetCapacity", + "documentation":"

Resource capacity information for the requested fleet location. Capacity objects are returned only for fleets and locations that currently exist.

" + } + }, + "documentation":"

Represents the returned data in response to a request operation.

" + }, + "DescribeFleetLocationUtilizationInput":{ + "type":"structure", + "required":[ + "FleetId", + "Location" + ], + "members":{ + "FleetId":{ + "shape":"FleetIdOrArn", + "documentation":"

A unique identifier for the fleet to request location utilization for. You can use either the fleet ID or ARN value.

" + }, + "Location":{ + "shape":"LocationStringModel", + "documentation":"

The fleet location to retrieve utilization information for. Specify a location in the form of an AWS Region code, such as us-west-2.

" + } + }, + "documentation":"

Represents the input for a request operation.

" + }, + "DescribeFleetLocationUtilizationOutput":{ + "type":"structure", + "members":{ + "FleetUtilization":{ + "shape":"FleetUtilization", + "documentation":"

Utilization information for the requested fleet location. Utilization objects are returned only for fleets and locations that currently exist.

" } }, "documentation":"

Represents the returned data in response to a request operation.

" @@ -2746,7 +3033,11 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

A unique identifier for a fleet to retrieve port settings for. You can use either the fleet ID or ARN value.

" + "documentation":"

A unique identifier for the fleet to retrieve port settings for. You can use either the fleet ID or ARN value.

" + }, + "Location":{ + "shape":"LocationStringModel", + "documentation":"

A remote location to check the status of port setting updates for. Use the AWS Region code format, such as us-west-2.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -2754,9 +3045,25 @@ "DescribeFleetPortSettingsOutput":{ "type":"structure", "members":{ + "FleetId":{ + "shape":"FleetId", + "documentation":"

A unique identifier for the fleet that was requested.

" + }, + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The Amazon Resource Name (ARN) that is assigned to a GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

" + }, "InboundPermissions":{ "shape":"IpPermissionsList", "documentation":"

The port settings for the requested fleet ID.

" + }, + "UpdateStatus":{ + "shape":"LocationUpdateStatus", + "documentation":"

The current status of updates to the fleet's port settings in the requested fleet location. A status of PENDING_UPDATE indicates that an update was requested for the fleet but has not yet been completed for the location.

" + }, + "Location":{ + "shape":"LocationStringModel", + "documentation":"

The requested fleet location, expressed as an AWS Region code, such as us-west-2.

" } }, "documentation":"

Represents the returned data in response to a request operation.

" @@ -2766,7 +3073,7 @@ "members":{ "FleetIds":{ "shape":"FleetIdOrArnList", - "documentation":"

A unique identifier for a fleet(s) to retrieve utilization data for. You can use either the fleet ID or ARN value. To retrieve attributes for all current fleets, do not include this parameter. If the list of fleet identifiers includes fleets that don't currently exist, the request succeeds but no attributes for that fleet are returned.

" + "documentation":"

A unique identifier for the fleet(s) to retrieve utilization data for. You can use either the fleet ID or ARN value. To retrieve attributes for all current fleets, do not include this parameter.

" }, "Limit":{ "shape":"PositiveInteger", @@ -2774,7 +3081,7 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value. This parameter is ignored when the request specifies one or a list of fleet IDs.

" + "documentation":"

A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value. This parameter is ignored when the request specifies one or a list of fleet IDs.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -2784,11 +3091,11 @@ "members":{ "FleetUtilization":{ "shape":"FleetUtilizationList", - "documentation":"

A collection of objects containing utilization information for each requested fleet ID.

" + "documentation":"

A collection of objects containing utilization information for each requested fleet ID. Utilization objects are returned only for fleets that currently exist.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates where to resume retrieving results on the next call to this operation. If no token is returned, these results represent the end of the list.

" + "documentation":"

A token that indicates where to resume retrieving results on the next call to this operation. If no token is returned, these results represent the end of the list.

" } }, "documentation":"

Represents the returned data in response to a request operation.

" @@ -2843,11 +3150,11 @@ }, "Limit":{ "shape":"PositiveInteger", - "documentation":"

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential segments.

" + "documentation":"

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

A token that indicates the start of the next sequential segment of results. Use the token returned with the previous call to this operation. To start at the beginning of the result set, do not specify a value.

" + "documentation":"

A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value.

" } } }, @@ -2878,7 +3185,7 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

A unique identifier for a fleet to retrieve all game sessions active on the fleet. You can use either the fleet ID or ARN value.

" + "documentation":"

A unique identifier for the fleet to retrieve all game sessions active on the fleet. You can use either the fleet ID or ARN value.

" }, "GameSessionId":{ "shape":"ArnStringModel", @@ -2886,7 +3193,11 @@ }, "AliasId":{ "shape":"AliasIdOrArn", - "documentation":"

A unique identifier for an alias associated with the fleet to retrieve all game sessions for. You can use either the alias ID or ARN value.

" + "documentation":"

A unique identifier for the alias associated with the fleet to retrieve all game sessions for. You can use either the alias ID or ARN value.

" + }, + "Location":{ + "shape":"LocationStringModel", + "documentation":"

A fleet location to get game sessions for. You can specify a fleet's home Region or a remote location. Use the AWS Region code format, such as us-west-2.

" }, "StatusFilter":{ "shape":"NonZeroAndMaxString", @@ -2898,7 +3209,7 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value.

" + "documentation":"

A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -2908,11 +3219,11 @@ "members":{ "GameSessionDetails":{ "shape":"GameSessionDetailList", - "documentation":"

A collection of objects containing game session properties and the protection policy currently in force for each session matching the request.

" + "documentation":"

A collection of properties for each game session that matches the request.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates where to resume retrieving results on the next call to this operation. If no token is returned, these results represent the end of the list.

" + "documentation":"

A token that indicates where to resume retrieving results on the next call to this operation. If no token is returned, these results represent the end of the list.

" } }, "documentation":"

Represents the returned data in response to a request operation.

" @@ -2975,7 +3286,7 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

A unique identifier for a fleet to retrieve all game sessions for. You can use either the fleet ID or ARN value.

" + "documentation":"

A unique identifier for the fleet to retrieve game sessions for. You can use either the fleet ID or ARN value.

" }, "GameSessionId":{ "shape":"ArnStringModel", @@ -2983,11 +3294,15 @@ }, "AliasId":{ "shape":"AliasIdOrArn", - "documentation":"

A unique identifier for an alias associated with the fleet to retrieve all game sessions for. You can use either the alias ID or ARN value.

" + "documentation":"

A unique identifier for the alias associated with the fleet to retrieve game sessions for. You can use either the alias ID or ARN value.

" + }, + "Location":{ + "shape":"LocationStringModel", + "documentation":"

A fleet location to get game session details for. You can specify a fleet's home Region or a remote location. Use the AWS Region code format, such as us-west-2.

" }, "StatusFilter":{ "shape":"NonZeroAndMaxString", - "documentation":"

Game session status to filter results on. Possible game session statuses include ACTIVE, TERMINATED, ACTIVATING, and TERMINATING (the last two are transitory).

" + "documentation":"

Game session status to filter results on. You can filter on the following states: ACTIVE, TERMINATED, ACTIVATING, and TERMINATING. The last two are transitory and used for only very brief periods of time.

" }, "Limit":{ "shape":"PositiveInteger", @@ -2995,7 +3310,7 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value.

" + "documentation":"

A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -3005,11 +3320,11 @@ "members":{ "GameSessions":{ "shape":"GameSessionList", - "documentation":"

A collection of objects containing game session properties for each session matching the request.

" + "documentation":"

A collection of properties for each game session that matches the request.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates where to resume retrieving results on the next call to this operation. If no token is returned, these results represent the end of the list.

" + "documentation":"

A token that indicates where to resume retrieving results on the next call to this operation. If no token is returned, these results represent the end of the list.

" } }, "documentation":"

Represents the returned data in response to a request operation.

" @@ -3020,7 +3335,7 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

A unique identifier for a fleet to retrieve instance information for. You can use either the fleet ID or ARN value.

" + "documentation":"

A unique identifier for the fleet to retrieve instance information for. You can use either the fleet ID or ARN value.

" }, "InstanceId":{ "shape":"InstanceId", @@ -3032,7 +3347,11 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value.

" + "documentation":"

A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value.

" + }, + "Location":{ + "shape":"LocationStringModel", + "documentation":"

The name of a location to retrieve instance information for, in the form of an AWS Region code such as us-west-2.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -3046,7 +3365,7 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates where to resume retrieving results on the next call to this operation. If no token is returned, these results represent the end of the list.

" + "documentation":"

A token that indicates where to resume retrieving results on the next call to this operation. If no token is returned, these results represent the end of the list.

" } }, "documentation":"

Represents the returned data in response to a request operation.

" @@ -3056,11 +3375,11 @@ "members":{ "Names":{ "shape":"MatchmakingConfigurationNameList", - "documentation":"

A unique identifier for a matchmaking configuration(s) to retrieve. You can use either the configuration name or ARN value. To request all existing configurations, leave this parameter empty.

" + "documentation":"

A unique identifier for the matchmaking configuration(s) to retrieve. You can use either the configuration name or ARN value. To request all existing configurations, leave this parameter empty.

" }, "RuleSetName":{ "shape":"MatchmakingRuleSetName", - "documentation":"

A unique identifier for a matchmaking rule set. You can use either the rule set name or ARN value. Use this parameter to retrieve all matchmaking configurations that use this rule set.

" + "documentation":"

A unique identifier for the matchmaking rule set. You can use either the rule set name or ARN value. Use this parameter to retrieve all matchmaking configurations that use this rule set.

" }, "Limit":{ "shape":"PositiveInteger", @@ -3166,7 +3485,7 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value. If a player session ID is specified, this parameter is ignored.

" + "documentation":"

A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value. If a player session ID is specified, this parameter is ignored.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -3180,7 +3499,7 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates where to resume retrieving results on the next call to this operation. If no token is returned, these results represent the end of the list.

" + "documentation":"

A token that indicates where to resume retrieving results on the next call to this operation. If no token is returned, these results represent the end of the list.

" } }, "documentation":"

Represents the returned data in response to a request operation.

" @@ -3191,7 +3510,7 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

A unique identifier for a fleet to get the runtime configuration for. You can use either the fleet ID or ARN value.

" + "documentation":"

A unique identifier for the fleet to get the runtime configuration for. You can use either the fleet ID or ARN value.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -3201,7 +3520,7 @@ "members":{ "RuntimeConfiguration":{ "shape":"RuntimeConfiguration", - "documentation":"

Instructions describing how server processes should be launched and maintained on each instance in the fleet.

" + "documentation":"

Instructions that describe how server processes should be launched and maintained on each instance in the fleet.

" } }, "documentation":"

Represents the returned data in response to a request operation.

" @@ -3212,7 +3531,7 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

A unique identifier for a fleet to retrieve scaling policies for. You can use either the fleet ID or ARN value.

" + "documentation":"

A unique identifier for the fleet to retrieve scaling policies for. You can use either the fleet ID or ARN value.

" }, "StatusFilter":{ "shape":"ScalingStatusType", @@ -3224,7 +3543,11 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value.

" + "documentation":"

A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value.

" + }, + "Location":{ + "shape":"LocationStringModel", + "documentation":"

A fleet location to retrieve scaling policies for. You can specify a fleet's home Region or a remote location. Use the AWS Region code format, such as us-west-2.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -3238,7 +3561,7 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates where to resume retrieving results on the next call to this operation. If no token is returned, these results represent the end of the list.

" + "documentation":"

A token that indicates where to resume retrieving results on the next call to this operation. If no token is returned, these results represent the end of the list.

" } }, "documentation":"

Represents the returned data in response to a request operation.

" @@ -3249,7 +3572,7 @@ "members":{ "ScriptId":{ "shape":"ScriptIdOrArn", - "documentation":"

A unique identifier for a Realtime script to retrieve properties for. You can use either the script ID or ARN value.

" + "documentation":"

A unique identifier for the Realtime script to retrieve properties for. You can use either the script ID or ARN value.

" } } }, @@ -3281,7 +3604,7 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

A unique identifier for a fleet. You can use either the fleet ID or ARN value.

" + "documentation":"

A unique identifier for the fleet. You can use either the fleet ID or ARN value.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -3305,7 +3628,7 @@ }, "PlayerData":{ "shape":"PlayerData", - "documentation":"

Developer-defined information related to a player. Amazon GameLift does not use this data, so it can be formatted as needed for use in the game.

" + "documentation":"

Developer-defined information related to a player. GameLift does not use this data, so it can be formatted as needed for use in the game.

" } }, "documentation":"

Player information for use when creating player sessions using a game session placement request with StartGameSessionPlacement.

" @@ -3322,52 +3645,56 @@ "members":{ "DESIRED":{ "shape":"WholeNumber", - "documentation":"

Ideal number of active instances in the fleet.

" + "documentation":"

Ideal number of active instances. GameLift will always try to maintain the desired number of instances. Capacity is scaled up or down by changing the desired instances.

" }, "MINIMUM":{ "shape":"WholeNumber", - "documentation":"

The minimum value allowed for the fleet's instance count.

" + "documentation":"

The minimum instance count value allowed.

" }, "MAXIMUM":{ "shape":"WholeNumber", - "documentation":"

The maximum value allowed for the fleet's instance count.

" + "documentation":"

The maximum instance count value allowed.

" }, "PENDING":{ "shape":"WholeNumber", - "documentation":"

Number of instances in the fleet that are starting but not yet active.

" + "documentation":"

Number of instances that are starting but not yet active.

" }, "ACTIVE":{ "shape":"WholeNumber", - "documentation":"

Actual number of active instances in the fleet.

" + "documentation":"

Actual number of instances that are ready to host game sessions.

" }, "IDLE":{ "shape":"WholeNumber", - "documentation":"

Number of active instances in the fleet that are not currently hosting a game session.

" + "documentation":"

Number of active instances that are not currently hosting a game session.

" }, "TERMINATING":{ "shape":"WholeNumber", - "documentation":"

Number of instances in the fleet that are no longer active but haven't yet been terminated.

" + "documentation":"

Number of instances that are no longer active but haven't yet been terminated.

" } }, - "documentation":"

Current status of fleet capacity. The number of active instances should match or be in the process of matching the number of desired instances. Pending and terminating counts are non-zero only if fleet capacity is adjusting to an UpdateFleetCapacity request, or if access to resources is temporarily affected.

" + "documentation":"

Resource capacity settings. Fleet capacity is measured in EC2 instances. Pending and terminating counts are non-zero when the fleet capacity is adjusting to a scaling event or if access to resources is temporarily affected.

EC2 instance counts are part of FleetCapacity.

" }, "EC2InstanceLimit":{ "type":"structure", "members":{ "EC2InstanceType":{ "shape":"EC2InstanceType", - "documentation":"

Name of an EC2 instance type that is supported in Amazon GameLift. A fleet instance type determines the computing resources of each instance in the fleet, including CPU, memory, storage, and networking capacity. Amazon GameLift supports the following EC2 instance types. See Amazon EC2 Instance Types for detailed descriptions.

" + "documentation":"

The name of an EC2 instance type. See Amazon EC2 Instance Types for detailed descriptions.

" }, "CurrentInstances":{ "shape":"WholeNumber", - "documentation":"

Number of instances of the specified type that are currently in use by this AWS account.

" + "documentation":"

The number of instances for the specified type and location that are currently being used by the AWS account.

" }, "InstanceLimit":{ "shape":"WholeNumber", - "documentation":"

Number of instances allowed.

" + "documentation":"

The number of instances that is allowed for the specified instance type and location.

" + }, + "Location":{ + "shape":"LocationStringModel", + "documentation":"

An AWS Region code, such as us-west-2.

" } }, - "documentation":"

The maximum number of instances allowed based on the Amazon Elastic Compute Cloud (Amazon EC2) instance type. Instance limits can be retrieved by calling DescribeEC2InstanceLimits.

" + "documentation":"

The GameLift service limits for an EC2 instance type and current utilization. GameLift allows AWS accounts a maximum number of instances, per instance type, per AWS Region or location, for use with GameLift. You can request a limit increase for your account by using the Service limits page in the GameLift console.

Related actions

DescribeEC2InstanceLimits

" }, "EC2InstanceLimitList":{ "type":"list", @@ -3473,7 +3800,7 @@ }, "EventCode":{ "shape":"EventCode", - "documentation":"

The type of event being logged.

Fleet creation events (ordered by fleet creation activity):

VPC peering events:

Spot instance events:

Other fleet events:

" + "documentation":"

The type of event being logged.

Fleet creation events (ordered by fleet creation activity):

VPC peering events:

Spot instance events:

Other fleet events:

" }, "Message":{ "shape":"NonEmptyString", @@ -3481,14 +3808,14 @@ }, "EventTime":{ "shape":"Timestamp", - "documentation":"

Time stamp indicating when this event occurred. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + "documentation":"

Time stamp indicating when this event occurred. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" }, "PreSignedLogUrl":{ "shape":"NonZeroAndMaxString", - "documentation":"

Location of stored logs with additional detail that is related to the event. This is useful for debugging issues. The URL is valid for 15 minutes. You can also access fleet creation logs through the Amazon GameLift console.

" + "documentation":"

Location of stored logs with additional detail that is related to the event. This is useful for debugging issues. The URL is valid for 15 minutes. You can also access fleet creation logs through the GameLift console.

" } }, - "documentation":"

Log entry describing an event that involves Amazon GameLift resources (such as a fleet). In addition to tracking activity, event codes and messages can provide additional information for troubleshooting and debugging problems.

" + "documentation":"

Log entry describing an event that involves GameLift resources (such as a fleet). In addition to tracking activity, event codes and messages can provide additional information for troubleshooting and debugging problems.

Related actions

DescribeFleetEvents

" }, "EventCode":{ "type":"string", @@ -3532,6 +3859,16 @@ "type":"list", "member":{"shape":"Event"} }, + "FilterConfiguration":{ + "type":"structure", + "members":{ + "AllowedLocations":{ + "shape":"LocationList", + "documentation":"

A list of locations to allow game session placement in, in the form of AWS Region codes such as us-west-2.

" + } + }, + "documentation":"

A list of fleet locations where a game session queue can place new game sessions. You can use a filter to temporarily turn off placements for specific locations. For queues that have multi-location fleets, you can use a filter configuration to allow placement with some, but not all, of these locations.

Filter configurations are part of a GameSessionQueue.

" + }, "FleetAction":{ "type":"string", "enum":["AUTO_SCALING"] @@ -3551,23 +3888,23 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

A unique identifier for a fleet.

" + "documentation":"

A unique identifier for the fleet.

" }, "FleetArn":{ "shape":"FleetArn", - "documentation":"

The Amazon Resource Name (ARN) that is assigned to a GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift fleet ARN, the resource ID matches the FleetId value.

" + "documentation":"

The Amazon Resource Name (ARN) that is assigned to a GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. In a GameLift fleet ARN, the resource ID matches the FleetId value.

" }, "FleetType":{ "shape":"FleetType", - "documentation":"

Indicates whether the fleet uses on-demand or spot instances. A spot instance in use may be interrupted with a two-minute notification.

" + "documentation":"

The kind of instances, On-Demand or Spot, that this fleet uses.

" }, "InstanceType":{ "shape":"EC2InstanceType", - "documentation":"

EC2 instance type indicating the computing resources of each instance in the fleet, including CPU, memory, storage, and networking capacity. See Amazon EC2 Instance Types for detailed descriptions.

" + "documentation":"

The EC2 instance type that determines the computing resources of each instance in the fleet. Instance type defines the CPU, memory, storage, and networking capacity. See Amazon EC2 Instance Types for detailed descriptions.

" }, "Description":{ "shape":"NonZeroAndMaxString", - "documentation":"

Human-readable description of the fleet.

" + "documentation":"

A human-readable description of the fleet.

" }, "Name":{ "shape":"NonZeroAndMaxString", @@ -3575,74 +3912,74 @@ }, "CreationTime":{ "shape":"Timestamp", - "documentation":"

Time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + "documentation":"

A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" }, "TerminationTime":{ "shape":"Timestamp", - "documentation":"

Time stamp indicating when this data object was terminated. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + "documentation":"

A time stamp indicating when this data object was terminated. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" }, "Status":{ "shape":"FleetStatus", - "documentation":"

Current status of the fleet.

Possible fleet statuses include the following:

" + "documentation":"

Current status of the fleet. Possible fleet statuses include the following:

" }, "BuildId":{ "shape":"BuildId", - "documentation":"

A unique identifier for a build.

" + "documentation":"

A unique identifier for the build resource that is deployed on instances in this fleet.

" }, "BuildArn":{ "shape":"BuildArn", - "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift build resource that is deployed on instances in this fleet. In a GameLift build ARN, the resource ID matches the BuildId value.

" + "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift build resource that is deployed on instances in this fleet. In a GameLift build ARN, the resource ID matches the BuildId value.

" }, "ScriptId":{ "shape":"ScriptId", - "documentation":"

A unique identifier for a Realtime script.

" + "documentation":"

A unique identifier for the Realtime script resource that is deployed on instances in this fleet.

" }, "ScriptArn":{ "shape":"ScriptArn", - "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift script resource that is deployed on instances in this fleet. In a GameLift script ARN, the resource ID matches the ScriptId value.

" + "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift script resource that is deployed on instances in this fleet. In a GameLift script ARN, the resource ID matches the ScriptId value.

" }, "ServerLaunchPath":{ "shape":"NonZeroAndMaxString", - "documentation":"

Path to a game server executable in the fleet's build, specified for fleets created before 2016-08-04 (or AWS SDK v. 0.12.16). Server launch paths for fleets created after this date are specified in the fleet's RuntimeConfiguration.

" + "documentation":"

This parameter is no longer used. Server launch paths are now defined using the fleet's RuntimeConfiguration parameter. Requests that use this parameter instead continue to be valid.

" }, "ServerLaunchParameters":{ "shape":"NonZeroAndMaxString", - "documentation":"

Game server launch parameters specified for fleets created before 2016-08-04 (or AWS SDK v. 0.12.16). Server launch parameters for fleets created after this date are specified in the fleet's RuntimeConfiguration.

" + "documentation":"

This parameter is no longer used. Server launch parameters are now defined using the fleet's RuntimeConfiguration parameter. Requests that use this parameter instead continue to be valid.

" }, "LogPaths":{ "shape":"StringList", - "documentation":"

Location of default log files. When a server process is shut down, Amazon GameLift captures and stores any log files in this location. These logs are in addition to game session logs; see more on game session logs in the Amazon GameLift Developer Guide. If no default log path for a fleet is specified, Amazon GameLift automatically uploads logs that are stored on each instance at C:\\game\\logs (for Windows) or /local/game/logs (for Linux). Use the Amazon GameLift console to access stored logs.

" + "documentation":"

This parameter is no longer used. Game session log paths are now defined using the GameLift server API ProcessReady() logParameters. See more information in the Server API Reference.

" }, "NewGameSessionProtectionPolicy":{ "shape":"ProtectionPolicy", - "documentation":"

The type of game session protection to set for all new instances started in the fleet.

" + "documentation":"

The type of game session protection to set on all new instances that are started in the fleet.

" }, "OperatingSystem":{ "shape":"OperatingSystem", - "documentation":"

Operating system of the fleet's computing resources. A fleet's operating system depends on the OS specified for the build that is deployed on this fleet.

" + "documentation":"

The operating system of the fleet's computing resources. A fleet's operating system is determined by the OS of the build or script that is deployed on this fleet.

" }, "ResourceCreationLimitPolicy":{ "shape":"ResourceCreationLimitPolicy", - "documentation":"

Fleet policy to limit the number of game sessions an individual player can create over a span of time.

" + "documentation":"

The fleet policy that limits the number of game sessions an individual player can create over a span of time.

" }, "MetricGroups":{ "shape":"MetricGroupList", - "documentation":"

Names of metric groups that this fleet is included in. In Amazon CloudWatch, you can view metrics for an individual fleet or aggregated metrics for fleets that are in a fleet metric group. A fleet can be included in only one metric group at a time.

" + "documentation":"

Name of a metric group that metrics for this fleet are added to. In Amazon CloudWatch, you can view aggregated metrics for fleets that are in a metric group. A fleet can be included in only one metric group at a time.

" }, "StoppedActions":{ "shape":"FleetActionList", - "documentation":"

List of fleet activity that have been suspended using StopFleetActions. This includes auto-scaling.

" + "documentation":"

A list of fleet activity that has been suspended using StopFleetActions. This includes fleet auto-scaling.

" }, "InstanceRoleArn":{ "shape":"NonEmptyString", - "documentation":"

A unique identifier for an AWS IAM role that manages access to your AWS services.

" + "documentation":"

A unique identifier for an AWS IAM role that manages access to your AWS services. With an instance role ARN set, any application that runs on an instance in this fleet can assume the role, including install scripts, server processes, and daemons (background processes). Create a role or look up a role's ARN by using the IAM dashboard in the AWS Management Console. Learn more about using on-box credentials for your game servers at Access external resources from a game server.

" }, "CertificateConfiguration":{ "shape":"CertificateConfiguration", "documentation":"

Indicates whether a TLS/SSL certificate was generated for the fleet.

" } }, - "documentation":"

General properties describing a fleet.

" + "documentation":"

Describes a GameLift fleet of game hosting resources.

Related actions

CreateFleet | DescribeFleetAttributes

" }, "FleetAttributesList":{ "type":"list", @@ -3653,18 +3990,26 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

A unique identifier for a fleet.

" + "documentation":"

A unique identifier for the fleet associated with the location.

" + }, + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The Amazon Resource Name (ARN) that is assigned to a GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

" }, "InstanceType":{ "shape":"EC2InstanceType", - "documentation":"

Name of an EC2 instance type that is supported in Amazon GameLift. A fleet instance type determines the computing resources of each instance in the fleet, including CPU, memory, storage, and networking capacity. Amazon GameLift supports the following EC2 instance types. See Amazon EC2 Instance Types for detailed descriptions.

" + "documentation":"

The EC2 instance type that is used for all instances in a fleet. The instance type determines the computing resources in use, including CPU, memory, storage, and networking capacity. See Amazon EC2 Instance Types for detailed descriptions.

" }, "InstanceCounts":{ "shape":"EC2InstanceCounts", - "documentation":"

Current status of fleet capacity.

" + "documentation":"

The current instance count and capacity settings for the fleet location.

" + }, + "Location":{ + "shape":"LocationStringModel", + "documentation":"

The fleet location for the instance count information, expressed as an AWS Region code, such as us-west-2.

" } }, - "documentation":"

Information about the fleet's capacity. Fleet capacity is measured in EC2 instances. By default, new fleets have a capacity of one instance, but can be updated as needed. The maximum number of instances for a fleet is determined by the fleet's instance type.

" + "documentation":"

Current resource capacity settings in a specified fleet or location. The location value might refer to a fleet's remote location or its home Region.

Related actions

DescribeFleetCapacity | DescribeFleetLocationCapacity | UpdateFleetCapacity

" }, "FleetCapacityExceededException":{ "type":"structure", @@ -3722,26 +4067,34 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

A unique identifier for a fleet.

" + "documentation":"

A unique identifier for the fleet associated with the location.

" + }, + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The Amazon Resource Name (ARN) that is assigned to a GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

" }, "ActiveServerProcessCount":{ "shape":"WholeNumber", - "documentation":"

Number of server processes in an ACTIVE status currently running across all instances in the fleet

" + "documentation":"

The number of server processes in ACTIVE status that are currently running across all instances in the fleet location.

" }, "ActiveGameSessionCount":{ "shape":"WholeNumber", - "documentation":"

Number of active game sessions currently being hosted on all instances in the fleet.

" + "documentation":"

The number of active game sessions that are currently being hosted across all instances in the fleet location.

" }, "CurrentPlayerSessionCount":{ "shape":"WholeNumber", - "documentation":"

Number of active player sessions currently being hosted on all instances in the fleet.

" + "documentation":"

The number of active player sessions that are currently being hosted across all instances in the fleet location.

" }, "MaximumPlayerSessionCount":{ "shape":"WholeNumber", - "documentation":"

The maximum number of players allowed across all game sessions currently being hosted on all instances in the fleet.

" + "documentation":"

The maximum number of players allowed across all game sessions that are currently being hosted across all instances in the fleet location.

" + }, + "Location":{ + "shape":"LocationStringModel", + "documentation":"

The fleet location for the fleet utilization information, expressed as an AWS Region code, such as us-west-2.

" } }, - "documentation":"

Current status of fleet utilization, including the number of game and player sessions being hosted.

" + "documentation":"

Current resource utilization statistics in a specified fleet or location. The location value might refer to a fleet's remote location or its home Region.

Related actions

DescribeFleetUtilization | DescribeFleetLocationUtilization

" }, "FleetUtilizationList":{ "type":"list", @@ -3772,7 +4125,7 @@ "documentation":"

The game property value.

" } }, - "documentation":"

Set of key-value pairs that contain information about a game session. When included in a game session request, these properties communicate details to be used when setting up the new game session. For example, a game property might specify a game mode, level, or map. Game properties are passed to the game server process when initiating a new game session. For more information, see the Amazon GameLift Developer Guide.

" + "documentation":"

Set of key-value pairs that contain information about a game session. When included in a game session request, these properties communicate details to be used when setting up the new game session. For example, a game property might specify a game mode, level, or map. Game properties are passed to the game server process when initiating a new game session. For more information, see the GameLift Developer Guide.

" }, "GamePropertyKey":{ "type":"string", @@ -3835,7 +4188,7 @@ "documentation":"

Timestamp that indicates the last time the game server was updated with health status using an UpdateGameServer request. The format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\"). After game server registration, this property is only changed when a game server update specifies a health check value.

" } }, - "documentation":"

This data type is used with the Amazon GameLift FleetIQ and game server groups.

Properties describing a game server that is running on an instance in a GameServerGroup.

A game server is created by a successful call to RegisterGameServer and deleted by calling DeregisterGameServer. A game server is claimed to host a game session by calling ClaimGameServer.

" + "documentation":"

This data type is used with the GameLift FleetIQ and game server groups.

Properties describing a game server that is running on an instance in a GameServerGroup.

A game server is created by a successful call to RegisterGameServer and deleted by calling DeregisterGameServer. A game server is claimed to host a game session by calling ClaimGameServer.

Related actions

RegisterGameServer | ListGameServers | ClaimGameServer | DescribeGameServer | UpdateGameServer | DeregisterGameServer | All APIs by task

" }, "GameServerClaimStatus":{ "type":"string", @@ -3898,14 +4251,14 @@ }, "CreationTime":{ "shape":"Timestamp", - "documentation":"

A timestamp that indicates when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + "documentation":"

A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" }, "LastUpdatedTime":{ "shape":"Timestamp", "documentation":"

A timestamp that indicates when this game server group was last updated.

" } }, - "documentation":"

This data type is used with the Amazon GameLift FleetIQ and game server groups.

Properties that describe a game server group resource. A game server group manages certain properties related to a corresponding EC2 Auto Scaling group.

A game server group is created by a successful call to CreateGameServerGroup and deleted by calling DeleteGameServerGroup. Game server group activity can be temporarily suspended and resumed by calling SuspendGameServerGroup and ResumeGameServerGroup, respectively.

" + "documentation":"

This data type is used with the GameLift FleetIQ and game server groups.

Properties that describe a game server group resource. A game server group manages certain properties related to a corresponding EC2 Auto Scaling group.

A game server group is created by a successful call to CreateGameServerGroup and deleted by calling DeleteGameServerGroup. Game server group activity can be temporarily suspended and resumed by calling SuspendGameServerGroup and ResumeGameServerGroup, respectively.

Related actions

CreateGameServerGroup | ListGameServerGroups | DescribeGameServerGroup | UpdateGameServerGroup | DeleteGameServerGroup | ResumeGameServerGroup | SuspendGameServerGroup | DescribeGameServerInstances | All APIs by task

" }, "GameServerGroupAction":{ "type":"string", @@ -3936,7 +4289,7 @@ "documentation":"

Settings for a target-based scaling policy applied to an Auto Scaling group. These settings are used to create a target-based policy that tracks the GameLift FleetIQ metric \"PercentUtilizedGameServers\" and specifies a target value for the metric. As player usage changes, the policy triggers to adjust the game server group capacity so that the metric returns to the target value.

" } }, - "documentation":"

This data type is used with the Amazon GameLift FleetIQ and game server groups.

Configuration settings for intelligent automatic scaling that uses target tracking. These settings are used to add an Auto Scaling policy when creating the corresponding Auto Scaling group with CreateGameServerGroup. After the Auto Scaling group is created, all updates to Auto Scaling policies, including changing this policy and adding or removing other policies, is done directly on the Auto Scaling group.

" + "documentation":"

This data type is used with the GameLift FleetIQ and game server groups.

Configuration settings for intelligent automatic scaling that uses target tracking. These settings are used to add an Auto Scaling policy when creating the corresponding Auto Scaling group with CreateGameServerGroup. After the Auto Scaling group is created, all updates to Auto Scaling policies, including changing this policy and adding or removing other policies, is done directly on the Auto Scaling group.

" }, "GameServerGroupDeleteOption":{ "type":"string", @@ -3962,6 +4315,14 @@ "c5.12xlarge", "c5.18xlarge", "c5.24xlarge", + "c5a.large", + "c5a.xlarge", + "c5a.2xlarge", + "c5a.4xlarge", + "c5a.8xlarge", + "c5a.12xlarge", + "c5a.16xlarge", + "c5a.24xlarge", "r4.large", "r4.xlarge", "r4.2xlarge", @@ -3976,6 +4337,14 @@ "r5.12xlarge", "r5.16xlarge", "r5.24xlarge", + "r5a.large", + "r5a.xlarge", + "r5a.2xlarge", + "r5a.4xlarge", + "r5a.8xlarge", + "r5a.12xlarge", + "r5a.16xlarge", + "r5a.24xlarge", "m4.large", "m4.xlarge", "m4.2xlarge", @@ -3988,7 +4357,15 @@ "m5.8xlarge", "m5.12xlarge", "m5.16xlarge", - "m5.24xlarge" + "m5.24xlarge", + "m5a.large", + "m5a.xlarge", + "m5a.2xlarge", + "m5a.4xlarge", + "m5a.8xlarge", + "m5a.12xlarge", + "m5a.16xlarge", + "m5a.24xlarge" ] }, "GameServerGroupName":{ @@ -4049,7 +4426,7 @@ "documentation":"

Current status of the game server instance.

" } }, - "documentation":"

This data type is used with the Amazon GameLift FleetIQ and game server groups.

Additional properties, including status, that describe an EC2 instance in a game server group. Instance configurations are set with game server group properties (see DescribeGameServerGroup and with the EC2 launch template that was used when creating the game server group.

Retrieve game server instances for a game server group by calling DescribeGameServerInstances.

" + "documentation":"

This data type is used with the GameLift FleetIQ and game server groups.

Additional properties, including status, that describe an EC2 instance in a game server group. Instance configurations are set with game server group properties (see DescribeGameServerGroup) and with the EC2 launch template that was used when creating the game server group.

Retrieve game server instances for a game server group by calling DescribeGameServerInstances.

Related actions

CreateGameServerGroup | ListGameServerGroups | DescribeGameServerGroup | UpdateGameServerGroup | DeleteGameServerGroup | ResumeGameServerGroup | SuspendGameServerGroup | DescribeGameServerInstances | All APIs by task

" }, "GameServerInstanceId":{ "type":"string", @@ -4106,19 +4483,19 @@ }, "FleetId":{ "shape":"FleetId", - "documentation":"

A unique identifier for a fleet that the game session is running on.

" + "documentation":"

A unique identifier for the fleet that the game session is running on.

" }, "FleetArn":{ "shape":"FleetArn", - "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift fleet that this game session is running on.

" + "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift fleet that this game session is running on.

" }, "CreationTime":{ "shape":"Timestamp", - "documentation":"

Time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + "documentation":"

A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" }, "TerminationTime":{ "shape":"Timestamp", - "documentation":"

Time stamp indicating when this data object was terminated. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + "documentation":"

A time stamp indicating when this data object was terminated. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" }, "CurrentPlayerSessionCount":{ "shape":"WholeNumber", @@ -4138,19 +4515,19 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). You can search for active game sessions based on this custom data with SearchGameSessions.

" + "documentation":"

A set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session. You can search for active game sessions based on this custom data with SearchGameSessions.

" }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

IP address of the instance that is running the game session. When connecting to a Amazon GameLift game server, a client needs to reference an IP address (or DNS name) and port number.

" + "documentation":"

The IP address of the game session. To connect to a GameLift game server, an app needs both the IP address and port number.

" }, "DnsName":{ "shape":"DnsName", - "documentation":"

DNS identifier assigned to the instance that is running the game session. Values have the following format:

When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.

" + "documentation":"

The DNS identifier assigned to the instance that is running the game session. Values have the following format:

When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.

" }, "Port":{ "shape":"PortNumber", - "documentation":"

Port number for the game session. To connect to a Amazon GameLift game server, an app needs both the IP address and port number.

" + "documentation":"

The port number for the game session. To connect to a GameLift game server, an app needs both the IP address and port number.

" }, "PlayerSessionCreationPolicy":{ "shape":"PlayerSessionCreationPolicy", @@ -4161,15 +4538,19 @@ "documentation":"

A unique identifier for a player. This ID is used to enforce a resource protection policy (if one exists), that limits the number of game sessions a player can create.

" }, "GameSessionData":{ - "shape":"GameSessionData", - "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" + "shape":"LargeGameSessionData", + "documentation":"

A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session.

" }, "MatchmakerData":{ "shape":"MatchmakerData", "documentation":"

Information about the matchmaking process that was used to create the game session. It is in JSON syntax, formatted as a string. In addition to the matchmaking configuration used, it contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data. Matchmaker data is useful when requesting match backfills, and is updated whenever new players are added during a successful backfill (see StartMatchBackfill).

" + }, + "Location":{ + "shape":"LocationStringModel", + "documentation":"

The fleet location where the game session is running. This value might specify the fleet's home Region or a remote location. Location is expressed as an AWS Region code such as us-west-2.

" } }, - "documentation":"

Properties describing a game session.

A game session in ACTIVE status can host players. When a game session ends, its status is set to TERMINATED.

Once the session ends, the game session object is retained for 30 days. This means you can reuse idempotency token values after this time. Game session logs are retained for 14 days.

" + "documentation":"

Properties describing a game session.

A game session in ACTIVE status can host players. When a game session ends, its status is set to TERMINATED.

Once the session ends, the game session object is retained for 30 days. This means you can reuse idempotency token values after this time. Game session logs are retained for 14 days.

Related actions

CreateGameSession | DescribeGameSessions | DescribeGameSessionDetails | SearchGameSessions | UpdateGameSession | GetGameSessionLogUrl | StartGameSessionPlacement | DescribeGameSessionPlacement | StopGameSessionPlacement | All APIs by task

" }, "GameSessionActivationTimeoutSeconds":{ "type":"integer", @@ -4181,19 +4562,19 @@ "members":{ "GameSessionArn":{ "shape":"ArnStringModel", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session and uniquely identifies it.

" + "documentation":"

A unique identifier for the game session. Use the game session ID.

" }, "IpAddress":{ "shape":"StringModel", - "documentation":"

IP address of the instance that is running the game session. When connecting to a Amazon GameLift game server, a client needs to reference an IP address (or DNS name) and port number.

" + "documentation":"

The IP address of the game session. To connect to a GameLift game server, an app needs both the IP address and port number.

" }, "DnsName":{ "shape":"DnsName", - "documentation":"

DNS identifier assigned to the instance that is running the game session. Values have the following format:

When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.

" + "documentation":"

The DNS identifier assigned to the instance that is running the game session. Values have the following format:

When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.

" }, "Port":{ "shape":"PositiveInteger", - "documentation":"

Port number for the game session. To connect to a Amazon GameLift game server, an app needs both the IP address and port number.

" + "documentation":"

The port number for the game session. To connect to a GameLift game server, an app needs both the IP address and port number.

" }, "MatchedPlayerSessions":{ "shape":"MatchedPlayerSessionList", @@ -4254,7 +4635,7 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" + "documentation":"

A set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" }, "MaximumPlayerSessionCount":{ "shape":"WholeNumber", @@ -4278,11 +4659,11 @@ }, "PlayerLatencies":{ "shape":"PlayerLatencyList", - "documentation":"

Set of values, expressed in milliseconds, indicating the amount of latency that a player experiences when connected to AWS Regions.

" + "documentation":"

A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to AWS Regions.

" }, "StartTime":{ "shape":"Timestamp", - "documentation":"

Time stamp indicating when this request was placed in the queue. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + "documentation":"

Time stamp indicating when this request was placed in the queue. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" }, "EndTime":{ "shape":"Timestamp", @@ -4290,23 +4671,23 @@ }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

IP address of the instance that is running the game session. When connecting to a Amazon GameLift game server, a client needs to reference an IP address (or DNS name) and port number. This value is set once the new game session is placed (placement status is FULFILLED).

" + "documentation":"

The IP address of the game session. To connect to a GameLift game server, an app needs both the IP address and port number. This value is set once the new game session is placed (placement status is FULFILLED).

" }, "DnsName":{ "shape":"DnsName", - "documentation":"

DNS identifier assigned to the instance that is running the game session. Values have the following format:

When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.

" + "documentation":"

The DNS identifier assigned to the instance that is running the game session. Values have the following format:

When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.

" }, "Port":{ "shape":"PortNumber", - "documentation":"

Port number for the game session. To connect to a Amazon GameLift game server, an app needs both the IP address and port number. This value is set once the new game session is placed (placement status is FULFILLED).

" + "documentation":"

The port number for the game session. To connect to a GameLift game server, an app needs both the IP address and port number. This value is set once the new game session is placed (placement status is FULFILLED).

" }, "PlacedPlayerSessions":{ "shape":"PlacedPlayerSessionList", "documentation":"

A collection of information on player sessions created in response to the game session placement request. These player sessions are created only once a new game session is successfully placed (placement status is FULFILLED). This information includes the player ID (as provided in the placement request) and the corresponding player session ID. Retrieve full player sessions by calling DescribePlayerSessions with the player session ID.

" }, "GameSessionData":{ - "shape":"GameSessionData", - "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" + "shape":"LargeGameSessionData", + "documentation":"

A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" }, "MatchmakerData":{ "shape":"MatchmakerData", @@ -4334,7 +4715,7 @@ }, "GameSessionQueueArn":{ "shape":"GameSessionQueueArn", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift game session queue ARN, the resource ID matches the Name value.

" + "documentation":"

The Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::gamesessionqueue/<queue name>. In a GameLift game session queue ARN, the resource ID matches the Name value.

" }, "TimeoutInSeconds":{ "shape":"WholeNumber", @@ -4342,14 +4723,30 @@ }, "PlayerLatencyPolicies":{ "shape":"PlayerLatencyPolicyList", - "documentation":"

A collection of latency policies to apply when processing game sessions placement requests with player latency information. Multiple policies are evaluated in order of the maximum latency value, starting with the lowest latency values. With just one policy, the policy is enforced at the start of the game session placement for the duration period. With multiple policies, each policy is enforced consecutively for its duration period. For example, a queue might enforce a 60-second policy followed by a 120-second policy, and then no policy for the remainder of the placement.

" + "documentation":"

A set of policies that act as a sliding cap on player latency. FleetIQ works to deliver low latency for most players in a game session. These policies ensure that no individual player can be placed into a game with unreasonably high latency. Use multiple policies to gradually relax latency requirements a step at a time. Multiple policies are applied based on their maximum allowed latency, starting with the lowest value.

" }, "Destinations":{ "shape":"GameSessionQueueDestinationList", - "documentation":"

A list of fleets that can be used to fulfill game session placement requests in the queue. Fleets are identified by either a fleet ARN or a fleet alias ARN. Destinations are listed in default preference order.

" + "documentation":"

A list of fleets and/or fleet aliases that can be used to fulfill game session placement requests in the queue. Destinations are identified by either a fleet ARN or a fleet alias ARN, and are listed in order of placement preference.

" + }, + "FilterConfiguration":{ + "shape":"FilterConfiguration", + "documentation":"

A list of locations where a queue is allowed to place new game sessions. Locations are specified in the form of AWS Region codes, such as us-west-2. If this parameter is not set, game sessions can be placed in any queue location.

" + }, + "PriorityConfiguration":{ + "shape":"PriorityConfiguration", + "documentation":"

Custom settings to use when prioritizing destinations and locations for game session placements. This configuration replaces the FleetIQ default prioritization process. Priority types that are not explicitly named will be automatically applied at the end of the prioritization process.

" + }, + "CustomEventData":{ + "shape":"QueueCustomEventData", + "documentation":"

Information that is added to all events that are related to this game session queue.

" + }, + "NotificationTarget":{ + "shape":"QueueSnsArnStringModel", + "documentation":"

An SNS topic ARN that is set up to receive game session placement notifications. See Setting up notifications for game session placement.

" } }, - "documentation":"

Configuration of a queue that is used to process game session placement requests. The queue configuration identifies several game features:

" + "documentation":"

Configuration for a game session placement mechanism that processes requests for new game sessions. A queue can be used on its own or as part of a matchmaking solution.

Related actions

CreateGameSessionQueue | DescribeGameSessionQueues | UpdateGameSessionQueue

" }, "GameSessionQueueArn":{ "type":"string", @@ -4365,7 +4762,7 @@ "documentation":"

The Amazon Resource Name (ARN) that is assigned to fleet or fleet alias. ARNs, which include a fleet ID or alias ID and a Region name, provide a unique identifier across all Regions.

" } }, - "documentation":"

Fleet designated in a game session queue. Requests for new game sessions in the queue are fulfilled by starting a new game session on any destination that is configured for a queue.

" + "documentation":"

A fleet or alias designated in a game session queue. Queues fulfill requests for new game sessions by placing a new game session on any of the queue's destinations.

Destinations are part of a GameSessionQueue.

" }, "GameSessionQueueDestinationList":{ "type":"list", @@ -4435,11 +4832,11 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

A unique identifier for a fleet that contains the instance you want access to. You can use either the fleet ID or ARN value. The fleet can be in any of the following statuses: ACTIVATING, ACTIVE, or ERROR. Fleets with an ERROR status may be accessible for a short time before they are deleted.

" + "documentation":"

A unique identifier for the fleet that contains the instance you want access to. You can use either the fleet ID or ARN value. The fleet can be in any of the following statuses: ACTIVATING, ACTIVE, or ERROR. Fleets with an ERROR status may be accessible for a short time before they are deleted.

" }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

A unique identifier for an instance you want to get access to. You can access an instance in any status.

" + "documentation":"

A unique identifier for the instance you want to get access to. You can access an instance in any status.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -4479,11 +4876,15 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

A unique identifier for a fleet that the instance is in.

" + "documentation":"

A unique identifier for the fleet that the instance is in.

" + }, + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The Amazon Resource Name (ARN) that is assigned to a GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

" }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

A unique identifier for an instance.

" + "documentation":"

A unique identifier for the instance.

" }, "IpAddress":{ "shape":"IpAddress", @@ -4491,7 +4892,7 @@ }, "DnsName":{ "shape":"DnsName", - "documentation":"

DNS identifier assigned to the instance that is running the game session. Values have the following format:

When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.

" + "documentation":"

The DNS identifier assigned to the instance that is running the game session. Values have the following format:

When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.

" }, "OperatingSystem":{ "shape":"OperatingSystem", @@ -4503,25 +4904,29 @@ }, "Status":{ "shape":"InstanceStatus", - "documentation":"

Current status of the instance. Possible statuses include the following:

" + "documentation":"

Current status of the instance. Possible statuses include the following:

" }, "CreationTime":{ "shape":"Timestamp", - "documentation":"

Time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + "documentation":"

A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + }, + "Location":{ + "shape":"LocationStringModel", + "documentation":"

The fleet location of the instance, expressed as an AWS Region code, such as us-west-2.

" } }, - "documentation":"

Properties that describe an instance of a virtual computing resource that hosts one or more game servers. A fleet may contain zero or more instances.

" + "documentation":"

Represents an EC2 instance of virtual computing resources that hosts one or more game servers. In GameLift, a fleet can contain zero or more instances.

Related actions

DescribeInstances

" }, "InstanceAccess":{ "type":"structure", "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

A unique identifier for a fleet containing the instance being accessed.

" + "documentation":"

A unique identifier for the fleet containing the instance being accessed.

" }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

A unique identifier for an instance being accessed.

" + "documentation":"

A unique identifier for the instance being accessed.

" }, "IpAddress":{ "shape":"IpAddress", @@ -4566,7 +4971,7 @@ "documentation":"

Instance weighting that indicates how much this instance type contributes to the total capacity of a game server group. Instance weights are used by GameLift FleetIQ to calculate the instance type's cost per unit hour and better identify the most cost-effective options. For detailed information on weighting instance capacity, see Instance Weighting in the Amazon EC2 Auto Scaling User Guide. Default value is \"1\".

" } }, - "documentation":"

This data type is used with the Amazon GameLift FleetIQ and game server groups.

An allowed instance type for a GameServerGroup. All game server groups must have at least two instance types defined for it. GameLift FleetIQ periodically evaluates each defined instance type for viability. It then updates the Auto Scaling group with the list of viable instance types.

" + "documentation":"

This data type is used with the GameLift FleetIQ and game server groups.

An allowed instance type for a GameServerGroup. All game server groups must have at least two instance types defined for it. GameLift FleetIQ periodically evaluates each defined instance type for viability. It then updates the Auto Scaling group with the list of viable instance types.

" }, "InstanceDefinitions":{ "type":"list", @@ -4651,7 +5056,7 @@ "documentation":"

The network communication protocol used by the fleet.

" } }, - "documentation":"

A range of IP addresses and port settings that allow inbound traffic to connect to server processes on an Amazon GameLift hosting resource. New game sessions that are started on the fleet are assigned an IP address/port number combination, which must fall into the fleet's allowed ranges. For fleets created with a custom game server, the ranges reflect the server's game session assignments. For Realtime Servers fleets, Amazon GameLift automatically opens two port ranges, one for TCP messaging and one for UDP for use by the Realtime servers.

" + "documentation":"

A range of IP addresses and port settings that allow inbound traffic to connect to server processes on an instance in a fleet. New game sessions are assigned an IP address/port number combination, which must fall into the fleet's allowed ranges. Fleets with custom game builds must have permissions explicitly set. For Realtime Servers fleets, GameLift automatically opens two port ranges, one for TCP messaging and one for UDP.

Related actions

DescribeFleetPortSettings

" }, "IpPermissionsList":{ "type":"list", @@ -4665,6 +5070,11 @@ "UDP" ] }, + "LargeGameSessionData":{ + "type":"string", + "max":262144, + "min":1 + }, "LatencyMap":{ "type":"map", "key":{"shape":"NonEmptyString"}, @@ -4698,7 +5108,7 @@ "documentation":"

The version of the EC2 launch template to use. If no version is specified, the default version will be used. With Amazon EC2, you can specify a default version for a launch template. If none is set, the default is the first version created.

" } }, - "documentation":"

This data type is used with the Amazon GameLift FleetIQ and game server groups.

An EC2 launch template that contains configuration settings and game server code to be deployed to all instances in a game server group. The launch template is specified when creating a new game server group with CreateGameServerGroup.

" + "documentation":"

This data type is used with the GameLift FleetIQ and game server groups.

An EC2 launch template that contains configuration settings and game server code to be deployed to all instances in a game server group. The launch template is specified when creating a new game server group with CreateGameServerGroup.

" }, "LaunchTemplateVersion":{ "type":"string", @@ -4763,7 +5173,7 @@ }, "NextToken":{ "shape":"NonEmptyString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value.

" + "documentation":"

A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -4777,7 +5187,7 @@ }, "NextToken":{ "shape":"NonEmptyString", - "documentation":"

Token that indicates where to resume retrieving results on the next call to this operation. If no token is returned, these results represent the end of the list.

" + "documentation":"

A token that indicates where to resume retrieving results on the next call to this operation. If no token is returned, these results represent the end of the list.

" } }, "documentation":"

Represents the returned data in response to a request operation.

" @@ -4787,11 +5197,11 @@ "members":{ "BuildId":{ "shape":"BuildIdOrArn", - "documentation":"

A unique identifier for a build to return fleets for. Use this parameter to return only fleets using a specified build. Use either the build ID or ARN value. To retrieve all fleets, do not include either a BuildId and ScriptID parameter.

" + "documentation":"

A unique identifier for the build to request fleets for. Use this parameter to return only fleets using a specified build. Use either the build ID or ARN value.

" }, "ScriptId":{ "shape":"ScriptIdOrArn", - "documentation":"

A unique identifier for a Realtime script to return fleets for. Use this parameter to return only fleets using a specified script. Use either the script ID or ARN value. To retrieve all fleets, leave this parameter empty.

" + "documentation":"

A unique identifier for the Realtime script to request fleets for. Use this parameter to return only fleets using a specified script. Use either the script ID or ARN value.

" }, "Limit":{ "shape":"PositiveInteger", @@ -4799,7 +5209,7 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value.

" + "documentation":"

A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -4809,11 +5219,11 @@ "members":{ "FleetIds":{ "shape":"FleetIdList", - "documentation":"

Set of fleet IDs matching the list request. You can retrieve additional information about all returned fleets by passing this result set to a call to DescribeFleetAttributes, DescribeFleetCapacity, or DescribeFleetUtilization.

" + "documentation":"

A set of fleet IDs that match the list request. You can retrieve additional information about all returned fleets by passing this result set to a DescribeFleetAttributes, DescribeFleetCapacity, or DescribeFleetUtilization call.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates where to resume retrieving results on the next call to this operation. If no token is returned, these results represent the end of the list.

" + "documentation":"

A token that indicates where to resume retrieving results on the next call to this operation. If no token is returned, these results represent the end of the list.

" } }, "documentation":"

Represents the returned data in response to a request operation.

" @@ -4823,11 +5233,11 @@ "members":{ "Limit":{ "shape":"PositiveInteger", - "documentation":"

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential segments.

" + "documentation":"

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

A token that indicates the start of the next sequential segment of results. Use the token returned with the previous call to this operation. To start at the beginning of the result set, do not specify a value.

" + "documentation":"

A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value.

" } } }, @@ -4854,15 +5264,15 @@ }, "SortOrder":{ "shape":"SortOrder", - "documentation":"

Indicates how to sort the returned data based on game server registration timestamp. Use ASCENDING to retrieve oldest game servers first, or use DESCENDING to retrieve newest game servers first. If this parameter is left empty, game servers are returned in no particular order.

" + "documentation":"

Indicates how to sort the returned data based on game server registration timestamp. Use ASCENDING to retrieve oldest game servers first, or use DESCENDING to retrieve newest game servers first. If this parameter is left empty, game servers are returned in no particular order.

" }, "Limit":{ "shape":"PositiveInteger", - "documentation":"

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential segments.

" + "documentation":"

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

A token that indicates the start of the next sequential segment of results. Use the token returned with the previous call to this operation. To start at the beginning of the result set, do not specify a value.

" + "documentation":"

A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value.

" } } }, @@ -4924,6 +5334,78 @@ } } }, + "LocationAttributes":{ + "type":"structure", + "members":{ + "LocationState":{ + "shape":"LocationState", + "documentation":"

A fleet location and its current life-cycle state.

" + }, + "StoppedActions":{ + "shape":"FleetActionList", + "documentation":"

A list of fleet actions that have been suspended in the fleet location.

" + }, + "UpdateStatus":{ + "shape":"LocationUpdateStatus", + "documentation":"

The status of fleet activity updates to the location. The status PENDING_UPDATE indicates that StopFleetActions or StartFleetActions has been requested but the update has not yet been completed for the location.

" + } + }, + "documentation":"

Represents a location in a multi-location fleet.

Related actions

DescribeFleetLocationAttributes

" + }, + "LocationAttributesList":{ + "type":"list", + "member":{"shape":"LocationAttributes"} + }, + "LocationConfiguration":{ + "type":"structure", + "members":{ + "Location":{ + "shape":"LocationStringModel", + "documentation":"

An AWS Region code, such as us-west-2.

" + } + }, + "documentation":"

A remote location where a multi-location fleet can deploy EC2 instances for game hosting.

Related actions

CreateFleet

" + }, + "LocationConfigurationList":{ + "type":"list", + "member":{"shape":"LocationConfiguration"}, + "max":100, + "min":1 + }, + "LocationList":{ + "type":"list", + "member":{"shape":"LocationStringModel"}, + "max":100, + "min":1 + }, + "LocationState":{ + "type":"structure", + "members":{ + "Location":{ + "shape":"LocationStringModel", + "documentation":"

The fleet location, expressed as an AWS Region code such as us-west-2.

" + }, + "Status":{ + "shape":"FleetStatus", + "documentation":"

The life-cycle status of a fleet location.

" + } + }, + "documentation":"

A fleet location and its life-cycle state. A location state object might be used to describe a fleet's remote location or home Region. Life-cycle state tracks the progress of launching the first instance in a new location and preparing it for game hosting, and then removing all instances and deleting the location from the fleet.

Related actions

CreateFleet | CreateFleetLocations | DeleteFleetLocations

" + }, + "LocationStateList":{ + "type":"list", + "member":{"shape":"LocationState"} + }, + "LocationStringModel":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-z]+(-([a-z]+|\\d))*" + }, + "LocationUpdateStatus":{ + "type":"string", + "enum":["PENDING_UPDATE"] + }, "MatchedPlayerSession":{ "type":"structure", "members":{ @@ -4957,11 +5439,11 @@ "members":{ "Name":{ "shape":"MatchmakingIdStringModel", - "documentation":"

A unique identifier for a matchmaking configuration. This name is used to identify the configuration associated with a matchmaking request or ticket.

" + "documentation":"

A unique identifier for the matchmaking configuration. This name is used to identify the configuration associated with a matchmaking request or ticket.

" }, "ConfigurationArn":{ "shape":"MatchmakingConfigurationArn", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift matchmaking configuration resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift configuration ARN, the resource ID matches the Name value.

" + "documentation":"

The Amazon Resource Name (ARN) that is assigned to a GameLift matchmaking configuration resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::matchmakingconfiguration/<matchmaking configuration name>. In a GameLift configuration ARN, the resource ID matches the Name value.

" }, "Description":{ "shape":"NonZeroAndMaxString", @@ -4969,7 +5451,7 @@ }, "GameSessionQueueArns":{ "shape":"QueueArnsList", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Queues can be located in any Region. Queues are used to start new GameLift-hosted game sessions for matches that are created with this matchmaking configuration. Thais property is not set when FlexMatchMode is set to STANDALONE.

" + "documentation":"

The Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::gamesessionqueue/<queue name>. Queues can be located in any Region. Queues are used to start new GameLift-hosted game sessions for matches that are created with this matchmaking configuration. This property is not set when FlexMatchMode is set to STANDALONE.

" }, "RequestTimeoutSeconds":{ "shape":"MatchmakingRequestTimeoutInteger", @@ -4977,7 +5459,7 @@ }, "AcceptanceTimeoutSeconds":{ "shape":"MatchmakingAcceptanceTimeoutInteger", - "documentation":"

The length of time (in seconds) to wait for players to accept a proposed match, if acceptance is required. If any player rejects the match or fails to accept before the timeout, the tickets are returned to the ticket pool and continue to be evaluated for an acceptable match.

" + "documentation":"

The length of time (in seconds) to wait for players to accept a proposed match, if acceptance is required. If any player rejects the match or fails to accept before the timeout, the ticket continues to look for an acceptable match.

" }, "AcceptanceRequired":{ "shape":"BooleanModel", @@ -4985,11 +5467,11 @@ }, "RuleSetName":{ "shape":"MatchmakingIdStringModel", - "documentation":"

A unique identifier for a matchmaking rule set to use with this configuration. A matchmaking configuration can only use rule sets that are defined in the same Region.

" + "documentation":"

A unique identifier for the matchmaking rule set to use with this configuration. A matchmaking configuration can only use rule sets that are defined in the same Region.

" }, "RuleSetArn":{ "shape":"MatchmakingRuleSetArn", - "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift matchmaking rule set resource that this configuration uses.

" + "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift matchmaking rule set resource that this configuration uses.

" }, "NotificationTarget":{ "shape":"SnsArnStringModel", @@ -4997,7 +5479,7 @@ }, "AdditionalPlayerCount":{ "shape":"WholeNumber", - "documentation":"

The number of player slots in a match to keep open for future players. For example, assume that the configuration's rule set specifies a match for a single 12-person team. If the additional player count is set to 2, only 10 players are initially selected for the match. This parameter is not used when FlexMatchMode is set to STANDALONE.

" + "documentation":"

The number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single 12-person team, and the additional player count is set to 2, only 10 players are selected for the match. This parameter is not used when FlexMatchMode is set to STANDALONE.

" }, "CustomEventData":{ "shape":"CustomEventData", @@ -5005,11 +5487,11 @@ }, "CreationTime":{ "shape":"Timestamp", - "documentation":"

The time stamp indicating when this data object was created. The format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + "documentation":"

A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

A set of custom properties for a game session, formatted as key-value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match. This parameter is not used when FlexMatchMode is set to STANDALONE.

" + "documentation":"

A set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match. This parameter is not used when FlexMatchMode is set to STANDALONE.

" }, "GameSessionData":{ "shape":"GameSessionData", @@ -5017,7 +5499,7 @@ }, "BackfillMode":{ "shape":"BackfillMode", - "documentation":"

The method used to backfill game sessions created with this matchmaking configuration. MANUAL indicates that the game makes backfill requests or does not use the match backfill feature. AUTOMATIC indicates that GameLift creates StartMatchBackfill requests whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch. Automatic backfill is not available when FlexMatchMode is set to STANDALONE.

" + "documentation":"

The method used to backfill game sessions created with this matchmaking configuration. MANUAL indicates that the game makes backfill requests or does not use the match backfill feature. AUTOMATIC indicates that GameLift creates StartMatchBackfill requests whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill existing games with FlexMatch. Automatic backfill is not available when FlexMatchMode is set to STANDALONE.

" }, "FlexMatchMode":{ "shape":"FlexMatchMode", @@ -5078,11 +5560,11 @@ "members":{ "RuleSetName":{ "shape":"MatchmakingIdStringModel", - "documentation":"

A unique identifier for a matchmaking rule set

" + "documentation":"

A unique identifier for the matchmaking rule set

" }, "RuleSetArn":{ "shape":"MatchmakingRuleSetArn", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift matchmaking rule set resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift rule set ARN, the resource ID matches the RuleSetName value.

" + "documentation":"

The Amazon Resource Name (ARN) that is assigned to a GameLift matchmaking rule set resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::matchmakingruleset/<ruleset name>. In a GameLift rule set ARN, the resource ID matches the RuleSetName value.

" }, "RuleSetBody":{ "shape":"RuleSetBody", @@ -5090,10 +5572,10 @@ }, "CreationTime":{ "shape":"Timestamp", - "documentation":"

The time stamp indicating when this data object was created. The format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + "documentation":"

A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" } }, - "documentation":"

Set of rule statements, used with FlexMatch, that determine how to build your player matches. Each rule set describes a type of group to be created and defines the parameters for acceptable player matches. Rule sets are used in MatchmakingConfiguration objects.

A rule set may define the following elements for a match. For detailed information and examples showing how to construct a rule set, see Build a FlexMatch Rule Set.

" + "documentation":"

Set of rule statements, used with FlexMatch, that determine how to build your player matches. Each rule set describes a type of group to be created and defines the parameters for acceptable player matches. Rule sets are used in MatchmakingConfiguration objects.

A rule set may define the following elements for a match. For detailed information and examples showing how to construct a rule set, see Build a FlexMatch rule set.

" }, "MatchmakingRuleSetArn":{ "type":"string", @@ -5129,7 +5611,7 @@ }, "ConfigurationArn":{ "shape":"MatchmakingConfigurationArn", - "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift matchmaking configuration resource that is used with this ticket.

" + "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift matchmaking configuration resource that is used with this ticket.

" }, "Status":{ "shape":"MatchmakingConfigurationStatus", @@ -5145,11 +5627,11 @@ }, "StartTime":{ "shape":"Timestamp", - "documentation":"

Time stamp indicating when this matchmaking request was received. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + "documentation":"

Time stamp indicating when this matchmaking request was received. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" }, "EndTime":{ "shape":"Timestamp", - "documentation":"

Time stamp indicating when this matchmaking request stopped being processed due to success, failure, or cancellation. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + "documentation":"

Time stamp indicating when this matchmaking request stopped being processed due to success, failure, or cancellation. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" }, "Players":{ "shape":"PlayerList", @@ -5260,7 +5742,7 @@ "documentation":"

A unique identifier for a player session.

" } }, - "documentation":"

Information about a player session that was created as part of a StartGameSessionPlacement request. This object contains only the player ID and player session ID. To retrieve full details on a player session, call DescribePlayerSessions with the player session ID.

" + "documentation":"

Information about a player session that was created as part of a StartGameSessionPlacement request. This object contains only the player ID and player session ID. To retrieve full details on a player session, call DescribePlayerSessions with the player session ID.

Related actions

CreatePlayerSession | CreatePlayerSessions | DescribePlayerSessions | StartGameSessionPlacement | DescribeGameSessionPlacement | StopGameSessionPlacement | All APIs by task

" }, "PlacedPlayerSessionList":{ "type":"list", @@ -5283,7 +5765,7 @@ }, "LatencyInMs":{ "shape":"LatencyMap", - "documentation":"

Set of values, expressed in milliseconds, indicating the amount of latency that a player experiences when connected to AWS Regions. If this property is present, FlexMatch considers placing the match only in Regions for which latency is reported.

If a matchmaker has a rule that evaluates player latency, players must report latency in order to be matched. If no latency is reported in this scenario, FlexMatch assumes that no Regions are available to the player and the ticket is not matchable.

" + "documentation":"

A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to AWS Regions. If this property is present, FlexMatch considers placing the match only in Regions for which latency is reported.

If a matchmaker has a rule that evaluates player latency, players must report latency in order to be matched. If no latency is reported in this scenario, FlexMatch assumes that no Regions are available to the player and the ticket is not matchable.

" } }, "documentation":"

Represents a player in matchmaking. When starting a matchmaking request, a player has a player ID, attributes, and may have latency data. Team information is added after a match has been successfully completed.

" @@ -5343,7 +5825,7 @@ "documentation":"

The length of time, in seconds, that the policy is enforced while placing a new game session. A null value for this property means that the policy is enforced until the queue times out.

" } }, - "documentation":"

Queue setting that determines the highest latency allowed for individual players when placing a game session. When a latency policy is in force, a game session cannot be placed with any fleet in a Region where a player reports latency higher than the cap. Latency policies are only enforced when the placement request contains player latency information.

" + "documentation":"

Sets a latency cap for individual players when placing a game session. With a latency policy in force, a game session cannot be placed in a fleet location where a player reports latency higher than the cap. Latency policies are used only with placement requests that provide player latency information. Player latency policies can be stacked to gradually relax latency requirements over time.

Latency policies are part of a GameSessionQueue.

" }, "PlayerLatencyPolicyList":{ "type":"list", @@ -5370,19 +5852,19 @@ }, "FleetId":{ "shape":"FleetId", - "documentation":"

A unique identifier for a fleet that the player's game session is running on.

" + "documentation":"

A unique identifier for the fleet that the player's game session is running on.

" }, "FleetArn":{ "shape":"FleetArn", - "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift fleet that the player's game session is running on.

" + "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift fleet that the player's game session is running on.

" }, "CreationTime":{ "shape":"Timestamp", - "documentation":"

Time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + "documentation":"

A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" }, "TerminationTime":{ "shape":"Timestamp", - "documentation":"

Time stamp indicating when this data object was terminated. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + "documentation":"

A time stamp indicating when this data object was terminated. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" }, "Status":{ "shape":"PlayerSessionStatus", @@ -5390,11 +5872,11 @@ }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

IP address of the instance that is running the game session. When connecting to a Amazon GameLift game server, a client needs to reference an IP address (or DNS name) and port number.

" + "documentation":"

The IP address of the game session. To connect to a GameLift game server, an app needs both the IP address and port number.

" }, "DnsName":{ "shape":"DnsName", - "documentation":"

DNS identifier assigned to the instance that is running the game session. Values have the following format:

When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.

" + "documentation":"

The DNS identifier assigned to the instance that is running the game session. Values have the following format:

When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.

" }, "Port":{ "shape":"PortNumber", @@ -5402,10 +5884,10 @@ }, "PlayerData":{ "shape":"PlayerData", - "documentation":"

Developer-defined information related to a player. Amazon GameLift does not use this data, so it can be formatted as needed for use in the game.

" + "documentation":"

Developer-defined information related to a player. GameLift does not use this data, so it can be formatted as needed for use in the game.

" } }, - "documentation":"

Properties describing a player session. Player session objects are created either by creating a player session for a specific game session, or as part of a game session placement. A player session represents either a player reservation for a game session (status RESERVED) or actual player activity in a game session (status ACTIVE). A player session object (including player data) is automatically passed to a game session when the player connects to the game session and is validated.

When a player disconnects, the player session status changes to COMPLETED. Once the session ends, the player session object is retained for 30 days and then removed.

" + "documentation":"

Represents a player session. Player sessions are created either for a specific game session, or as part of a game session placement or matchmaking request. A player session can represent a reserved player slot in a game session (when status is RESERVED) or actual player activity in a game session (when status is ACTIVE). A player session object, including player data, is automatically passed to a game session when the player connects to the game session and is validated. After the game session ends, player session information is retained for 30 days and then removed.

Related actions

CreatePlayerSession | CreatePlayerSessions | DescribePlayerSessions | StartGameSessionPlacement | DescribeGameSessionPlacement | All APIs by task

" }, "PlayerSessionCreationPolicy":{ "type":"string", @@ -5451,6 +5933,35 @@ "type":"long", "min":1 }, + "PriorityConfiguration":{ + "type":"structure", + "members":{ + "PriorityOrder":{ + "shape":"PriorityTypeList", + "documentation":"

The recommended sequence to use when prioritizing where to place new game sessions. Each type can only be listed once.

" + }, + "LocationOrder":{ + "shape":"LocationList", + "documentation":"

The prioritization order to use for fleet locations, when the PriorityOrder property includes LOCATION. Locations are identified by AWS Region codes such as us-west-2. Each location can only be listed once.

" + } + }, + "documentation":"

Custom prioritization settings for use by a game session queue when placing new game sessions with available game servers. When defined, this configuration replaces the default FleetIQ prioritization process, which is as follows:

Changing the priority order will affect how game sessions are placed.

Priority configurations are part of a GameSessionQueue.

" + }, + "PriorityType":{ + "type":"string", + "enum":[ + "LATENCY", + "COST", + "DESTINATION", + "LOCATION" + ] + }, + "PriorityTypeList":{ + "type":"list", + "member":{"shape":"PriorityType"}, + "max":4, + "min":1 + }, "ProtectionPolicy":{ "type":"string", "enum":[ @@ -5468,11 +5979,11 @@ "members":{ "Name":{ "shape":"NonZeroAndMaxString", - "documentation":"

A descriptive label that is associated with a scaling policy. Policy names do not need to be unique. A fleet can have only one scaling policy with the same name.

" + "documentation":"

A descriptive label that is associated with a fleet's scaling policy. Policy names do not need to be unique. A fleet can have only one scaling policy with the same name.

" }, "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

A unique identifier for a fleet to apply this policy to. You can use either the fleet ID or ARN value. The fleet cannot be in any of the following statuses: ERROR or DELETING.

" + "documentation":"

A unique identifier for the fleet to apply this policy to. You can use either the fleet ID or ARN value. The fleet cannot be in any of the following statuses: ERROR or DELETING.

" }, "ScalingAdjustment":{ "shape":"Integer", @@ -5504,7 +6015,7 @@ }, "TargetConfiguration":{ "shape":"TargetConfiguration", - "documentation":"

The settings for a target-based scaling policy.

" + "documentation":"

An object that contains settings for a target-based scaling policy.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -5514,7 +6025,7 @@ "members":{ "Name":{ "shape":"NonZeroAndMaxString", - "documentation":"

A descriptive label that is associated with a scaling policy. Policy names do not need to be unique.

" + "documentation":"

A descriptive label that is associated with a fleet's scaling policy. Policy names do not need to be unique.

" } }, "documentation":"

Represents the returned data in response to a request operation.

" @@ -5523,6 +6034,18 @@ "type":"list", "member":{"shape":"ArnStringModel"} }, + "QueueCustomEventData":{ + "type":"string", + "max":256, + "min":0, + "pattern":"[\\s\\S]*" + }, + "QueueSnsArnStringModel":{ + "type":"string", + "max":300, + "min":0, + "pattern":"[a-zA-Z0-9:_-]*(\\.fifo)?" + }, "RegisterGameServerInput":{ "type":"structure", "required":[ @@ -5568,7 +6091,7 @@ "members":{ "BuildId":{ "shape":"BuildIdOrArn", - "documentation":"

A unique identifier for a build to get credentials for. You can use either the build ID or ARN value.

" + "documentation":"

A unique identifier for the build to get credentials for. You can use either the build ID or ARN value.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -5607,7 +6130,7 @@ }, "FleetArn":{ "shape":"FleetArn", - "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift fleet resource that this alias points to.

" + "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift fleet resource that this alias points to.

" } }, "documentation":"

Represents the returned data in response to a request operation.

" @@ -5624,7 +6147,7 @@ "documentation":"

The time span used in evaluating the resource creation limit policy.

" } }, - "documentation":"

A policy that limits the number of game sessions a player can create on the same fleet. This optional policy gives game owners control over how players can consume available game server resources. A resource creation policy makes the following statement: \"An individual player can create a maximum number of new game sessions within a specified time period\".

The policy is evaluated when a player tries to create a new game session. For example: Assume you have a policy of 10 new game sessions and a time period of 60 minutes. On receiving a CreateGameSession request, Amazon GameLift checks that the player (identified by CreatorId) has created fewer than 10 game sessions in the past 60 minutes.

" + "documentation":"

A policy that puts limits on the number of game sessions that a player can create within a specified span of time. With this policy, you can control players' ability to consume available resources.

The policy is evaluated when a player tries to create a new game session. On receiving a CreateGameSession request, GameLift checks that the player (identified by CreatorId) has created fewer than the game session limit in the specified time period.

The resource creation limit policy is included in FleetAttributes.

" }, "ResumeGameServerGroupInput":{ "type":"structure", @@ -5661,14 +6184,14 @@ }, "FleetId":{ "shape":"FleetId", - "documentation":"

The unique identifier for a fleet that the alias points to. This value is the fleet ID, not the fleet ARN.

" + "documentation":"

A unique identifier for the fleet that the alias points to. This value is the fleet ID, not the fleet ARN.

" }, "Message":{ "shape":"FreeText", "documentation":"

The message text to be used with a terminal routing strategy.

" } }, - "documentation":"

The routing configuration for a fleet alias.

" + "documentation":"

The routing configuration for a fleet alias.

Related actions

CreateAlias | ListAliases | DescribeAlias | UpdateAlias | DeleteAlias | ResolveAlias | All APIs by task

" }, "RoutingStrategyType":{ "type":"string", @@ -5692,25 +6215,25 @@ "members":{ "ServerProcesses":{ "shape":"ServerProcessList", - "documentation":"

A collection of server process configurations that describe which server processes to run on each instance in a fleet.

" + "documentation":"

A collection of server process configurations that identify what server processes to run on each instance in a fleet.

" }, "MaxConcurrentGameSessionActivations":{ "shape":"MaxConcurrentGameSessionActivations", - "documentation":"

The maximum number of game sessions with status ACTIVATING to allow on an instance simultaneously. This setting limits the amount of instance resources that can be used for new game activations at any one time.

" + "documentation":"

The number of game sessions in status ACTIVATING to allow on an instance. This setting limits the instance resources that can be used for new game activations at any one time.

" }, "GameSessionActivationTimeoutSeconds":{ "shape":"GameSessionActivationTimeoutSeconds", - "documentation":"

The maximum amount of time (in seconds) that a game session can remain in status ACTIVATING. If the game session is not active before the timeout, activation is terminated and the game session status is changed to TERMINATED.

" + "documentation":"

The maximum amount of time (in seconds) allowed to launch a new game session and have it report ready to host players. During this time, the game session is in status ACTIVATING. If the game session does not become active before the timeout, it is ended and the game session status is changed to TERMINATED.

" } }, - "documentation":"

A collection of server process configurations that describe what processes to run on each instance in a fleet. Server processes run either a custom game build executable or a Realtime Servers script. Each instance in the fleet starts the specified server processes and continues to start new processes as existing processes end. Each instance regularly checks for an updated runtime configuration.

The runtime configuration enables the instances in a fleet to run multiple processes simultaneously. Learn more about Running Multiple Processes on a Fleet .

A Amazon GameLift instance is limited to 50 processes running simultaneously. To calculate the total number of processes in a runtime configuration, add the values of the ConcurrentExecutions parameter for each ServerProcess object.

" + "documentation":"

A collection of server process configurations that describe the set of processes to run on each instance in a fleet. Server processes run either an executable in a custom game build or a Realtime Servers script. GameLift launches the configured processes, manages their life cycle, and replaces them as needed. Each instance checks regularly for an updated runtime configuration.

A GameLift instance is limited to 50 processes running concurrently. To calculate the total number of processes in a runtime configuration, add the values of the ConcurrentExecutions parameter for each ServerProcess. Learn more about Running Multiple Processes on a Fleet.

Related actions

DescribeRuntimeConfiguration | UpdateRuntimeConfiguration

" }, "S3Location":{ "type":"structure", "members":{ "Bucket":{ "shape":"NonEmptyString", - "documentation":"

An S3 bucket identifier. This is the name of the S3 bucket.

GameLift currently does not support uploading from S3 buckets with names that contain a dot (.).

" + "documentation":"

An Amazon S3 bucket identifier. This is the name of the S3 bucket.

GameLift currently does not support uploading from Amazon S3 buckets with names that contain a dot (.).

" }, "Key":{ "shape":"NonEmptyString", @@ -5725,7 +6248,7 @@ "documentation":"

The version of the file, if object versioning is turned on for the bucket. Amazon GameLift uses this information when retrieving files from an S3 bucket that you own. Use this parameter to specify a specific version of the file. If not set, the latest version of the file is retrieved.

" } }, - "documentation":"

The location in S3 where build or script files are stored for access by Amazon GameLift. This location is specified in CreateBuild, CreateScript, and UpdateScript requests.

" + "documentation":"

The location in Amazon S3 where build or script files are stored for access by Amazon GameLift. This location is specified in CreateBuild, CreateScript, and UpdateScript requests.

" }, "ScalingAdjustmentType":{ "type":"string", @@ -5740,11 +6263,15 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

A unique identifier for a fleet that is associated with this scaling policy.

" + "documentation":"

A unique identifier for the fleet that is associated with this scaling policy.

" + }, + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The Amazon Resource Name (ARN) that is assigned to a GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

" }, "Name":{ "shape":"NonZeroAndMaxString", - "documentation":"

A descriptive label that is associated with a scaling policy. Policy names do not need to be unique.

" + "documentation":"

A descriptive label that is associated with a fleet's scaling policy. Policy names do not need to be unique.

" }, "Status":{ "shape":"ScalingStatusType", @@ -5780,10 +6307,18 @@ }, "TargetConfiguration":{ "shape":"TargetConfiguration", - "documentation":"

The settings for a target-based scaling policy.

" + "documentation":"

An object that contains settings for a target-based scaling policy.

" + }, + "UpdateStatus":{ + "shape":"LocationUpdateStatus", + "documentation":"

The current status of the fleet's scaling policies in a requested fleet location. The status PENDING_UPDATE indicates that an update was requested for the fleet but has not yet been completed for the location.

" + }, + "Location":{ + "shape":"LocationStringModel", + "documentation":"

" } }, - "documentation":"

Rule that controls how a fleet is scaled. Scaling policies are uniquely identified by the combination of name and fleet ID.

" + "documentation":"

Rule that controls how a fleet is scaled. Scaling policies are uniquely identified by the combination of name and fleet ID.

Related actions

DescribeFleetCapacity | UpdateFleetCapacity | DescribeEC2InstanceLimits | PutScalingPolicy | DescribeScalingPolicies | DeleteScalingPolicy | StopFleetActions | StartFleetActions | All APIs by task

" }, "ScalingPolicyList":{ "type":"list", @@ -5806,11 +6341,11 @@ "members":{ "ScriptId":{ "shape":"ScriptId", - "documentation":"

A unique identifier for a Realtime script

" + "documentation":"

A unique identifier for the Realtime script

" }, "ScriptArn":{ "shape":"ScriptArn", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift script resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift script ARN, the resource ID matches the ScriptId value.

" + "documentation":"

The Amazon Resource Name (ARN) that is assigned to a GameLift script resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift script ARN, the resource ID matches the ScriptId value.

" }, "Name":{ "shape":"NonZeroAndMaxString", @@ -5818,7 +6353,7 @@ }, "Version":{ "shape":"NonZeroAndMaxString", - "documentation":"

The version that is associated with a build or script. Version strings do not need to be unique.

" + "documentation":"

Version information that is associated with a build or script. Version strings do not need to be unique.

" }, "SizeOnDisk":{ "shape":"PositiveLong", @@ -5826,11 +6361,11 @@ }, "CreationTime":{ "shape":"Timestamp", - "documentation":"

A time stamp indicating when this data object was created. The format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + "documentation":"

A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" }, "StorageLocation":{"shape":"S3Location"} }, - "documentation":"

Properties describing a Realtime script.

Related operations

" + "documentation":"

Properties describing a Realtime script.

Related actions

CreateScript | ListScripts | DescribeScript | UpdateScript | DeleteScript | All APIs by task

" }, "ScriptArn":{ "type":"string", @@ -5853,11 +6388,15 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

A unique identifier for a fleet to search for active game sessions. You can use either the fleet ID or ARN value. Each request must reference either a fleet ID or alias ID, but not both.

" + "documentation":"

A unique identifier for the fleet to search for active game sessions. You can use either the fleet ID or ARN value. Each request must reference either a fleet ID or alias ID, but not both.

" }, "AliasId":{ "shape":"AliasIdOrArn", - "documentation":"

A unique identifier for an alias associated with the fleet to search for active game sessions. You can use either the alias ID or ARN value. Each request must reference either a fleet ID or alias ID, but not both.

" + "documentation":"

A unique identifier for the alias associated with the fleet to search for active game sessions. You can use either the alias ID or ARN value. Each request must reference either a fleet ID or alias ID, but not both.

" + }, + "Location":{ + "shape":"LocationStringModel", + "documentation":"

A fleet location to search for game sessions. You can specify a fleet's home Region or a remote location. Use the AWS Region code format, such as us-west-2.

" }, "FilterExpression":{ "shape":"NonZeroAndMaxString", @@ -5873,7 +6412,7 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value.

" + "documentation":"

A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -5883,11 +6422,11 @@ "members":{ "GameSessions":{ "shape":"GameSessionList", - "documentation":"

A collection of objects containing game session properties for each session matching the request.

" + "documentation":"

A collection of objects containing game session properties for each session that matches the request.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates where to resume retrieving results on the next call to this operation. If no token is returned, these results represent the end of the list.

" + "documentation":"

A token that indicates where to resume retrieving results on the next call to this operation. If no token is returned, these results represent the end of the list.

" } }, "documentation":"

Represents the returned data in response to a request operation.

" @@ -5901,7 +6440,7 @@ "members":{ "LaunchPath":{ "shape":"NonZeroAndMaxString", - "documentation":"

The location of the server executable in a custom game build or the name of the Realtime script file that contains the Init() function. Game builds and Realtime scripts are installed on instances at the root:

" + "documentation":"

The location of a game build executable or the Realtime script file that contains the Init() function. Game builds and Realtime scripts are installed on instances at the root:

" }, "Parameters":{ "shape":"NonZeroAndMaxString", @@ -5909,10 +6448,10 @@ }, "ConcurrentExecutions":{ "shape":"PositiveInteger", - "documentation":"

The number of server processes that use this configuration to run concurrently on an instance.

" + "documentation":"

The number of server processes using this configuration that run concurrently on each instance.

" } }, - "documentation":"

A set of instructions for launching server processes on each instance in a fleet. Server processes run either a custom game build executable or a Realtime Servers script. Each instruction set identifies the location of the custom game build executable or Realtime launch script, optional launch parameters, and the number of server processes with this configuration to maintain concurrently on the instance. Server process configurations make up a fleet's RuntimeConfiguration .

" + "documentation":"

A set of instructions for launching server processes on each instance in a fleet. Server processes run either an executable in a custom game build or a Realtime Servers script. Server process configurations are part of a fleet's RuntimeConfiguration.

" }, "ServerProcessList":{ "type":"list", @@ -5924,7 +6463,7 @@ "type":"string", "max":300, "min":0, - "pattern":"[a-zA-Z0-9:_/-]*" + "pattern":"[a-zA-Z0-9:_/-]*(.fifo)?" }, "SortOrder":{ "type":"string", @@ -5942,18 +6481,32 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

A unique identifier for a fleet to start actions on. You can use either the fleet ID or ARN value.

" + "documentation":"

A unique identifier for the fleet to restart actions on. You can use either the fleet ID or ARN value.

" }, "Actions":{ "shape":"FleetActionList", "documentation":"

List of actions to restart on the fleet.

" + }, + "Location":{ + "shape":"LocationStringModel", + "documentation":"

The fleet location to restart fleet actions for. Specify a location in the form of an AWS Region code, such as us-west-2.

" } - } + }, + "documentation":"

Represents the input for a request operation.

" }, "StartFleetActionsOutput":{ "type":"structure", "members":{ - } + "FleetId":{ + "shape":"FleetId", + "documentation":"

A unique identifier for the fleet to restart actions on.

" + }, + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The Amazon Resource Name (ARN) that is assigned to a GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

" + } + }, + "documentation":"

Represents the returned data in response to a request operation.

" }, "StartGameSessionPlacementInput":{ "type":"structure", @@ -5973,7 +6526,7 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" + "documentation":"

A set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" }, "MaximumPlayerSessionCount":{ "shape":"WholeNumber", @@ -5985,15 +6538,15 @@ }, "PlayerLatencies":{ "shape":"PlayerLatencyList", - "documentation":"

Set of values, expressed in milliseconds, indicating the amount of latency that a player experiences when connected to AWS Regions. This information is used to try to place the new game session where it can offer the best possible gameplay experience for the players.

" + "documentation":"

A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to AWS Regions. This information is used to try to place the new game session where it can offer the best possible gameplay experience for the players.

" }, "DesiredPlayerSessions":{ "shape":"DesiredPlayerSessionList", "documentation":"

Set of information on each player to create a player session for.

" }, "GameSessionData":{ - "shape":"GameSessionData", - "documentation":"

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" + "shape":"LargeGameSessionData", + "documentation":"

A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

" } }, "documentation":"

Represents the input for a request operation.

" @@ -6025,11 +6578,11 @@ }, "GameSessionArn":{ "shape":"ArnStringModel", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session and uniquely identifies it. This is the same as the game session ID.

" + "documentation":"

A unique identifier for the game session. Use the game session ID. When using FlexMatch as a standalone matchmaking solution, this parameter is not needed.

" }, "Players":{ "shape":"PlayerList", - "documentation":"

Match information on all players that are currently assigned to the game session. This information is used by the matchmaker to find new players and add them to the existing game.

" + "documentation":"

Match information on all players that are currently assigned to the game session. This information is used by the matchmaker to find new players and add them to the existing game.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -6085,18 +6638,32 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

A unique identifier for a fleet to stop actions on. You can use either the fleet ID or ARN value.

" + "documentation":"

A unique identifier for the fleet to stop actions on. You can use either the fleet ID or ARN value.

" }, "Actions":{ "shape":"FleetActionList", "documentation":"

List of actions to suspend on the fleet.

" + }, + "Location":{ + "shape":"LocationStringModel", + "documentation":"

The fleet location to stop fleet actions for. Specify a location in the form of an AWS Region code, such as us-west-2.

" } - } + }, + "documentation":"

Represents the input for a request operation.

" }, "StopFleetActionsOutput":{ "type":"structure", "members":{ - } + "FleetId":{ + "shape":"FleetId", + "documentation":"

A unique identifier for the fleet to stop actions on.

" + }, + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The Amazon Resource Name (ARN) that is assigned to a GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

" + } + }, + "documentation":"

Represents the returned data in response to a request operation.

" }, "StopGameSessionPlacementInput":{ "type":"structure", @@ -6187,7 +6754,7 @@ "documentation":"

The value for a developer-defined key:value pair for tagging an AWS resource.

" } }, - "documentation":"

A label that can be assigned to a GameLift resource.

Learn more

Tagging AWS Resources in the AWS General Reference

AWS Tagging Strategies

Related operations

" + "documentation":"

A label that can be assigned to a GameLift resource.

Learn more

Tagging AWS Resources in the AWS General Reference

AWS Tagging Strategies

Related actions

TagResource | UntagResource | ListTagsForResource | All APIs by task

" }, "TagKey":{ "type":"string", @@ -6250,7 +6817,7 @@ "documentation":"

Desired value to use with a target-based scaling policy. The value must be relevant for whatever metric the scaling policy is using. For example, in a policy using the metric PercentAvailableGameSessions, the target value should be the preferred size of the fleet's buffer (the percent of capacity that should be idle and ready for new game sessions).

" } }, - "documentation":"

Settings for a target-based scaling policy (see ScalingPolicy. A target-based policy tracks a particular fleet metric specifies a target value for the metric. As player usage changes, the policy triggers Amazon GameLift to adjust capacity so that the metric returns to the target value. The target configuration specifies settings as needed for the target based policy, including the target value.

" + "documentation":"

Settings for a target-based scaling policy (see ScalingPolicy). A target-based policy tracks a particular fleet metric and specifies a target value for the metric. As player usage changes, the policy triggers Amazon GameLift to adjust capacity so that the metric returns to the target value. The target configuration specifies settings as needed for the target-based policy, including the target value.

Related actions

DescribeFleetCapacity | UpdateFleetCapacity | DescribeEC2InstanceLimits | PutScalingPolicy | DescribeScalingPolicies | DeleteScalingPolicy | StopFleetActions | StartFleetActions | All APIs by task

" }, "TargetTrackingConfiguration":{ "type":"structure", @@ -6261,7 +6828,7 @@ "documentation":"

Desired value to use with a game server group target-based scaling policy.

" } }, - "documentation":"

This data type is used with the Amazon GameLift FleetIQ and game server groups.

Settings for a target-based scaling policy as part of a GameServerGroupAutoScalingPolicy. These settings are used to create a target-based policy that tracks the GameLift FleetIQ metric \"PercentUtilizedGameServers\" and specifies a target value for the metric. As player usage changes, the policy triggers to adjust the game server group capacity so that the metric returns to the target value.

" + "documentation":"

This data type is used with the GameLift FleetIQ and game server groups.

Settings for a target-based scaling policy as part of a GameServerGroupAutoScalingPolicy. These settings are used to create a target-based policy that tracks the GameLift FleetIQ metric \"PercentUtilizedGameServers\" and specifies a target value for the metric. As player usage changes, the policy triggers to adjust the game server group capacity so that the metric returns to the target value.

" }, "TerminalRoutingStrategyException":{ "type":"structure", @@ -6349,7 +6916,7 @@ "members":{ "BuildId":{ "shape":"BuildIdOrArn", - "documentation":"

A unique identifier for a build to update. You can use either the build ID or ARN value.

" + "documentation":"

A unique identifier for the build to update. You can use either the build ID or ARN value.

" }, "Name":{ "shape":"NonZeroAndMaxString", @@ -6378,7 +6945,7 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

A unique identifier for a fleet to update attribute metadata for. You can use either the fleet ID or ARN value.

" + "documentation":"

A unique identifier for the fleet to update attribute metadata for. You can use either the fleet ID or ARN value.

" }, "Name":{ "shape":"NonZeroAndMaxString", @@ -6386,19 +6953,19 @@ }, "Description":{ "shape":"NonZeroAndMaxString", - "documentation":"

Human-readable description of a fleet.

" + "documentation":"

A human-readable description of a fleet.

" }, "NewGameSessionProtectionPolicy":{ "shape":"ProtectionPolicy", - "documentation":"

Game session protection policy to apply to all new instances created in this fleet. Instances that already exist are not affected. You can set protection for individual instances using UpdateGameSession.

" + "documentation":"

The game session protection policy to apply to all new instances created in this fleet. Instances that already exist are not affected. You can set protection for individual instances using UpdateGameSession.

" }, "ResourceCreationLimitPolicy":{ "shape":"ResourceCreationLimitPolicy", - "documentation":"

Policy that limits the number of game sessions an individual player can create over a span of time.

" + "documentation":"

Policy settings that limit the number of game sessions an individual player can create over a span of time.

" }, "MetricGroups":{ "shape":"MetricGroupList", - "documentation":"

Names of metric groups to include this fleet in. Amazon CloudWatch uses a fleet metric group is to aggregate metrics from multiple fleets. Use an existing metric group name to add this fleet to the group. Or use a new name to create a new metric group. A fleet can only be included in one metric group at a time.

" + "documentation":"

The name of a metric group to add this fleet to. Use a metric group in Amazon CloudWatch to aggregate the metrics from multiple fleets. Provide an existing metric group name, or create a new metric group by providing a new name. A fleet can only be in one metric group at a time.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -6408,7 +6975,7 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

A unique identifier for a fleet that was updated. Use either the fleet ID or ARN value.

" + "documentation":"

A unique identifier for the fleet that was updated.

" } }, "documentation":"

Represents the returned data in response to a request operation.

" @@ -6419,19 +6986,23 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

A unique identifier for a fleet to update capacity for. You can use either the fleet ID or ARN value.

" + "documentation":"

A unique identifier for the fleet to update capacity settings for. You can use either the fleet ID or ARN value.

" }, "DesiredInstances":{ "shape":"WholeNumber", - "documentation":"

Number of EC2 instances you want this fleet to host.

" + "documentation":"

The number of EC2 instances you want to maintain in the specified fleet location. This value must fall between the minimum and maximum size limits.

" }, "MinSize":{ "shape":"WholeNumber", - "documentation":"

The minimum value allowed for the fleet's instance count. Default if not set is 0.

" + "documentation":"

The minimum number of instances that are allowed in the specified fleet location. If this parameter is not set, the default is 0.

" }, "MaxSize":{ "shape":"WholeNumber", - "documentation":"

The maximum value allowed for the fleet's instance count. Default if not set is 1.

" + "documentation":"

The maximum number of instances that are allowed in the specified fleet location. If this parameter is not set, the default is 1.

" + }, + "Location":{ + "shape":"LocationStringModel", + "documentation":"

The name of a remote location to update fleet capacity settings for, in the form of an AWS Region code such as us-west-2.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -6441,7 +7012,15 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

A unique identifier for a fleet that was updated.

" + "documentation":"

A unique identifier for the fleet that was updated.

" + }, + "FleetArn":{ + "shape":"FleetArn", + "documentation":"

The Amazon Resource Name (ARN) that is assigned to a GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

" + }, + "Location":{ + "shape":"LocationStringModel", + "documentation":"

The remote location being updated, expressed as an AWS Region code, such as us-west-2.

" } }, "documentation":"

Represents the returned data in response to a request operation.

" @@ -6452,7 +7031,7 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

A unique identifier for a fleet to update port settings for. You can use either the fleet ID or ARN value.

" + "documentation":"

A unique identifier for the fleet to update port settings for. You can use either the fleet ID or ARN value.

" }, "InboundPermissionAuthorizations":{ "shape":"IpPermissionsList", @@ -6470,7 +7049,7 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

A unique identifier for a fleet that was updated.

" + "documentation":"

A unique identifier for the fleet that was updated.

" } }, "documentation":"

Represents the returned data in response to a request operation.

" @@ -6566,7 +7145,7 @@ }, "PlayerSessionCreationPolicy":{ "shape":"PlayerSessionCreationPolicy", - "documentation":"

Policy determining whether or not the game session accepts new players.

" + "documentation":"

A policy that determines whether the game session is accepting new players.

" }, "ProtectionPolicy":{ "shape":"ProtectionPolicy", @@ -6580,7 +7159,7 @@ "members":{ "GameSession":{ "shape":"GameSession", - "documentation":"

The updated game session metadata.

" + "documentation":"

The updated game session properties.

" } }, "documentation":"

Represents the returned data in response to a request operation.

" @@ -6599,11 +7178,27 @@ }, "PlayerLatencyPolicies":{ "shape":"PlayerLatencyPolicyList", - "documentation":"

A collection of latency policies to apply when processing game sessions placement requests with player latency information. Multiple policies are evaluated in order of the maximum latency value, starting with the lowest latency values. With just one policy, the policy is enforced at the start of the game session placement for the duration period. With multiple policies, each policy is enforced consecutively for its duration period. For example, a queue might enforce a 60-second policy followed by a 120-second policy, and then no policy for the remainder of the placement. When updating policies, provide a complete collection of policies.

" + "documentation":"

A set of policies that act as a sliding cap on player latency. FleetIQ works to deliver low latency for most players in a game session. These policies ensure that no individual player can be placed into a game with unreasonably high latency. Use multiple policies to gradually relax latency requirements a step at a time. Multiple policies are applied based on their maximum allowed latency, starting with the lowest value. When updating policies, provide a complete collection of policies.

" }, "Destinations":{ "shape":"GameSessionQueueDestinationList", - "documentation":"

A list of fleets that can be used to fulfill game session placement requests in the queue. Fleets are identified by either a fleet ARN or a fleet alias ARN. Destinations are listed in default preference order. When updating this list, provide a complete list of destinations.

" + "documentation":"

A list of fleets and/or fleet aliases that can be used to fulfill game session placement requests in the queue. Destinations are identified by either a fleet ARN or a fleet alias ARN, and are listed in order of placement preference. When updating this list, provide a complete list of destinations.

" + }, + "FilterConfiguration":{ + "shape":"FilterConfiguration", + "documentation":"

A list of locations where a queue is allowed to place new game sessions. Locations are specified in the form of AWS Region codes, such as us-west-2. If this parameter is not set, game sessions can be placed in any queue location. To remove an existing filter configuration, pass in an empty set.

" + }, + "PriorityConfiguration":{ + "shape":"PriorityConfiguration", + "documentation":"

Custom settings to use when prioritizing destinations and locations for game session placements. This configuration replaces the FleetIQ default prioritization process. Priority types that are not explicitly named will be automatically applied at the end of the prioritization process. To remove an existing priority configuration, pass in an empty set.

" + }, + "CustomEventData":{ + "shape":"QueueCustomEventData", + "documentation":"

Information to be added to all events that are related to this game session queue.

" + }, + "NotificationTarget":{ + "shape":"QueueSnsArnStringModel", + "documentation":"

An SNS topic ARN that is set up to receive game session placement notifications. See Setting up notifications for game session placement.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -6624,7 +7219,7 @@ "members":{ "Name":{ "shape":"MatchmakingConfigurationName", - "documentation":"

A unique identifier for a matchmaking configuration to update. You can use either the configuration name or ARN value.

" + "documentation":"

A unique identifier for the matchmaking configuration to update. You can use either the configuration name or ARN value.

" }, "Description":{ "shape":"NonZeroAndMaxString", @@ -6632,7 +7227,7 @@ }, "GameSessionQueueArns":{ "shape":"QueueArnsList", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Queues can be located in any Region. Queues are used to start new GameLift-hosted game sessions for matches that are created with this matchmaking configuration. If FlexMatchMode is set to STANDALONE, do not set this parameter.

" + "documentation":"

The Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::gamesessionqueue/<queue name>. Queues can be located in any Region. Queues are used to start new GameLift-hosted game sessions for matches that are created with this matchmaking configuration. If FlexMatchMode is set to STANDALONE, do not set this parameter.

" }, "RequestTimeoutSeconds":{ "shape":"MatchmakingRequestTimeoutInteger", @@ -6640,7 +7235,7 @@ }, "AcceptanceTimeoutSeconds":{ "shape":"MatchmakingAcceptanceTimeoutInteger", - "documentation":"

The length of time (in seconds) to wait for players to accept a proposed match, if acceptance is required. If any player rejects the match or fails to accept before the timeout, the tickets are returned to the ticket pool and continue to be evaluated for an acceptable match.

" + "documentation":"

The length of time (in seconds) to wait for players to accept a proposed match, if acceptance is required.

" }, "AcceptanceRequired":{ "shape":"BooleanModel", @@ -6648,15 +7243,15 @@ }, "RuleSetName":{ "shape":"MatchmakingRuleSetName", - "documentation":"

A unique identifier for a matchmaking rule set to use with this configuration. You can use either the rule set name or ARN value. A matchmaking configuration can only use rule sets that are defined in the same Region.

" + "documentation":"

A unique identifier for the matchmaking rule set to use with this configuration. You can use either the rule set name or ARN value. A matchmaking configuration can only use rule sets that are defined in the same Region.

" }, "NotificationTarget":{ "shape":"SnsArnStringModel", - "documentation":"

An SNS topic ARN that is set up to receive matchmaking notifications. See Setting up Notifications for Matchmaking for more information.

" + "documentation":"

An SNS topic ARN that is set up to receive matchmaking notifications. See Setting up notifications for matchmaking for more information.

" }, "AdditionalPlayerCount":{ "shape":"WholeNumber", - "documentation":"

The number of player slots in a match to keep open for future players. For example, assume that the configuration's rule set specifies a match for a single 12-person team. If the additional player count is set to 2, only 10 players are initially selected for the match. This parameter is not used if FlexMatchMode is set to STANDALONE.

" + "documentation":"

The number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single 12-person team, and the additional player count is set to 2, only 10 players are selected for the match. This parameter is not used if FlexMatchMode is set to STANDALONE.

" }, "CustomEventData":{ "shape":"CustomEventData", @@ -6664,7 +7259,7 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

A set of custom properties for a game session, formatted as key-value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match. This parameter is not used if FlexMatchMode is set to STANDALONE.

" + "documentation":"

A set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match. This parameter is not used if FlexMatchMode is set to STANDALONE.

" }, "GameSessionData":{ "shape":"GameSessionData", @@ -6700,11 +7295,11 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

A unique identifier for a fleet to update runtime configuration for. You can use either the fleet ID or ARN value.

" + "documentation":"

A unique identifier for the fleet to update runtime configuration for. You can use either the fleet ID or ARN value.

" }, "RuntimeConfiguration":{ "shape":"RuntimeConfiguration", - "documentation":"

Instructions for launching server processes on each instance in the fleet. Server processes run either a custom game build executable or a Realtime Servers script. The runtime configuration lists the types of server processes to run on an instance and includes the following configuration settings: the server executable or launch script file, launch parameters, and the number of processes to run concurrently on each instance. A CreateFleet request must include a runtime configuration with at least one server process configuration.

" + "documentation":"

Instructions for launching server processes on each instance in the fleet. Server processes run either a custom game build executable or a Realtime Servers script. The runtime configuration lists the types of server processes to run on an instance, how to launch them, and the number of processes to run concurrently.

" } }, "documentation":"

Represents the input for a request operation.

" @@ -6714,7 +7309,7 @@ "members":{ "RuntimeConfiguration":{ "shape":"RuntimeConfiguration", - "documentation":"

The runtime configuration currently in force. If the update was successful, this object matches the one in the request.

" + "documentation":"

The runtime configuration currently in use by all instances in the fleet. If the update was successful, all property changes are shown.

" } }, "documentation":"

Represents the returned data in response to a request operation.

" @@ -6725,7 +7320,7 @@ "members":{ "ScriptId":{ "shape":"ScriptIdOrArn", - "documentation":"

A unique identifier for a Realtime script to update. You can use either the script ID or ARN value.

" + "documentation":"

A unique identifier for the Realtime script to update. You can use either the script ID or ARN value.

" }, "Name":{ "shape":"NonZeroAndMaxString", @@ -6733,11 +7328,11 @@ }, "Version":{ "shape":"NonZeroAndMaxString", - "documentation":"

The version that is associated with a build or script. Version strings do not need to be unique.

" + "documentation":"

Version information that is associated with a build or script. Version strings do not need to be unique.

" }, "StorageLocation":{ "shape":"S3Location", - "documentation":"

The Amazon S3 location of your Realtime scripts. The storage location must specify the S3 bucket name, the zip file name (the \"key\"), and an IAM role ARN that allows Amazon GameLift to access the S3 storage location. The S3 bucket must be in the same Region as the script you're updating. By default, Amazon GameLift uploads the latest version of the zip file; if you have S3 object versioning turned on, you can use the ObjectVersion parameter to specify an earlier version. To call this operation with a storage location, you must have IAM PassRole permission. For more details on IAM roles and PassRole permissions, see Set up a role for GameLift access.

" + "documentation":"

The location of the Amazon S3 bucket where a zipped file containing your Realtime scripts is stored. The storage location must specify the Amazon S3 bucket name, the zip file name (the \"key\"), and a role ARN that allows Amazon GameLift to access the Amazon S3 storage location. The S3 bucket must be in the same Region where you want to create a new script. By default, Amazon GameLift uploads the latest version of the zip file; if you have S3 object versioning turned on, you can use the ObjectVersion parameter to specify an earlier version.

" }, "ZipFile":{ "shape":"ZipBlob", @@ -6780,7 +7375,7 @@ "members":{ "GameLiftAwsAccountId":{ "shape":"NonZeroAndMaxString", - "documentation":"

A unique identifier for the AWS account that you use to manage your Amazon GameLift fleet. You can find your Account ID in the AWS Management Console under account settings.

" + "documentation":"

A unique identifier for the AWS account that you use to manage your GameLift fleet. You can find your Account ID in the AWS Management Console under account settings.

" }, "PeerVpcAwsAccountId":{ "shape":"NonZeroAndMaxString", @@ -6788,18 +7383,18 @@ }, "PeerVpcId":{ "shape":"NonZeroAndMaxString", - "documentation":"

A unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same Region where your fleet is deployed. Look up a VPC ID using the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

" + "documentation":"

A unique identifier for a VPC with resources to be accessed by your GameLift fleet. The VPC must be in the same Region as your fleet. To look up a VPC ID, use the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with GameLift Fleets.

" }, "CreationTime":{ "shape":"Timestamp", - "documentation":"

Time stamp indicating when this authorization was issued. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + "documentation":"

Time stamp indicating when this authorization was issued. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" }, "ExpirationTime":{ "shape":"Timestamp", - "documentation":"

Time stamp indicating when this authorization expires (24 hours after issuance). Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + "documentation":"

Time stamp indicating when this authorization expires (24 hours after issuance). Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" } }, - "documentation":"

Represents an authorization for a VPC peering connection between the VPC for an Amazon GameLift fleet and another VPC on an account you have access to. This authorization must exist and be valid for the peering connection to be established. Authorizations are valid for 24 hours after they are issued.

" + "documentation":"

Represents an authorization for a VPC peering connection between the VPC for an Amazon GameLift fleet and another VPC on an account you have access to. This authorization must exist and be valid for the peering connection to be established. Authorizations are valid for 24 hours after they are issued.

Related actions

CreateVpcPeeringAuthorization | DescribeVpcPeeringAuthorizations | DeleteVpcPeeringAuthorization | CreateVpcPeeringConnection | DescribeVpcPeeringConnections | DeleteVpcPeeringConnection | All APIs by task

" }, "VpcPeeringAuthorizationList":{ "type":"list", @@ -6810,11 +7405,11 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

A unique identifier for a fleet. This ID determines the ID of the Amazon GameLift VPC for your fleet.

" + "documentation":"

A unique identifier for the fleet. This ID determines the ID of the Amazon GameLift VPC for your fleet.

" }, "FleetArn":{ "shape":"FleetArn", - "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift fleet resource for this connection.

" + "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift fleet resource for this connection.

" }, "IpV4CidrBlock":{ "shape":"NonZeroAndMaxString", @@ -6830,14 +7425,14 @@ }, "PeerVpcId":{ "shape":"NonZeroAndMaxString", - "documentation":"

A unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same Region where your fleet is deployed. Look up a VPC ID using the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

" + "documentation":"

A unique identifier for a VPC with resources to be accessed by your GameLift fleet. The VPC must be in the same Region as your fleet. To look up a VPC ID, use the VPC Dashboard in the AWS Management Console. Learn more about VPC peering in VPC Peering with GameLift Fleets.

" }, "GameLiftVpcId":{ "shape":"NonZeroAndMaxString", "documentation":"

A unique identifier for the VPC that contains the Amazon GameLift fleet for this connection. This VPC is managed by Amazon GameLift and does not appear in your AWS account.

" } }, - "documentation":"

Represents a peering connection between a VPC on one of your AWS accounts and the VPC for your Amazon GameLift fleets. This record may be for an active peering connection or a pending connection that has not yet been established.

" + "documentation":"

Represents a peering connection between a VPC on one of your AWS accounts and the VPC for your Amazon GameLift fleets. This record may be for an active peering connection or a pending connection that has not yet been established.

Related actions

CreateVpcPeeringAuthorization | DescribeVpcPeeringAuthorizations | DeleteVpcPeeringAuthorization | CreateVpcPeeringConnection | DescribeVpcPeeringConnections | DeleteVpcPeeringConnection | All APIs by task

" }, "VpcPeeringConnectionList":{ "type":"list", @@ -6884,5 +7479,5 @@ "max":5000000 } }, - "documentation":"Amazon GameLift Service

GameLift provides solutions for hosting session-based multiplayer game servers in the cloud, including tools for deploying, operating, and scaling game servers. Built on AWS global computing infrastructure, GameLift helps you deliver high-performance, high-reliability, low-cost game servers while dynamically scaling your resource usage to meet player demand.

About GameLift solutions

Get more information on these GameLift solutions in the Amazon GameLift Developer Guide.

About this API Reference

This reference guide describes the low-level service API for Amazon GameLift. You can find links to language-specific SDK guides and the AWS CLI reference with each operation and data type topic. Useful links:

" + "documentation":"Amazon GameLift Service

GameLift provides solutions for hosting session-based multiplayer game servers in the cloud, including tools for deploying, operating, and scaling game servers. Built on AWS global computing infrastructure, GameLift helps you deliver high-performance, high-reliability, low-cost game servers while dynamically scaling your resource usage to meet player demand.

About GameLift solutions

Get more information on these GameLift solutions in the GameLift Developer Guide.

About this API Reference

This reference guide describes the low-level service API for Amazon GameLift. With each topic in this guide, you can find links to language-specific SDK guides and the AWS CLI reference. Useful links:

" } diff --git a/botocore/data/globalaccelerator/2018-08-08/service-2.json b/botocore/data/globalaccelerator/2018-08-08/service-2.json index c8aeef67..f6b786e3 100644 --- a/botocore/data/globalaccelerator/2018-08-08/service-2.json +++ b/botocore/data/globalaccelerator/2018-08-08/service-2.json @@ -91,7 +91,7 @@ {"shape":"LimitExceededException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Create a custom routing accelerator. A custom routing accelerator directs traffic to one of possibly thousands of Amazon EC2 instance destinations running in a single or multiple virtual private clouds (VPC) subnet endpoints.

Be aware that, by default, all destination EC2 instances in a VPC subnet endpoint cannot receive traffic. To enable all destinations to receive traffic, or to specify individual port mappings that can receive traffic, see the AllowCustomRoutingTraffic operation.

" + "documentation":"

Create a custom routing accelerator. A custom routing accelerator directs traffic to one of possibly thousands of Amazon EC2 instance destinations running in one or more virtual private cloud (VPC) subnet endpoints.

Be aware that, by default, all destination EC2 instances in a VPC subnet endpoint cannot receive traffic. To enable all destinations to receive traffic, or to specify individual port mappings that can receive traffic, see the AllowCustomRoutingTraffic operation.

Global Accelerator is a global service that supports endpoints in multiple AWS Regions but you must specify the US West (Oregon) Region to create or update accelerators.

" }, "CreateCustomRoutingEndpointGroup":{ "name":"CreateCustomRoutingEndpointGroup", @@ -1063,7 +1063,7 @@ }, "IpAddresses":{ "shape":"IpAddresses", - "documentation":"

Optionally, if you've added your own IP address pool to Global Accelerator (BYOIP), you can choose IP addresses from your own pool to use for the accelerator's static IP addresses when you create an accelerator. You can specify one or two addresses, separated by a comma. Do not include the /32 suffix.

Only one IP address from each of your IP address ranges can be used for each accelerator. If you specify only one IP address from your IP address range, Global Accelerator assigns a second static IP address for the accelerator from the AWS IP address pool.

Note that you can't update IP addresses for an existing accelerator. To change them, you must create a new accelerator with the new addresses.

For more information, see Bring Your Own IP Addresses (BYOIP) in the AWS Global Accelerator Developer Guide.

" + "documentation":"

Optionally, if you've added your own IP address pool to Global Accelerator (BYOIP), you can choose IP addresses from your own pool to use for the accelerator's static IP addresses when you create an accelerator. You can specify one or two addresses, separated by a space. Do not include the /32 suffix.

Only one IP address from each of your IP address ranges can be used for each accelerator. If you specify only one IP address from your IP address range, Global Accelerator assigns a second static IP address for the accelerator from the AWS IP address pool.

Note that you can't update IP addresses for an existing accelerator. To change them, you must create a new accelerator with the new addresses.

For more information, see Bring Your Own IP Addresses (BYOIP) in the AWS Global Accelerator Developer Guide.

" }, "Enabled":{ "shape":"GenericBoolean", @@ -1104,6 +1104,10 @@ "shape":"IpAddressType", "documentation":"

The value for the address type must be IPv4.

" }, + "IpAddresses":{ + "shape":"IpAddresses", + "documentation":"

Optionally, if you've added your own IP address pool to Global Accelerator (BYOIP), you can choose IP addresses from your own pool to use for the accelerator's static IP addresses when you create an accelerator. You can specify one or two addresses, separated by a space. Do not include the /32 suffix.

Only one IP address from each of your IP address ranges can be used for each accelerator. If you specify only one IP address from your IP address range, Global Accelerator assigns a second static IP address for the accelerator from the AWS IP address pool.

Note that you can't update IP addresses for an existing accelerator. To change them, you must create a new accelerator with the new addresses.

For more information, see Bring your own IP addresses (BYOIP) in the AWS Global Accelerator Developer Guide.

" + }, "Enabled":{ "shape":"GenericBoolean", "documentation":"

Indicates whether an accelerator is enabled. The value is true or false. The default value is true.

If the value is set to true, an accelerator cannot be deleted. If set to false, the accelerator can be deleted.

" diff --git a/botocore/data/glue/2017-03-31/service-2.json b/botocore/data/glue/2017-03-31/service-2.json index eed3b7b6..7545f79b 100644 --- a/botocore/data/glue/2017-03-31/service-2.json +++ b/botocore/data/glue/2017-03-31/service-2.json @@ -3288,6 +3288,12 @@ }, "documentation":"

Specifies a table definition in the AWS Glue Data Catalog.

" }, + "CatalogGetterPageSize":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, "CatalogIdString":{ "type":"string", "max":255, @@ -6774,7 +6780,7 @@ "documentation":"

A continuation token, if this is a continuation call.

" }, "MaxResults":{ - "shape":"PageSize", + "shape":"CatalogGetterPageSize", "documentation":"

The maximum number of databases to return in one response.

" }, "ResourceShareType":{ @@ -7355,7 +7361,8 @@ "MaxResults":{ "shape":"PageSize", "documentation":"

The maximum number of partitions to return in a single response.

" - } + }, + "ExcludeColumnSchema":{"shape":"BooleanNullable"} } }, "GetPartitionsResponse":{ @@ -7848,7 +7855,7 @@ "documentation":"

A continuation token, if this is not the first call.

" }, "MaxResults":{ - "shape":"PageSize", + "shape":"CatalogGetterPageSize", "documentation":"

The maximum number of table versions to return in one response.

" } } @@ -7887,7 +7894,7 @@ "documentation":"

A continuation token, included if this is a continuation call.

" }, "MaxResults":{ - "shape":"PageSize", + "shape":"CatalogGetterPageSize", "documentation":"

The maximum number of tables to return in a single response.

" } } @@ -8024,7 +8031,7 @@ "documentation":"

A continuation token, if this is a continuation call.

" }, "MaxResults":{ - "shape":"PageSize", + "shape":"CatalogGetterPageSize", "documentation":"

The maximum number of functions to return in one response.

" } } diff --git a/botocore/data/health/2016-08-04/service-2.json b/botocore/data/health/2016-08-04/service-2.json index 17fc2d85..b44789e2 100644 --- a/botocore/data/health/2016-08-04/service-2.json +++ b/botocore/data/health/2016-08-04/service-2.json @@ -24,7 +24,7 @@ "errors":[ {"shape":"InvalidPaginationToken"} ], - "documentation":"

Returns a list of accounts in the organization from AWS Organizations that are affected by the provided event. For more information about the different types of AWS Health events, see Event.

Before you can call this operation, you must first enable AWS Health to work with AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's master account.

This API operation uses pagination. Specify the nextToken parameter in the next request to return more results.

", + "documentation":"

Returns a list of accounts in the organization from AWS Organizations that are affected by the provided event. For more information about the different types of AWS Health events, see Event.

Before you can call this operation, you must first enable AWS Health to work with AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's management account.

This API operation uses pagination. Specify the nextToken parameter in the next request to return more results.

", "idempotent":true }, "DescribeAffectedEntities":{ @@ -39,7 +39,7 @@ {"shape":"InvalidPaginationToken"}, {"shape":"UnsupportedLocale"} ], - "documentation":"

Returns a list of entities that have been affected by the specified events, based on the specified filter criteria. Entities can refer to individual customer resources, groups of customer resources, or any other construct, depending on the AWS service. Events that have impact beyond that of the affected entities, or where the extent of impact is unknown, include at least one entity indicating this.

At least one event ARN is required. Results are sorted by the lastUpdatedTime of the entity, starting with the most recent.

This API operation uses pagination. Specify the nextToken parameter in the next request to return more results.

", + "documentation":"

Returns a list of entities that have been affected by the specified events, based on the specified filter criteria. Entities can refer to individual customer resources, groups of customer resources, or any other construct, depending on the AWS service. Events that have impact beyond that of the affected entities, or where the extent of impact is unknown, include at least one entity indicating this.

At least one event ARN is required. Results are sorted by the lastUpdatedTime of the entity, starting with the most recent.

", "idempotent":true }, "DescribeAffectedEntitiesForOrganization":{ @@ -54,7 +54,7 @@ {"shape":"InvalidPaginationToken"}, {"shape":"UnsupportedLocale"} ], - "documentation":"

Returns a list of entities that have been affected by one or more events for one or more accounts in your organization in AWS Organizations, based on the filter criteria. Entities can refer to individual customer resources, groups of customer resources, or any other construct, depending on the AWS service.

At least one event Amazon Resource Name (ARN) and account ID are required. Results are sorted by the lastUpdatedTime of the entity, starting with the most recent.

Before you can call this operation, you must first enable AWS Health to work with AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's master account.

This API operation uses pagination. Specify the nextToken parameter in the next request to return more results.

", + "documentation":"

Returns a list of entities that have been affected by one or more events for one or more accounts in your organization in AWS Organizations, based on the filter criteria. Entities can refer to individual customer resources, groups of customer resources, or any other construct, depending on the AWS service.

At least one event Amazon Resource Name (ARN) and account ID are required. Results are sorted by the lastUpdatedTime of the entity, starting with the most recent.

Before you can call this operation, you must first enable AWS Health to work with AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's management account.

", "idempotent":true }, "DescribeEntityAggregates":{ @@ -93,7 +93,7 @@ "errors":[ {"shape":"UnsupportedLocale"} ], - "documentation":"

Returns detailed information about one or more specified events. Information includes standard event data (Region, service, and so on, as returned by DescribeEvents), a detailed event description, and possible additional metadata that depends upon the nature of the event. Affected entities are not included. To retrieve those, use the DescribeAffectedEntities operation.

If a specified event cannot be retrieved, an error message is returned for that event.

", + "documentation":"

Returns detailed information about one or more specified events. Information includes standard event data (AWS Region, service, and so on, as returned by DescribeEvents), a detailed event description, and possible additional metadata that depends upon the nature of the event. Affected entities are not included. To retrieve those, use the DescribeAffectedEntities operation.

If a specified event cannot be retrieved, an error message is returned for that event.

This operation supports resource-level permissions. You can use this operation to allow or deny access to specific AWS Health events. For more information, see Resource- and action-based conditions in the AWS Health User Guide.

", "idempotent":true }, "DescribeEventDetailsForOrganization":{ @@ -107,7 +107,7 @@ "errors":[ {"shape":"UnsupportedLocale"} ], - "documentation":"

Returns detailed information about one or more specified events for one or more accounts in your organization. Information includes standard event data (Region, service, and so on, as returned by DescribeEventsForOrganization), a detailed event description, and possible additional metadata that depends upon the nature of the event. Affected entities are not included; to retrieve those, use the DescribeAffectedEntitiesForOrganization operation.

Before you can call this operation, you must first enable AWS Health to work with AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's master account.

When you call the DescribeEventDetailsForOrganization operation, you specify the organizationEventDetailFilters object in the request. Depending on the AWS Health event type, note the following differences:

For more information, see Event.

", + "documentation":"

Returns detailed information about one or more specified events for one or more accounts in your organization. Information includes standard event data (AWS Region, service, and so on, as returned by DescribeEventsForOrganization), a detailed event description, and possible additional metadata that depends upon the nature of the event. Affected entities are not included; to retrieve those, use the DescribeAffectedEntitiesForOrganization operation.

Before you can call this operation, you must first enable AWS Health to work with AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's management account.

When you call the DescribeEventDetailsForOrganization operation, you specify the organizationEventDetailFilters object in the request. Depending on the AWS Health event type, note the following differences:

For more information, see Event.

This operation doesn't support resource-level permissions. You can't use this operation to allow or deny access to specific AWS Health events. For more information, see Resource- and action-based conditions in the AWS Health User Guide.

", "idempotent":true }, "DescribeEventTypes":{ @@ -122,7 +122,7 @@ {"shape":"InvalidPaginationToken"}, {"shape":"UnsupportedLocale"} ], - "documentation":"

Returns the event types that meet the specified filter criteria. If no filter criteria are specified, all event types are returned, in no particular order.

This API operation uses pagination. Specify the nextToken parameter in the next request to return more results.

", + "documentation":"

Returns the event types that meet the specified filter criteria. You can use this API operation to find information about the AWS Health event, such as the category, AWS service, and event code. The metadata for each event appears in the EventType object.

If you don't specify any filter criteria, the API operation returns all event types, in no particular order.

This API operation uses pagination. Specify the nextToken parameter in the next request to return more results.

", "idempotent":true }, "DescribeEvents":{ @@ -152,7 +152,7 @@ {"shape":"InvalidPaginationToken"}, {"shape":"UnsupportedLocale"} ], - "documentation":"

Returns information about events across your organization in AWS Organizations. You can use thefilters parameter to specify the events that you want to return. Events are returned in a summary form and don't include the affected accounts, detailed description, any additional metadata that depends on the event type, or any affected resources. To retrieve that information, use the following operations:

If you don't specify a filter, the DescribeEventsForOrganizations returns all events across your organization. Results are sorted by lastModifiedTime, starting with the most recent event.

For more information about the different types of AWS Health events, see Event.

Before you can call this operation, you must first enable AWS Health to work with AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's master AWS account.

This API operation uses pagination. Specify the nextToken parameter in the next request to return more results.

", + "documentation":"

Returns information about events across your organization in AWS Organizations. You can use the filters parameter to specify the events that you want to return. Events are returned in a summary form and don't include the affected accounts, detailed description, any additional metadata that depends on the event type, or any affected resources. To retrieve that information, use the following operations:

If you don't specify a filter, the DescribeEventsForOrganization operation returns all events across your organization. Results are sorted by lastModifiedTime, starting with the most recent event.

For more information about the different types of AWS Health events, see Event.

Before you can call this operation, you must first enable AWS Health to work with AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's management account.

This API operation uses pagination. Specify the nextToken parameter in the next request to return more results.

", "idempotent":true }, "DescribeHealthServiceStatusForOrganization":{ @@ -162,7 +162,7 @@ "requestUri":"/" }, "output":{"shape":"DescribeHealthServiceStatusForOrganizationResponse"}, - "documentation":"

This operation provides status information on enabling or disabling AWS Health to work with your organization. To call this operation, you must sign in as an IAM user, assume an IAM role, or sign in as the root user (not recommended) in the organization's master account.

", + "documentation":"

This operation provides status information on enabling or disabling AWS Health to work with your organization. To call this operation, you must sign in as an IAM user, assume an IAM role, or sign in as the root user (not recommended) in the organization's management account.

", "idempotent":true }, "DisableHealthServiceAccessForOrganization":{ @@ -174,7 +174,7 @@ "errors":[ {"shape":"ConcurrentModificationException"} ], - "documentation":"

Disables AWS Health from working with AWS Organizations. To call this operation, you must sign in as an AWS Identity and Access Management (IAM) user, assume an IAM role, or sign in as the root user (not recommended) in the organization's master AWS account. For more information, see Aggregating AWS Health events in the AWS Health User Guide.

This operation doesn't remove the service-linked role (SLR) from the AWS master account in your organization. You must use the IAM console, API, or AWS Command Line Interface (AWS CLI) to remove the SLR. For more information, see Deleting a Service-Linked Role in the IAM User Guide.

You can also disable the organizational feature by using the Organizations DisableAWSServiceAccess API operation. After you call this operation, AWS Health stops aggregating events for all other AWS accounts in your organization. If you call the AWS Health API operations for organizational view, AWS Health returns an error. AWS Health continues to aggregate health events for your AWS account.

", + "documentation":"

Disables AWS Health from working with AWS Organizations. To call this operation, you must sign in as an AWS Identity and Access Management (IAM) user, assume an IAM role, or sign in as the root user (not recommended) in the organization's management account. For more information, see Aggregating AWS Health events in the AWS Health User Guide.

This operation doesn't remove the service-linked role from the management account in your organization. You must use the IAM console, API, or AWS Command Line Interface (AWS CLI) to remove the service-linked role. For more information, see Deleting a Service-Linked Role in the IAM User Guide.

You can also disable the organizational feature by using the Organizations DisableAWSServiceAccess API operation. After you call this operation, AWS Health stops aggregating events for all other AWS accounts in your organization. If you call the AWS Health API operations for organizational view, AWS Health returns an error. AWS Health continues to aggregate health events for your AWS account.

", "idempotent":true }, "EnableHealthServiceAccessForOrganization":{ @@ -186,7 +186,7 @@ "errors":[ {"shape":"ConcurrentModificationException"} ], - "documentation":"

Calling this operation enables AWS Health to work with AWS Organizations. This applies a service-linked role (SLR) to the master account in the organization. To call this operation, you must sign in as an IAM user, assume an IAM role, or sign in as the root user (not recommended) in the organization's master account.

For more information, see Aggregating AWS Health events in the AWS Health User Guide.

", + "documentation":"

Enables AWS Health to work with AWS Organizations. You can use the organizational view feature to aggregate events from all AWS accounts in your organization in a centralized location.

This operation also creates a service-linked role for the management account in the organization.

To call this operation, you must meet the following requirements:

If you don't have the required support plan, you can instead use the AWS Health console to enable the organizational view feature. For more information, see Aggregating AWS Health events in the AWS Health User Guide.

", "idempotent":true } }, @@ -864,7 +864,7 @@ "documentation":"

A list of event type category codes (issue, scheduledChange, or accountNotification).

" } }, - "documentation":"

Metadata about a type of event that is reported by AWS Health. Data consists of the category (for example, issue), the service (for example, EC2), and the event type code (for example, AWS_EC2_SYSTEM_MAINTENANCE_EVENT).

" + "documentation":"

Contains the metadata about a type of event that is reported by AWS Health. The EventType shows the category, service, and the event type code of the event. For example, an issue might be the category, EC2 the service, and AWS_EC2_SYSTEM_MAINTENANCE_EVENT the event type code.

You can use the DescribeEventTypes API operation to return this information about an event.

You can also use the Amazon CloudWatch Events console to create a rule so that you can get notified or take action when AWS Health delivers a specific event to your AWS account. For more information, see Monitor for AWS Health events with Amazon CloudWatch Events in the AWS Health User Guide.

" }, "EventTypeCategoryList":{ "type":"list", diff --git a/botocore/data/iam/2010-05-08/service-2.json b/botocore/data/iam/2010-05-08/service-2.json index 97e6e500..2d8c3296 100644 --- a/botocore/data/iam/2010-05-08/service-2.json +++ b/botocore/data/iam/2010-05-08/service-2.json @@ -42,7 +42,7 @@ {"shape":"UnmodifiableEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Adds the specified IAM role to the specified instance profile. An instance profile can contain only one role. (The number and size of IAM resources in an AWS account are limited. For more information, see IAM and STS Quotas in the IAM User Guide.) You can remove the existing role and then add a different role to an instance profile. You must then wait for the change to appear across all of AWS because of eventual consistency. To force the change, you must disassociate the instance profile and then associate the instance profile, or you can stop your instance and then restart it.

The caller of this API must be granted the PassRole permission on the IAM role by a permissions policy.

For more information about roles, go to Working with Roles. For more information about instance profiles, go to About Instance Profiles.

" + "documentation":"

Adds the specified IAM role to the specified instance profile. An instance profile can contain only one role, and this quota cannot be increased. You can remove the existing role and then add a different role to an instance profile. You must then wait for the change to appear across all of AWS because of eventual consistency. To force the change, you must disassociate the instance profile and then associate the instance profile, or you can stop your instance and then restart it.

The caller of this operation must be granted the PassRole permission on the IAM role by a permissions policy.

For more information about roles, see Working with roles. For more information about instance profiles, see About instance profiles.

" }, "AddUserToGroup":{ "name":"AddUserToGroup", @@ -72,7 +72,7 @@ {"shape":"PolicyNotAttachableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Attaches the specified managed policy to the specified IAM group.

You use this API to attach a managed policy to a group. To embed an inline policy in a group, use PutGroupPolicy.

For more information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

Attaches the specified managed policy to the specified IAM group.

You use this operation to attach a managed policy to a group. To embed an inline policy in a group, use PutGroupPolicy.

As a best practice, you can validate your IAM policies. To learn more, see Validating IAM policies in the IAM User Guide.

For more information about policies, see Managed policies and inline policies in the IAM User Guide.

" }, "AttachRolePolicy":{ "name":"AttachRolePolicy", @@ -89,7 +89,7 @@ {"shape":"PolicyNotAttachableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Attaches the specified managed policy to the specified IAM role. When you attach a managed policy to a role, the managed policy becomes part of the role's permission (access) policy.

You cannot use a managed policy as the role's trust policy. The role's trust policy is created at the same time as the role, using CreateRole. You can update a role's trust policy using UpdateAssumeRolePolicy.

Use this API to attach a managed policy to a role. To embed an inline policy in a role, use PutRolePolicy. For more information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

Attaches the specified managed policy to the specified IAM role. When you attach a managed policy to a role, the managed policy becomes part of the role's permission (access) policy.

You cannot use a managed policy as the role's trust policy. The role's trust policy is created at the same time as the role, using CreateRole. You can update a role's trust policy using UpdateAssumeRolePolicy.

Use this operation to attach a managed policy to a role. To embed an inline policy in a role, use PutRolePolicy. For more information about policies, see Managed policies and inline policies in the IAM User Guide.

As a best practice, you can validate your IAM policies. To learn more, see Validating IAM policies in the IAM User Guide.

" }, "AttachUserPolicy":{ "name":"AttachUserPolicy", @@ -105,7 +105,7 @@ {"shape":"PolicyNotAttachableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Attaches the specified managed policy to the specified user.

You use this API to attach a managed policy to a user. To embed an inline policy in a user, use PutUserPolicy.

For more information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

Attaches the specified managed policy to the specified user.

You use this operation to attach a managed policy to a user. To embed an inline policy in a user, use PutUserPolicy.

As a best practice, you can validate your IAM policies. To learn more, see Validating IAM policies in the IAM User Guide.

For more information about policies, see Managed policies and inline policies in the IAM User Guide.

" }, "ChangePassword":{ "name":"ChangePassword", @@ -122,7 +122,7 @@ {"shape":"PasswordPolicyViolationException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Changes the password of the IAM user who is calling this operation. The AWS account root user password is not affected by this operation.

To change the password for a different user, see UpdateLoginProfile. For more information about modifying passwords, see Managing Passwords in the IAM User Guide.

" + "documentation":"

Changes the password of the IAM user who is calling this operation. This operation can be performed using the AWS CLI, the AWS API, or the My Security Credentials page in the AWS Management Console. The AWS account root user password is not affected by this operation.

To change the password for any IAM user, use UpdateLoginProfile with the AWS CLI, the AWS API, or the Users page in the IAM console. For more information about modifying passwords, see Managing passwords in the IAM User Guide.
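For illustration, a minimal botocore sketch of an IAM user changing their own console password; the client is assumed to be configured with that user's long-term credentials, and the passwords are prompted for rather than hard-coded.

```python
import getpass

import botocore.session

# Must be called with the IAM user's own credentials, not the root user's.
iam = botocore.session.get_session().create_client("iam", region_name="us-east-1")
iam.change_password(
    OldPassword=getpass.getpass("Current console password: "),
    NewPassword=getpass.getpass("New console password: "),
)
```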

" }, "CreateAccessKey":{ "name":"CreateAccessKey", @@ -140,7 +140,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Creates a new AWS secret access key and corresponding AWS access key ID for the specified user. The default status for new keys is Active.

If you do not specify a user name, IAM determines the user name implicitly based on the AWS access key ID signing the request. This operation works for access keys under the AWS account. Consequently, you can use this operation to manage AWS account root user credentials. This is true even if the AWS account has no associated users.

The number and size of IAM resources in an AWS account are limited. For more information, see IAM and STS Quotas in the IAM User Guide.

To ensure the security of your AWS account, the secret access key is accessible only during key and user creation. You must save the key (for example, in a text file) if you want to be able to access it again. If a secret key is lost, you can delete the access keys for the associated user and then create new keys.

" + "documentation":"

Creates a new AWS secret access key and corresponding AWS access key ID for the specified user. The default status for new keys is Active.

If you do not specify a user name, IAM determines the user name implicitly based on the AWS access key ID signing the request. This operation works for access keys under the AWS account. Consequently, you can use this operation to manage AWS account root user credentials. This is true even if the AWS account has no associated users.

For information about quotas on the number of keys you can create, see IAM and STS quotas in the IAM User Guide.

To ensure the security of your AWS account, the secret access key is accessible only during key and user creation. You must save the key (for example, in a text file) if you want to be able to access it again. If a secret key is lost, you can delete the access keys for the associated user and then create new keys.
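Because the secret access key is only returned at creation time, a typical botocore sketch captures it immediately; the user name is a placeholder and credentials are assumed to come from the environment.

```python
import botocore.session

iam = botocore.session.get_session().create_client("iam", region_name="us-east-1")
resp = iam.create_access_key(UserName="alice")  # omit UserName to target the calling identity

key = resp["AccessKey"]
# The secret access key cannot be retrieved later; store it securely now.
print(key["AccessKeyId"])
print(key["SecretAccessKey"])
```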

" }, "CreateAccountAlias":{ "name":"CreateAccountAlias", @@ -154,7 +154,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Creates an alias for your AWS account. For information about using an AWS account alias, see Using an Alias for Your AWS Account ID in the IAM User Guide.

" + "documentation":"

Creates an alias for your AWS account. For information about using an AWS account alias, see Using an alias for your AWS account ID in the IAM User Guide.

" }, "CreateGroup":{ "name":"CreateGroup", @@ -173,7 +173,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Creates a new group.

The number and size of IAM resources in an AWS account are limited. For more information, see IAM and STS Quotas in the IAM User Guide.

" + "documentation":"

Creates a new group.

For information about the number of groups you can create, see IAM and STS quotas in the IAM User Guide.

" }, "CreateInstanceProfile":{ "name":"CreateInstanceProfile", @@ -188,10 +188,12 @@ }, "errors":[ {"shape":"EntityAlreadyExistsException"}, + {"shape":"InvalidInputException"}, {"shape":"LimitExceededException"}, + {"shape":"ConcurrentModificationException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Creates a new instance profile. For information about instance profiles, go to About Instance Profiles.

The number and size of IAM resources in an AWS account are limited. For more information, see IAM and STS Quotas in the IAM User Guide.

" + "documentation":"

Creates a new instance profile. For information about instance profiles, see Using roles for applications on Amazon EC2 in the IAM User Guide, and Instance profiles in the Amazon EC2 User Guide.

For information about the number of instance profiles you can create, see IAM object quotas in the IAM User Guide.

" }, "CreateLoginProfile":{ "name":"CreateLoginProfile", @@ -211,7 +213,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Creates a password for the specified user, giving the user the ability to access AWS services through the AWS Management Console. For more information about managing passwords, see Managing Passwords in the IAM User Guide.

" + "documentation":"

Creates a password for the specified IAM user. A password allows an IAM user to access AWS services through the AWS Management Console.

You can use the AWS CLI, the AWS API, or the Users page in the IAM console to create a password for any IAM user. Use ChangePassword to update your own existing password in the My Security Credentials page in the AWS Management Console.

For more information about managing passwords, see Managing passwords in the IAM User Guide.
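A short sketch, assuming an existing IAM user named alice, that assigns an initial console password and forces a reset at first sign-in:

```python
import getpass

import botocore.session

iam = botocore.session.get_session().create_client("iam", region_name="us-east-1")
iam.create_login_profile(
    UserName="alice",
    Password=getpass.getpass("Initial console password for alice: "),
    PasswordResetRequired=True,  # the user must choose a new password at first sign-in
)
```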

" }, "CreateOpenIDConnectProvider":{ "name":"CreateOpenIDConnectProvider", @@ -228,6 +230,7 @@ {"shape":"InvalidInputException"}, {"shape":"EntityAlreadyExistsException"}, {"shape":"LimitExceededException"}, + {"shape":"ConcurrentModificationException"}, {"shape":"ServiceFailureException"} ], "documentation":"

Creates an IAM entity to describe an identity provider (IdP) that supports OpenID Connect (OIDC).

The OIDC provider that you create with this operation can be used as a principal in a role's trust policy. Such a policy establishes a trust relationship between AWS and the OIDC provider.

When you create the IAM OIDC provider, you specify the following: the URL of the OIDC identity provider (IdP) to trust, a list of client IDs (also known as audiences) that identify the application or applications allowed to authenticate using the OIDC provider, and a list of thumbprints of one or more server certificates that the IdP uses.

You get all of this information from the OIDC IdP that you want to use to access AWS.

The trust for the OIDC provider is derived from the IAM provider that this operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider operation to highly privileged users.

" @@ -248,9 +251,10 @@ {"shape":"LimitExceededException"}, {"shape":"EntityAlreadyExistsException"}, {"shape":"MalformedPolicyDocumentException"}, + {"shape":"ConcurrentModificationException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Creates a new managed policy for your AWS account.

This operation creates a policy version with a version identifier of v1 and sets v1 as the policy's default version. For more information about policy versions, see Versioning for Managed Policies in the IAM User Guide.

For more information about managed policies in general, see Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

Creates a new managed policy for your AWS account.

This operation creates a policy version with a version identifier of v1 and sets v1 as the policy's default version. For more information about policy versions, see Versioning for managed policies in the IAM User Guide.

As a best practice, you can validate your IAM policies. To learn more, see Validating IAM policies in the IAM User Guide.

For more information about managed policies in general, see Managed policies and inline policies in the IAM User Guide.
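As a rough sketch of the flow described above, the example below creates a customer managed policy from an in-line JSON document and optionally runs it through Access Analyzer's ValidatePolicy first; the policy name is a placeholder, and the accessanalyzer client and its lowercase parameter names are assumed to match this botocore release.

```python
import json

import botocore.session

session = botocore.session.get_session()
iam = session.create_client("iam", region_name="us-east-1")
analyzer = session.create_client("accessanalyzer", region_name="us-east-1")

document = {
    "Version": "2012-10-17",
    "Statement": [
        {"Effect": "Allow", "Action": "s3:ListAllMyBuckets", "Resource": "*"}
    ],
}

# Optional best-practice check before creating the policy.
findings = analyzer.validate_policy(
    policyDocument=json.dumps(document), policyType="IDENTITY_POLICY"
)["findings"]

if not findings:
    resp = iam.create_policy(
        PolicyName="list-buckets-example",  # placeholder name
        PolicyDocument=json.dumps(document),
    )
    print(resp["Policy"]["Arn"])  # version v1 becomes the default version
```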

" }, "CreatePolicyVersion":{ "name":"CreatePolicyVersion", @@ -270,7 +274,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Creates a new version of the specified managed policy. To update a managed policy, you create a new policy version. A managed policy can have up to five versions. If the policy has five versions, you must delete an existing version using DeletePolicyVersion before you create a new version.

Optionally, you can set the new version as the policy's default version. The default version is the version that is in effect for the IAM users, groups, and roles to which the policy is attached.

For more information about managed policy versions, see Versioning for Managed Policies in the IAM User Guide.

" + "documentation":"

Creates a new version of the specified managed policy. To update a managed policy, you create a new policy version. A managed policy can have up to five versions. If the policy has five versions, you must delete an existing version using DeletePolicyVersion before you create a new version.

Optionally, you can set the new version as the policy's default version. The default version is the version that is in effect for the IAM users, groups, and roles to which the policy is attached.

For more information about managed policy versions, see Versioning for managed policies in the IAM User Guide.

" }, "CreateRole":{ "name":"CreateRole", @@ -291,7 +295,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Creates a new role for your AWS account. For more information about roles, go to IAM Roles. The number and size of IAM resources in an AWS account are limited. For more information, see IAM and STS Quotas in the IAM User Guide.

" + "documentation":"

Creates a new role for your AWS account. For more information about roles, see IAM roles. For information about quotas for role names and the number of roles you can create, see IAM and STS quotas in the IAM User Guide.
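A compact sketch that creates a role whose trust policy lets Amazon EC2 assume it and then attaches a managed permissions policy; the two policies are distinct, as noted under AttachRolePolicy, and the role name and policy ARN are placeholders.

```python
import json

import botocore.session

iam = botocore.session.get_session().create_client("iam", region_name="us-east-1")

# Trust policy: who may assume the role. Set at creation time and changed
# later only with UpdateAssumeRolePolicy.
trust_policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": {"Service": "ec2.amazonaws.com"},
            "Action": "sts:AssumeRole",
        }
    ],
}

iam.create_role(
    RoleName="example-ec2-role",
    AssumeRolePolicyDocument=json.dumps(trust_policy),
)

# Permissions policy: what the role may do once assumed.
iam.attach_role_policy(
    RoleName="example-ec2-role",
    PolicyArn="arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess",
)
```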

" }, "CreateSAMLProvider":{ "name":"CreateSAMLProvider", @@ -308,9 +312,10 @@ {"shape":"InvalidInputException"}, {"shape":"EntityAlreadyExistsException"}, {"shape":"LimitExceededException"}, + {"shape":"ConcurrentModificationException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Creates an IAM resource that describes an identity provider (IdP) that supports SAML 2.0.

The SAML provider resource that you create with this operation can be used as a principal in an IAM role's trust policy. Such a policy can enable federated users who sign in using the SAML IdP to assume the role. You can create an IAM role that supports Web-based single sign-on (SSO) to the AWS Management Console or one that supports API access to AWS.

When you create the SAML provider resource, you upload a SAML metadata document that you get from your IdP. That document includes the issuer's name, expiration information, and keys that can be used to validate the SAML authentication response (assertions) that the IdP sends. You must generate the metadata document using the identity management software that is used as your organization's IdP.

This operation requires Signature Version 4.

For more information, see Enabling SAML 2.0 Federated Users to Access the AWS Management Console and About SAML 2.0-based Federation in the IAM User Guide.

" + "documentation":"

Creates an IAM resource that describes an identity provider (IdP) that supports SAML 2.0.

The SAML provider resource that you create with this operation can be used as a principal in an IAM role's trust policy. Such a policy can enable federated users who sign in using the SAML IdP to assume the role. You can create an IAM role that supports Web-based single sign-on (SSO) to the AWS Management Console or one that supports API access to AWS.

When you create the SAML provider resource, you upload a SAML metadata document that you get from your IdP. That document includes the issuer's name, expiration information, and keys that can be used to validate the SAML authentication response (assertions) that the IdP sends. You must generate the metadata document using the identity management software that is used as your organization's IdP.

This operation requires Signature Version 4.

For more information, see Enabling SAML 2.0 federated users to access the AWS Management Console and About SAML 2.0-based federation in the IAM User Guide.

" }, "CreateServiceLinkedRole":{ "name":"CreateServiceLinkedRole", @@ -329,7 +334,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Creates an IAM role that is linked to a specific AWS service. The service controls the attached policies and when the role can be deleted. This helps ensure that the service is not broken by an unexpectedly changed or deleted role, which could put your AWS resources into an unknown state. Allowing the service to control the role helps improve service stability and proper cleanup when a service and its role are no longer needed. For more information, see Using Service-Linked Roles in the IAM User Guide.

To attach a policy to this service-linked role, you must make the request using the AWS service that depends on this role.

" + "documentation":"

Creates an IAM role that is linked to a specific AWS service. The service controls the attached policies and when the role can be deleted. This helps ensure that the service is not broken by an unexpectedly changed or deleted role, which could put your AWS resources into an unknown state. Allowing the service to control the role helps improve service stability and proper cleanup when a service and its role are no longer needed. For more information, see Using service-linked roles in the IAM User Guide.

To attach a policy to this service-linked role, you must make the request using the AWS service that depends on this role.

" }, "CreateServiceSpecificCredential":{ "name":"CreateServiceSpecificCredential", @@ -347,7 +352,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceNotSupportedException"} ], - "documentation":"

Generates a set of credentials consisting of a user name and password that can be used to access the service specified in the request. These credentials are generated by IAM, and can be used only for the specified service.

You can have a maximum of two sets of service-specific credentials for each supported service per user.

The only supported service at this time is AWS CodeCommit.

You can reset the password to a new service-generated value by calling ResetServiceSpecificCredential.

For more information about service-specific credentials, see Using IAM with AWS CodeCommit: Git Credentials, SSH Keys, and AWS Access Keys in the IAM User Guide.

" + "documentation":"

Generates a set of credentials consisting of a user name and password that can be used to access the service specified in the request. These credentials are generated by IAM, and can be used only for the specified service.

You can have a maximum of two sets of service-specific credentials for each supported service per user.

You can create service-specific credentials for AWS CodeCommit and Amazon Keyspaces (for Apache Cassandra).

You can reset the password to a new service-generated value by calling ResetServiceSpecificCredential.

For more information about service-specific credentials, see Using IAM with AWS CodeCommit: Git credentials, SSH keys, and AWS access keys in the IAM User Guide.
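A sketch, assuming an existing user named alice, that generates Git credentials for AWS CodeCommit; the Amazon Keyspaces service name shown in the comment is an assumption.

```python
import botocore.session

iam = botocore.session.get_session().create_client("iam", region_name="us-east-1")
resp = iam.create_service_specific_credential(
    UserName="alice",
    ServiceName="codecommit.amazonaws.com",  # for Amazon Keyspaces, assumed: cassandra.amazonaws.com
)

cred = resp["ServiceSpecificCredential"]
# The generated password is only returned here; store it securely.
print(cred["ServiceUserName"])
print(cred["ServicePassword"])
```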

" }, "CreateUser":{ "name":"CreateUser", @@ -368,7 +373,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Creates a new IAM user for your AWS account.

The number and size of IAM resources in an AWS account are limited. For more information, see IAM and STS Quotas in the IAM User Guide.

" + "documentation":"

Creates a new IAM user for your AWS account.

For information about quotas for the number of IAM users you can create, see IAM and STS quotas in the IAM User Guide.

" }, "CreateVirtualMFADevice":{ "name":"CreateVirtualMFADevice", @@ -383,10 +388,12 @@ }, "errors":[ {"shape":"LimitExceededException"}, + {"shape":"InvalidInputException"}, {"shape":"EntityAlreadyExistsException"}, + {"shape":"ConcurrentModificationException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Creates a new virtual MFA device for the AWS account. After creating the virtual MFA, use EnableMFADevice to attach the MFA device to an IAM user. For more information about creating and working with virtual MFA devices, go to Using a Virtual MFA Device in the IAM User Guide.

The number and size of IAM resources in an AWS account are limited. For more information, see IAM and STS Quotas in the IAM User Guide.

The seed information contained in the QR code and the Base32 string should be treated like any other secret access information. In other words, protect the seed information as you would your AWS access keys or your passwords. After you provision your virtual device, you should ensure that the information is destroyed following secure procedures.

" + "documentation":"

Creates a new virtual MFA device for the AWS account. After creating the virtual MFA, use EnableMFADevice to attach the MFA device to an IAM user. For more information about creating and working with virtual MFA devices, see Using a virtual MFA device in the IAM User Guide.

For information about the maximum number of MFA devices you can create, see IAM and STS quotas in the IAM User Guide.

The seed information contained in the QR code and the Base32 string should be treated like any other secret access information. In other words, protect the seed information as you would your AWS access keys or your passwords. After you provision your virtual device, you should ensure that the information is destroyed following secure procedures.
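A sketch of creating a virtual MFA device and then attaching it to a user with EnableMFADevice; the device and user names are placeholders, the authentication codes are dummies, and seed handling is deliberately minimal.

```python
import botocore.session

iam = botocore.session.get_session().create_client("iam", region_name="us-east-1")

device = iam.create_virtual_mfa_device(VirtualMFADeviceName="alice-mfa")["VirtualMFADevice"]
# device["Base32StringSeed"] and device["QRCodePNG"] hold the seed material;
# treat them like a secret and destroy them after provisioning.

# After loading the seed into an authenticator app, attach the device to the
# user by supplying two consecutive codes from the app.
iam.enable_mfa_device(
    UserName="alice",
    SerialNumber=device["SerialNumber"],
    AuthenticationCode1="123456",  # placeholder codes
    AuthenticationCode2="654321",
)
```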

" }, "DeactivateMFADevice":{ "name":"DeactivateMFADevice", @@ -401,7 +408,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deactivates the specified MFA device and removes it from association with the user name for which it was originally enabled.

For more information about creating and working with virtual MFA devices, go to Enabling a Virtual Multi-factor Authentication (MFA) Device in the IAM User Guide.

" + "documentation":"

Deactivates the specified MFA device and removes it from association with the user name for which it was originally enabled.

For more information about creating and working with virtual MFA devices, see Enabling a virtual multi-factor authentication (MFA) device in the IAM User Guide.

" }, "DeleteAccessKey":{ "name":"DeleteAccessKey", @@ -429,7 +436,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes the specified AWS account alias. For information about using an AWS account alias, see Using an Alias for Your AWS Account ID in the IAM User Guide.

" + "documentation":"

Deletes the specified AWS account alias. For information about using an AWS account alias, see Using an alias for your AWS account ID in the IAM User Guide.

" }, "DeleteAccountPasswordPolicy":{ "name":"DeleteAccountPasswordPolicy", @@ -471,7 +478,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes the specified inline policy that is embedded in the specified IAM group.

A group can also have managed policies attached to it. To detach a managed policy from a group, use DetachGroupPolicy. For more information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

Deletes the specified inline policy that is embedded in the specified IAM group.

A group can also have managed policies attached to it. To detach a managed policy from a group, use DetachGroupPolicy. For more information about policies, refer to Managed policies and inline policies in the IAM User Guide.

" }, "DeleteInstanceProfile":{ "name":"DeleteInstanceProfile", @@ -486,7 +493,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes the specified instance profile. The instance profile must not have an associated role.

Make sure that you do not have any Amazon EC2 instances running with the instance profile you are about to delete. Deleting a role or instance profile that is associated with a running instance will break any applications running on the instance.

For more information about instance profiles, go to About Instance Profiles.

" + "documentation":"

Deletes the specified instance profile. The instance profile must not have an associated role.

Make sure that you do not have any Amazon EC2 instances running with the instance profile you are about to delete. Deleting a role or instance profile that is associated with a running instance will break any applications running on the instance.

For more information about instance profiles, see About instance profiles.

" }, "DeleteLoginProfile":{ "name":"DeleteLoginProfile", @@ -501,7 +508,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes the password for the specified IAM user, which terminates the user's ability to access AWS services through the AWS Management Console.

Deleting a user's password does not prevent a user from accessing AWS through the command line interface or the API. To prevent all user access, you must also either make any access keys inactive or delete them. For more information about making keys inactive or deleting them, see UpdateAccessKey and DeleteAccessKey.

" + "documentation":"

Deletes the password for the specified IAM user, which terminates the user's ability to access AWS services through the AWS Management Console.

You can use the AWS CLI, the AWS API, or the Users page in the IAM console to delete a password for any IAM user. You can use ChangePassword to update, but not delete, your own password in the My Security Credentials page in the AWS Management Console.

Deleting a user's password does not prevent a user from accessing AWS through the command line interface or the API. To prevent all user access, you must also either make any access keys inactive or delete them. For more information about making keys inactive or deleting them, see UpdateAccessKey and DeleteAccessKey.

" }, "DeleteOpenIDConnectProvider":{ "name":"DeleteOpenIDConnectProvider", @@ -531,7 +538,7 @@ {"shape":"DeleteConflictException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes the specified managed policy.

Before you can delete a managed policy, you must first detach the policy from all users, groups, and roles that it is attached to. In addition, you must delete all the policy's versions. The following steps describe the process for deleting a managed policy:

For information about managed policies, see Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

Deletes the specified managed policy.

Before you can delete a managed policy, you must first detach the policy from all users, groups, and roles that it is attached to. In addition, you must delete all the policy's versions. The process for deleting a managed policy is therefore: detach the policy from every user, group, and role (DetachUserPolicy, DetachGroupPolicy, DetachRolePolicy), delete every version other than the default version (DeletePolicyVersion), and then delete the policy itself with this operation.

For information about managed policies, see Managed policies and inline policies in the IAM User Guide.
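The sketch below walks that cleanup sequence for a customer managed policy; the ARN is a placeholder and pagination of the listing calls is omitted for brevity.

```python
import botocore.session

iam = botocore.session.get_session().create_client("iam", region_name="us-east-1")
arn = "arn:aws:iam::123456789012:policy/list-buckets-example"  # placeholder ARN

# 1. Detach the policy from every group, user, and role it is attached to.
attached = iam.list_entities_for_policy(PolicyArn=arn)
for group in attached["PolicyGroups"]:
    iam.detach_group_policy(GroupName=group["GroupName"], PolicyArn=arn)
for user in attached["PolicyUsers"]:
    iam.detach_user_policy(UserName=user["UserName"], PolicyArn=arn)
for role in attached["PolicyRoles"]:
    iam.detach_role_policy(RoleName=role["RoleName"], PolicyArn=arn)

# 2. Delete every version except the default one.
for version in iam.list_policy_versions(PolicyArn=arn)["Versions"]:
    if not version["IsDefaultVersion"]:
        iam.delete_policy_version(PolicyArn=arn, VersionId=version["VersionId"])

# 3. Delete the policy itself (this also removes the default version).
iam.delete_policy(PolicyArn=arn)
```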

" }, "DeletePolicyVersion":{ "name":"DeletePolicyVersion", @@ -547,7 +554,7 @@ {"shape":"DeleteConflictException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes the specified version from the specified managed policy.

You cannot delete the default version from a policy using this API. To delete the default version from a policy, use DeletePolicy. To find out which version of a policy is marked as the default version, use ListPolicyVersions.

For information about versions for managed policies, see Versioning for Managed Policies in the IAM User Guide.

" + "documentation":"

Deletes the specified version from the specified managed policy.

You cannot delete the default version from a policy using this operation. To delete the default version from a policy, use DeletePolicy. To find out which version of a policy is marked as the default version, use ListPolicyVersions.

For information about versions for managed policies, see Versioning for managed policies in the IAM User Guide.

" }, "DeleteRole":{ "name":"DeleteRole", @@ -564,7 +571,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes the specified role. The role must not have any policies attached. For more information about roles, go to Working with Roles.

Make sure that you do not have any Amazon EC2 instances running with the role you are about to delete. Deleting a role or instance profile that is associated with a running instance will break any applications running on the instance.

" + "documentation":"

Deletes the specified role. The role must not have any policies attached. For more information about roles, see Working with roles.

Make sure that you do not have any Amazon EC2 instances running with the role you are about to delete. Deleting a role or instance profile that is associated with a running instance will break any applications running on the instance.

" }, "DeleteRolePermissionsBoundary":{ "name":"DeleteRolePermissionsBoundary", @@ -593,7 +600,7 @@ {"shape":"UnmodifiableEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes the specified inline policy that is embedded in the specified IAM role.

A role can also have managed policies attached to it. To detach a managed policy from a role, use DetachRolePolicy. For more information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

Deletes the specified inline policy that is embedded in the specified IAM role.

A role can also have managed policies attached to it. To detach a managed policy from a role, use DetachRolePolicy. For more information about policies, refer to Managed policies and inline policies in the IAM User Guide.

" }, "DeleteSAMLProvider":{ "name":"DeleteSAMLProvider", @@ -620,7 +627,7 @@ "errors":[ {"shape":"NoSuchEntityException"} ], - "documentation":"

Deletes the specified SSH public key.

The SSH public key deleted by this operation is used only for authenticating the associated IAM user to an AWS CodeCommit repository. For more information about using SSH keys to authenticate to an AWS CodeCommit repository, see Set up AWS CodeCommit for SSH Connections in the AWS CodeCommit User Guide.

" + "documentation":"

Deletes the specified SSH public key.

The SSH public key deleted by this operation is used only for authenticating the associated IAM user to an AWS CodeCommit repository. For more information about using SSH keys to authenticate to an AWS CodeCommit repository, see Set up AWS CodeCommit for SSH connections in the AWS CodeCommit User Guide.

" }, "DeleteServerCertificate":{ "name":"DeleteServerCertificate", @@ -635,7 +642,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes the specified server certificate.

For more information about working with server certificates, see Working with Server Certificates in the IAM User Guide. This topic also includes a list of AWS services that can use the server certificates that you manage with IAM.

If you are using a server certificate with Elastic Load Balancing, deleting the certificate could have implications for your application. If Elastic Load Balancing doesn't detect the deletion of bound certificates, it may continue to use the certificates. This could cause Elastic Load Balancing to stop accepting traffic. We recommend that you remove the reference to the certificate from Elastic Load Balancing before using this command to delete the certificate. For more information, go to DeleteLoadBalancerListeners in the Elastic Load Balancing API Reference.

" + "documentation":"

Deletes the specified server certificate.

For more information about working with server certificates, see Working with server certificates in the IAM User Guide. This topic also includes a list of AWS services that can use the server certificates that you manage with IAM.

If you are using a server certificate with Elastic Load Balancing, deleting the certificate could have implications for your application. If Elastic Load Balancing doesn't detect the deletion of bound certificates, it may continue to use the certificates. This could cause Elastic Load Balancing to stop accepting traffic. We recommend that you remove the reference to the certificate from Elastic Load Balancing before using this command to delete the certificate. For more information, see DeleteLoadBalancerListeners in the Elastic Load Balancing API Reference.

" }, "DeleteServiceLinkedRole":{ "name":"DeleteServiceLinkedRole", @@ -653,7 +660,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Submits a service-linked role deletion request and returns a DeletionTaskId, which you can use to check the status of the deletion. Before you call this operation, confirm that the role has no active sessions and that any resources used by the role in the linked service are deleted. If you call this operation more than once for the same service-linked role and an earlier deletion task is not complete, then the DeletionTaskId of the earlier request is returned.

If you submit a deletion request for a service-linked role whose linked service is still accessing a resource, then the deletion task fails. If it fails, the GetServiceLinkedRoleDeletionStatus API operation returns the reason for the failure, usually including the resources that must be deleted. To delete the service-linked role, you must first remove those resources from the linked service and then submit the deletion request again. Resources are specific to the service that is linked to the role. For more information about removing resources from a service, see the AWS documentation for your service.

For more information about service-linked roles, see Roles Terms and Concepts: AWS Service-Linked Role in the IAM User Guide.

" + "documentation":"

Submits a service-linked role deletion request and returns a DeletionTaskId, which you can use to check the status of the deletion. Before you call this operation, confirm that the role has no active sessions and that any resources used by the role in the linked service are deleted. If you call this operation more than once for the same service-linked role and an earlier deletion task is not complete, then the DeletionTaskId of the earlier request is returned.

If you submit a deletion request for a service-linked role whose linked service is still accessing a resource, then the deletion task fails. If it fails, the GetServiceLinkedRoleDeletionStatus operation returns the reason for the failure, usually including the resources that must be deleted. To delete the service-linked role, you must first remove those resources from the linked service and then submit the deletion request again. Resources are specific to the service that is linked to the role. For more information about removing resources from a service, see the AWS documentation for your service.

For more information about service-linked roles, see Roles terms and concepts: AWS service-linked role in the IAM User Guide.
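A sketch of submitting the deletion request and polling the returned DeletionTaskId; the role name is a placeholder.

```python
import time

import botocore.session

iam = botocore.session.get_session().create_client("iam", region_name="us-east-1")

task_id = iam.delete_service_linked_role(
    RoleName="AWSServiceRoleForExampleService"  # placeholder service-linked role name
)["DeletionTaskId"]

# Poll until the deletion task finishes one way or the other.
while True:
    status = iam.get_service_linked_role_deletion_status(DeletionTaskId=task_id)
    if status["Status"] in ("SUCCEEDED", "FAILED"):
        break
    time.sleep(5)

print(status["Status"], status.get("Reason"))
```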

" }, "DeleteServiceSpecificCredential":{ "name":"DeleteServiceSpecificCredential", @@ -695,7 +702,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes the specified IAM user. Unlike the AWS Management Console, when you delete a user programmatically, you must delete the items attached to the user manually, or the deletion fails. For more information, see Deleting an IAM User. Before attempting to delete a user, remove the following items:

" + "documentation":"

Deletes the specified IAM user. Unlike the AWS Management Console, when you delete a user programmatically, you must delete the items attached to the user manually, or the deletion fails. For more information, see Deleting an IAM user. Before attempting to delete a user, remove the following items: the user's password (DeleteLoginProfile), access keys (DeleteAccessKey), signing certificates (DeleteSigningCertificate), SSH public keys (DeleteSSHPublicKey), service-specific credentials (DeleteServiceSpecificCredential), multi-factor authentication (MFA) device (DeactivateMFADevice, DeleteVirtualMFADevice), inline policies (DeleteUserPolicy), attached managed policies (DetachUserPolicy), and group memberships (RemoveUserFromGroup).

" }, "DeleteUserPermissionsBoundary":{ "name":"DeleteUserPermissionsBoundary", @@ -722,7 +729,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes the specified inline policy that is embedded in the specified IAM user.

A user can also have managed policies attached to it. To detach a managed policy from a user, use DetachUserPolicy. For more information about policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

Deletes the specified inline policy that is embedded in the specified IAM user.

A user can also have managed policies attached to it. To detach a managed policy from a user, use DetachUserPolicy. For more information about policies, refer to Managed policies and inline policies in the IAM User Guide.

" }, "DeleteVirtualMFADevice":{ "name":"DeleteVirtualMFADevice", @@ -752,7 +759,7 @@ {"shape":"InvalidInputException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Removes the specified managed policy from the specified IAM group.

A group can also have inline policies embedded with it. To delete an inline policy, use the DeleteGroupPolicy API. For information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

Removes the specified managed policy from the specified IAM group.

A group can also have inline policies embedded with it. To delete an inline policy, use DeleteGroupPolicy. For information about policies, see Managed policies and inline policies in the IAM User Guide.

" }, "DetachRolePolicy":{ "name":"DetachRolePolicy", @@ -768,7 +775,7 @@ {"shape":"UnmodifiableEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Removes the specified managed policy from the specified role.

A role can also have inline policies embedded with it. To delete an inline policy, use the DeleteRolePolicy API. For information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

Removes the specified managed policy from the specified role.

A role can also have inline policies embedded with it. To delete an inline policy, use DeleteRolePolicy. For information about policies, see Managed policies and inline policies in the IAM User Guide.

" }, "DetachUserPolicy":{ "name":"DetachUserPolicy", @@ -783,7 +790,7 @@ {"shape":"InvalidInputException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Removes the specified managed policy from the specified user.

A user can also have inline policies embedded with it. To delete an inline policy, use the DeleteUserPolicy API. For information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

Removes the specified managed policy from the specified user.

A user can also have inline policies embedded with it. To delete an inline policy, use DeleteUserPolicy. For information about policies, see Managed policies and inline policies in the IAM User Guide.

" }, "EnableMFADevice":{ "name":"EnableMFADevice", @@ -816,7 +823,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Generates a credential report for the AWS account. For more information about the credential report, see Getting Credential Reports in the IAM User Guide.

" + "documentation":"

Generates a credential report for the AWS account. For more information about the credential report, see Getting credential reports in the IAM User Guide.
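Generating and then downloading the report is a two-step flow; a minimal sketch follows (GetCredentialReport is described later in this model), with credentials assumed to come from the environment.

```python
import time

import botocore.session

iam = botocore.session.get_session().create_client("iam", region_name="us-east-1")

# GenerateCredentialReport is asynchronous; poll until the report is complete.
while iam.generate_credential_report()["State"] != "COMPLETE":
    time.sleep(2)

report = iam.get_credential_report()
print(report["ReportFormat"])             # text/csv
print(report["Content"].decode()[:200])   # first few lines of the CSV
```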

" }, "GenerateOrganizationsAccessReport":{ "name":"GenerateOrganizationsAccessReport", @@ -832,7 +839,7 @@ "errors":[ {"shape":"ReportGenerationLimitExceededException"} ], - "documentation":"

Generates a report for service last accessed data for AWS Organizations. You can generate a report for any entities (organization root, organizational unit, or account) or policies in your organization.

To call this operation, you must be signed in using your AWS Organizations master account credentials. You can use your long-term IAM user or root user credentials, or temporary credentials from assuming an IAM role. SCPs must be enabled for your organization root. You must have the required IAM and AWS Organizations permissions. For more information, see Refining Permissions Using Service Last Accessed Data in the IAM User Guide.

You can generate a service last accessed data report for entities by specifying only the entity's path. This data includes a list of services that are allowed by any service control policies (SCPs) that apply to the entity.

You can generate a service last accessed data report for a policy by specifying an entity's path and an optional AWS Organizations policy ID. This data includes a list of services that are allowed by the specified SCP.

For each service in both report types, the data includes the most recent account activity that the policy allows to account principals in the entity or the entity's children. For important information about the data, reporting period, permissions required, troubleshooting, and supported Regions see Reducing Permissions Using Service Last Accessed Data in the IAM User Guide.

The data includes all attempts to access AWS, not just the successful ones. This includes all attempts that were made using the AWS Management Console, the AWS API through any of the SDKs, or any of the command line tools. An unexpected entry in the service last accessed data does not mean that an account has been compromised, because the request might have been denied. Refer to your CloudTrail logs as the authoritative source for information about all API calls and whether they were successful or denied access. For more information, see Logging IAM Events with CloudTrail in the IAM User Guide.

This operation returns a JobId. Use this parameter in the GetOrganizationsAccessReport operation to check the status of the report generation. To check the status of this request, use the JobId parameter in the GetOrganizationsAccessReport operation and test the JobStatus response parameter. When the job is complete, you can retrieve the report.

To generate a service last accessed data report for entities, specify an entity path without specifying the optional AWS Organizations policy ID. The type of entity that you specify determines the data returned in the report.

To generate a service last accessed data report for policies, specify an entity path and the optional AWS Organizations policy ID. The type of entity that you specify determines the data returned for each service.

Service last accessed data does not use other policy types when determining whether a principal could access a service. These other policy types include identity-based policies, resource-based policies, access control lists, IAM permissions boundaries, and STS assume role policies. It only applies SCP logic. For more about the evaluation of policy types, see Evaluating Policies in the IAM User Guide.

For more information about service last accessed data, see Reducing Policy Scope by Viewing User Activity in the IAM User Guide.

" + "documentation":"

Generates a report for service last accessed data for AWS Organizations. You can generate a report for any entities (organization root, organizational unit, or account) or policies in your organization.

To call this operation, you must be signed in using your AWS Organizations management account credentials. You can use your long-term IAM user or root user credentials, or temporary credentials from assuming an IAM role. SCPs must be enabled for your organization root. You must have the required IAM and AWS Organizations permissions. For more information, see Refining permissions using service last accessed data in the IAM User Guide.

You can generate a service last accessed data report for entities by specifying only the entity's path. This data includes a list of services that are allowed by any service control policies (SCPs) that apply to the entity.

You can generate a service last accessed data report for a policy by specifying an entity's path and an optional AWS Organizations policy ID. This data includes a list of services that are allowed by the specified SCP.

For each service in both report types, the data includes the most recent account activity that the policy allows to account principals in the entity or the entity's children. For important information about the data, reporting period, permissions required, troubleshooting, and supported Regions, see Reducing permissions using service last accessed data in the IAM User Guide.

The data includes all attempts to access AWS, not just the successful ones. This includes all attempts that were made using the AWS Management Console, the AWS API through any of the SDKs, or any of the command line tools. An unexpected entry in the service last accessed data does not mean that an account has been compromised, because the request might have been denied. Refer to your CloudTrail logs as the authoritative source for information about all API calls and whether they were successful or denied access. For more information, see Logging IAM events with CloudTrail in the IAM User Guide.

This operation returns a JobId. To check the status of the report generation, use the JobId parameter in the GetOrganizationsAccessReport operation and test the JobStatus response parameter. When the job is complete, you can retrieve the report.

To generate a service last accessed data report for entities, specify an entity path without specifying the optional AWS Organizations policy ID. The type of entity that you specify determines the data returned in the report.

To generate a service last accessed data report for policies, specify an entity path and the optional AWS Organizations policy ID. The type of entity that you specify determines the data returned for each service.

Service last accessed data does not use other policy types when determining whether a principal could access a service. These other policy types include identity-based policies, resource-based policies, access control lists, IAM permissions boundaries, and STS assume role policies. It only applies SCP logic. For more about the evaluation of policy types, see Evaluating policies in the IAM User Guide.

For more information about service last accessed data, see Reducing policy scope by viewing user activity in the IAM User Guide.
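A sketch of the JobId flow described above, assuming Organizations management-account credentials and a placeholder entity path:

```python
import time

import botocore.session

iam = botocore.session.get_session().create_client("iam", region_name="us-east-1")

# Placeholder entity path in o-xxx/r-xxx/ou-xxx form (organization root / OU).
job_id = iam.generate_organizations_access_report(
    EntityPath="o-a1b2c3d4e5/r-f6g7/ou-f6g7-1example"
)["JobId"]

while True:
    report = iam.get_organizations_access_report(JobId=job_id)
    if report["JobStatus"] != "IN_PROGRESS":
        break
    time.sleep(5)

for detail in report.get("AccessDetails", []):
    print(detail["ServiceNamespace"], detail.get("LastAuthenticatedTime"))
```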

" }, "GenerateServiceLastAccessedDetails":{ "name":"GenerateServiceLastAccessedDetails", @@ -849,7 +856,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"InvalidInputException"} ], - "documentation":"

Generates a report that includes details about when an IAM resource (user, group, role, or policy) was last used in an attempt to access AWS services. Recent activity usually appears within four hours. IAM reports activity for the last 365 days, or less if your Region began supporting this feature within the last year. For more information, see Regions Where Data Is Tracked.

The service last accessed data includes all attempts to access an AWS API, not just the successful ones. This includes all attempts that were made using the AWS Management Console, the AWS API through any of the SDKs, or any of the command line tools. An unexpected entry in the service last accessed data does not mean that your account has been compromised, because the request might have been denied. Refer to your CloudTrail logs as the authoritative source for information about all API calls and whether they were successful or denied access. For more information, see Logging IAM Events with CloudTrail in the IAM User Guide.

The GenerateServiceLastAccessedDetails operation returns a JobId. Use this parameter in the following operations to retrieve the following details from your report:

To check the status of the GenerateServiceLastAccessedDetails request, use the JobId parameter in the same operations and test the JobStatus response parameter.

For additional information about the permissions policies that allow an identity (user, group, or role) to access specific services, use the ListPoliciesGrantingServiceAccess operation.

Service last accessed data does not use other policy types when determining whether a resource could access a service. These other policy types include resource-based policies, access control lists, AWS Organizations policies, IAM permissions boundaries, and AWS STS assume role policies. It only applies permissions policy logic. For more about the evaluation of policy types, see Evaluating Policies in the IAM User Guide.

For more information about service and action last accessed data, see Reducing Permissions Using Service Last Accessed Data in the IAM User Guide.

" + "documentation":"

Generates a report that includes details about when an IAM resource (user, group, role, or policy) was last used in an attempt to access AWS services. Recent activity usually appears within four hours. IAM reports activity for the last 365 days, or less if your Region began supporting this feature within the last year. For more information, see Regions where data is tracked.

The service last accessed data includes all attempts to access an AWS API, not just the successful ones. This includes all attempts that were made using the AWS Management Console, the AWS API through any of the SDKs, or any of the command line tools. An unexpected entry in the service last accessed data does not mean that your account has been compromised, because the request might have been denied. Refer to your CloudTrail logs as the authoritative source for information about all API calls and whether they were successful or denied access. For more information, see Logging IAM events with CloudTrail in the IAM User Guide.

The GenerateServiceLastAccessedDetails operation returns a JobId. Use this parameter in the following operations to retrieve details from your report: GetServiceLastAccessedDetails, which lists the AWS services that the entity's permissions policies allow and when each service was last accessed, and GetServiceLastAccessedDetailsWithEntities, which lists the entities that attempted to access a specific service.

To check the status of the GenerateServiceLastAccessedDetails request, use the JobId parameter in the same operations and test the JobStatus response parameter.

For additional information about the permissions policies that allow an identity (user, group, or role) to access specific services, use the ListPoliciesGrantingServiceAccess operation.

Service last accessed data does not use other policy types when determining whether a resource could access a service. These other policy types include resource-based policies, access control lists, AWS Organizations policies, IAM permissions boundaries, and AWS STS assume role policies. It only applies permissions policy logic. For more about the evaluation of policy types, see Evaluating policies in the IAM User Guide.

For more information about service and action last accessed data, see Reducing permissions using service last accessed data in the IAM User Guide.
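The same JobId pattern applies here; a sketch for a placeholder role ARN, with credentials assumed to come from the environment:

```python
import time

import botocore.session

iam = botocore.session.get_session().create_client("iam", region_name="us-east-1")

job_id = iam.generate_service_last_accessed_details(
    Arn="arn:aws:iam::123456789012:role/example-ec2-role"  # placeholder ARN
)["JobId"]

while True:
    details = iam.get_service_last_accessed_details(JobId=job_id)
    if details["JobStatus"] != "IN_PROGRESS":
        break
    time.sleep(2)

for service in details["ServicesLastAccessed"]:
    print(service["ServiceNamespace"], service.get("LastAuthenticated"))
```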

" }, "GetAccessKeyLastUsed":{ "name":"GetAccessKeyLastUsed", @@ -878,7 +885,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves information about all IAM users, groups, roles, and policies in your AWS account, including their relationships to one another. Use this API to obtain a snapshot of the configuration of IAM permissions (users, groups, roles, and policies) in your account.

Policies returned by this API are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

You can optionally filter the results using the Filter parameter. You can paginate the results using the MaxItems and Marker parameters.

" + "documentation":"

Retrieves information about all IAM users, groups, roles, and policies in your AWS account, including their relationships to one another. Use this operation to obtain a snapshot of the configuration of IAM permissions (users, groups, roles, and policies) in your account.

Policies returned by this operation are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

You can optionally filter the results using the Filter parameter. You can paginate the results using the MaxItems and Marker parameters.
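Because the response can be large, a typical botocore sketch drives this operation through its paginator and filters to the entity types of interest; the filter values shown are placeholders for whatever subset you need.

```python
import botocore.session

iam = botocore.session.get_session().create_client("iam", region_name="us-east-1")
paginator = iam.get_paginator("get_account_authorization_details")

users, roles = [], []
for page in paginator.paginate(Filter=["User", "Role"]):
    users.extend(page.get("UserDetailList", []))
    roles.extend(page.get("RoleDetailList", []))

print(len(users), "users,", len(roles), "roles")
```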

" }, "GetAccountPasswordPolicy":{ "name":"GetAccountPasswordPolicy", @@ -894,7 +901,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves the password policy for the AWS account. For more information about using a password policy, go to Managing an IAM Password Policy.

" + "documentation":"

Retrieves the password policy for the AWS account. This tells you the complexity requirements and mandatory rotation periods for the IAM user passwords in your account. For more information about using a password policy, see Managing an IAM password policy.

" }, "GetAccountSummary":{ "name":"GetAccountSummary", @@ -909,7 +916,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves information about IAM entity usage and IAM quotas in the AWS account.

The number and size of IAM resources in an AWS account are limited. For more information, see IAM and STS Quotas in the IAM User Guide.

" + "documentation":"

Retrieves information about IAM entity usage and IAM quotas in the AWS account.

For information about IAM quotas, see IAM and STS quotas in the IAM User Guide.

" }, "GetContextKeysForCustomPolicy":{ "name":"GetContextKeysForCustomPolicy", @@ -942,7 +949,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"InvalidInputException"} ], - "documentation":"

Gets a list of all of the context keys referenced in all the IAM policies that are attached to the specified IAM entity. The entity can be an IAM user, group, or role. If you specify a user, then the request also includes all of the policies attached to groups that the user is a member of.

You can optionally include a list of one or more additional policies, specified as strings. If you want to include only a list of policies by string, use GetContextKeysForCustomPolicy instead.

Note: This API discloses information about the permissions granted to other users. If you do not want users to see other user's permissions, then consider allowing them to use GetContextKeysForCustomPolicy instead.

Context keys are variables maintained by AWS and its services that provide details about the context of an API query request. Context keys can be evaluated by testing against a value in an IAM policy. Use GetContextKeysForPrincipalPolicy to understand what key names and values you must supply when you call SimulatePrincipalPolicy.

" + "documentation":"

Gets a list of all of the context keys referenced in all the IAM policies that are attached to the specified IAM entity. The entity can be an IAM user, group, or role. If you specify a user, then the request also includes all of the policies attached to groups that the user is a member of.

You can optionally include a list of one or more additional policies, specified as strings. If you want to include only a list of policies by string, use GetContextKeysForCustomPolicy instead.

Note: This operation discloses information about the permissions granted to other users. If you do not want users to see other users' permissions, then consider allowing them to use GetContextKeysForCustomPolicy instead.

Context keys are variables maintained by AWS and its services that provide details about the context of an API query request. Context keys can be evaluated by testing against a value in an IAM policy. Use GetContextKeysForPrincipalPolicy to understand what key names and values you must supply when you call SimulatePrincipalPolicy.

" }, "GetCredentialReport":{ "name":"GetCredentialReport", @@ -960,7 +967,7 @@ {"shape":"CredentialReportNotReadyException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves a credential report for the AWS account. For more information about the credential report, see Getting Credential Reports in the IAM User Guide.

" + "documentation":"

Retrieves a credential report for the AWS account. For more information about the credential report, see Getting credential reports in the IAM User Guide.

" }, "GetGroup":{ "name":"GetGroup", @@ -994,7 +1001,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves the specified inline policy document that is embedded in the specified IAM group.

Policies returned by this API are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

An IAM group can also have managed policies attached to it. To retrieve a managed policy document that is attached to a group, use GetPolicy to determine the policy's default version, then use GetPolicyVersion to retrieve the policy document.

For more information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

Retrieves the specified inline policy document that is embedded in the specified IAM group.

Policies returned by this operation are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

An IAM group can also have managed policies attached to it. To retrieve a managed policy document that is attached to a group, use GetPolicy to determine the policy's default version, then use GetPolicyVersion to retrieve the policy document.

For more information about policies, see Managed policies and inline policies in the IAM User Guide.

" }, "GetInstanceProfile":{ "name":"GetInstanceProfile", @@ -1011,7 +1018,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves information about the specified instance profile, including the instance profile's path, GUID, ARN, and role. For more information about instance profiles, see About Instance Profiles in the IAM User Guide.

" + "documentation":"

Retrieves information about the specified instance profile, including the instance profile's path, GUID, ARN, and role. For more information about instance profiles, see About instance profiles in the IAM User Guide.

" }, "GetLoginProfile":{ "name":"GetLoginProfile", @@ -1028,7 +1035,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves the user name and password-creation date for the specified IAM user. If the user has not been assigned a password, the operation returns a 404 (NoSuchEntity) error.

" + "documentation":"

Retrieves the user name and password creation date for the specified IAM user. If the user has not been assigned a password, the operation returns a 404 (NoSuchEntity) error.

" }, "GetOpenIDConnectProvider":{ "name":"GetOpenIDConnectProvider", @@ -1062,7 +1069,7 @@ "errors":[ {"shape":"NoSuchEntityException"} ], - "documentation":"

Retrieves the service last accessed data report for AWS Organizations that was previously generated using the GenerateOrganizationsAccessReport operation. This operation retrieves the status of your report job and the report contents.

Depending on the parameters that you passed when you generated the report, the data returned could include different information. For details, see GenerateOrganizationsAccessReport.

To call this operation, you must be signed in to the master account in your organization. SCPs must be enabled for your organization root. You must have permissions to perform this operation. For more information, see Refining Permissions Using Service Last Accessed Data in the IAM User Guide.

For each service that principals in an account (root users, IAM users, or IAM roles) could access using SCPs, the operation returns details about the most recent access attempt. If there was no attempt, the service is listed without details about the most recent attempt to access the service. If the operation fails, it returns the reason that it failed.

By default, the list is sorted by service namespace.

" + "documentation":"

Retrieves the service last accessed data report for AWS Organizations that was previously generated using the GenerateOrganizationsAccessReport operation. This operation retrieves the status of your report job and the report contents.

Depending on the parameters that you passed when you generated the report, the data returned could include different information. For details, see GenerateOrganizationsAccessReport.

To call this operation, you must be signed in to the management account in your organization. SCPs must be enabled for your organization root. You must have permissions to perform this operation. For more information, see Refining permissions using service last accessed data in the IAM User Guide.

For each service that principals in an account (root users, IAM users, or IAM roles) could access using SCPs, the operation returns details about the most recent access attempt. If there was no attempt, the service is listed without details about the most recent attempt to access the service. If the operation fails, it returns the reason that it failed.

By default, the list is sorted by service namespace.

" }, "GetPolicy":{ "name":"GetPolicy", @@ -1080,7 +1087,7 @@ {"shape":"InvalidInputException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves information about the specified managed policy, including the policy's default version and the total number of IAM users, groups, and roles to which the policy is attached. To retrieve the list of the specific users, groups, and roles that the policy is attached to, use the ListEntitiesForPolicy API. This API returns metadata about the policy. To retrieve the actual policy document for a specific version of the policy, use GetPolicyVersion.

This API retrieves information about managed policies. To retrieve information about an inline policy that is embedded with an IAM user, group, or role, use the GetUserPolicy, GetGroupPolicy, or GetRolePolicy API.

For more information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

Retrieves information about the specified managed policy, including the policy's default version and the total number of IAM users, groups, and roles to which the policy is attached. To retrieve the list of the specific users, groups, and roles that the policy is attached to, use ListEntitiesForPolicy. This operation returns metadata about the policy. To retrieve the actual policy document for a specific version of the policy, use GetPolicyVersion.

This operation retrieves information about managed policies. To retrieve information about an inline policy that is embedded with an IAM user, group, or role, use GetUserPolicy, GetGroupPolicy, or GetRolePolicy.

For more information about policies, see Managed policies and inline policies in the IAM User Guide.
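
A minimal sketch of the two-step lookup described above, using a boto3 client and a placeholder policy ARN: GetPolicy supplies the default version ID, and GetPolicyVersion returns the document for that version.

    import boto3

    iam = boto3.client("iam")
    policy_arn = "arn:aws:iam::123456789012:policy/ExamplePolicy"  # placeholder

    meta = iam.get_policy(PolicyArn=policy_arn)["Policy"]
    version = iam.get_policy_version(PolicyArn=policy_arn,
                                     VersionId=meta["DefaultVersionId"])
    print(version["PolicyVersion"]["Document"])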

" }, "GetPolicyVersion":{ "name":"GetPolicyVersion", @@ -1098,7 +1105,7 @@ {"shape":"InvalidInputException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves information about the specified version of the specified managed policy, including the policy document.

Policies returned by this API are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

To list the available versions for a policy, use ListPolicyVersions.

This API retrieves information about managed policies. To retrieve information about an inline policy that is embedded in a user, group, or role, use the GetUserPolicy, GetGroupPolicy, or GetRolePolicy API.

For more information about the types of policies, see Managed Policies and Inline Policies in the IAM User Guide.

For more information about managed policy versions, see Versioning for Managed Policies in the IAM User Guide.

" + "documentation":"

Retrieves information about the specified version of the specified managed policy, including the policy document.

Policies returned by this operation are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

To list the available versions for a policy, use ListPolicyVersions.

This operation retrieves information about managed policies. To retrieve information about an inline policy that is embedded in a user, group, or role, use GetUserPolicy, GetGroupPolicy, or GetRolePolicy.

For more information about the types of policies, see Managed policies and inline policies in the IAM User Guide.

For more information about managed policy versions, see Versioning for managed policies in the IAM User Guide.

" }, "GetRole":{ "name":"GetRole", @@ -1115,7 +1122,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves information about the specified role, including the role's path, GUID, ARN, and the role's trust policy that grants permission to assume the role. For more information about roles, see Working with Roles.

Policies returned by this API are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

" + "documentation":"

Retrieves information about the specified role, including the role's path, GUID, ARN, and the role's trust policy that grants permission to assume the role. For more information about roles, see Working with roles.

Policies returned by this operation are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

" }, "GetRolePolicy":{ "name":"GetRolePolicy", @@ -1132,7 +1139,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves the specified inline policy document that is embedded with the specified IAM role.

Policies returned by this API are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

An IAM role can also have managed policies attached to it. To retrieve a managed policy document that is attached to a role, use GetPolicy to determine the policy's default version, then use GetPolicyVersion to retrieve the policy document.

For more information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

For more information about roles, see Using Roles to Delegate Permissions and Federate Identities.

" + "documentation":"

Retrieves the specified inline policy document that is embedded with the specified IAM role.

Policies returned by this operation are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

An IAM role can also have managed policies attached to it. To retrieve a managed policy document that is attached to a role, use GetPolicy to determine the policy's default version, then use GetPolicyVersion to retrieve the policy document.

For more information about policies, see Managed policies and inline policies in the IAM User Guide.

For more information about roles, see Using roles to delegate permissions and federate identities.

" }, "GetSAMLProvider":{ "name":"GetSAMLProvider", @@ -1167,7 +1174,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"UnrecognizedPublicKeyEncodingException"} ], - "documentation":"

Retrieves the specified SSH public key, including metadata about the key.

The SSH public key retrieved by this operation is used only for authenticating the associated IAM user to an AWS CodeCommit repository. For more information about using SSH keys to authenticate to an AWS CodeCommit repository, see Set up AWS CodeCommit for SSH Connections in the AWS CodeCommit User Guide.

" + "documentation":"

Retrieves the specified SSH public key, including metadata about the key.

The SSH public key retrieved by this operation is used only for authenticating the associated IAM user to an AWS CodeCommit repository. For more information about using SSH keys to authenticate to an AWS CodeCommit repository, see Set up AWS CodeCommit for SSH connections in the AWS CodeCommit User Guide.

" }, "GetServerCertificate":{ "name":"GetServerCertificate", @@ -1184,7 +1191,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves information about the specified server certificate stored in IAM.

For more information about working with server certificates, see Working with Server Certificates in the IAM User Guide. This topic includes a list of AWS services that can use the server certificates that you manage with IAM.

" + "documentation":"

Retrieves information about the specified server certificate stored in IAM.

For more information about working with server certificates, see Working with server certificates in the IAM User Guide. This topic includes a list of AWS services that can use the server certificates that you manage with IAM.

" }, "GetServiceLastAccessedDetails":{ "name":"GetServiceLastAccessedDetails", @@ -1201,7 +1208,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"InvalidInputException"} ], - "documentation":"

Retrieves a service last accessed report that was created using the GenerateServiceLastAccessedDetails operation. You can use the JobId parameter in GetServiceLastAccessedDetails to retrieve the status of your report job. When the report is complete, you can retrieve the generated report. The report includes a list of AWS services that the resource (user, group, role, or managed policy) can access.

Service last accessed data does not use other policy types when determining whether a resource could access a service. These other policy types include resource-based policies, access control lists, AWS Organizations policies, IAM permissions boundaries, and AWS STS assume role policies. It only applies permissions policy logic. For more about the evaluation of policy types, see Evaluating Policies in the IAM User Guide.

For each service that the resource could access using permissions policies, the operation returns details about the most recent access attempt. If there was no attempt, the service is listed without details about the most recent attempt to access the service. If the operation fails, the GetServiceLastAccessedDetails operation returns the reason that it failed.

The GetServiceLastAccessedDetails operation returns a list of services. This list includes the number of entities that have attempted to access the service and the date and time of the last attempt. It also returns the ARN of the following entity, depending on the resource ARN that you used to generate the report:

By default, the list is sorted by service namespace.

If you specified ACTION_LEVEL granularity when you generated the report, this operation returns service and action last accessed data. This includes the most recent access attempt for each tracked action within a service. Otherwise, this operation returns only service data.

For more information about service and action last accessed data, see Reducing Permissions Using Service Last Accessed Data in the IAM User Guide.

" + "documentation":"

Retrieves a service last accessed report that was created using the GenerateServiceLastAccessedDetails operation. You can use the JobId parameter in GetServiceLastAccessedDetails to retrieve the status of your report job. When the report is complete, you can retrieve the generated report. The report includes a list of AWS services that the resource (user, group, role, or managed policy) can access.

Service last accessed data does not use other policy types when determining whether a resource could access a service. These other policy types include resource-based policies, access control lists, AWS Organizations policies, IAM permissions boundaries, and AWS STS assume role policies. It only applies permissions policy logic. For more about the evaluation of policy types, see Evaluating policies in the IAM User Guide.

For each service that the resource could access using permissions policies, the operation returns details about the most recent access attempt. If there was no attempt, the service is listed without details about the most recent attempt to access the service. If the operation fails, the GetServiceLastAccessedDetails operation returns the reason that it failed.

The GetServiceLastAccessedDetails operation returns a list of services. This list includes the number of entities that have attempted to access the service and the date and time of the last attempt. It also returns the ARN of the following entity, depending on the resource ARN that you used to generate the report:

By default, the list is sorted by service namespace.

If you specified ACTION_LEVEL granularity when you generated the report, this operation returns service and action last accessed data. This includes the most recent access attempt for each tracked action within a service. Otherwise, this operation returns only service data.

For more information about service and action last accessed data, see Reducing permissions using service last accessed data in the IAM User Guide.
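
One way to exercise this pair of operations from Python, sketched with a placeholder role ARN: generate the report, poll the job status, then read the per-service results. The granularity value is optional and is shown only to match the ACTION_LEVEL case mentioned above.

    import time

    import boto3

    iam = boto3.client("iam")
    job = iam.generate_service_last_accessed_details(
        Arn="arn:aws:iam::123456789012:role/ExampleRole",  # placeholder
        Granularity="ACTION_LEVEL",
    )

    while True:
        report = iam.get_service_last_accessed_details(JobId=job["JobId"])
        if report["JobStatus"] != "IN_PROGRESS":
            break
        time.sleep(2)  # simple polling; adjust as needed

    for svc in report["ServicesLastAccessed"]:
        print(svc["ServiceNamespace"], svc.get("LastAuthenticated"))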

" }, "GetServiceLastAccessedDetailsWithEntities":{ "name":"GetServiceLastAccessedDetailsWithEntities", @@ -1236,7 +1243,7 @@ {"shape":"InvalidInputException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves the status of your service-linked role deletion. After you use the DeleteServiceLinkedRole API operation to submit a service-linked role for deletion, you can use the DeletionTaskId parameter in GetServiceLinkedRoleDeletionStatus to check the status of the deletion. If the deletion fails, this operation returns the reason that it failed, if that information is returned by the service.

" + "documentation":"

Retrieves the status of your service-linked role deletion. After you use DeleteServiceLinkedRole to submit a service-linked role for deletion, you can use the DeletionTaskId parameter in GetServiceLinkedRoleDeletionStatus to check the status of the deletion. If the deletion fails, this operation returns the reason that it failed, if that information is returned by the service.

" }, "GetUser":{ "name":"GetUser", @@ -1253,7 +1260,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves information about the specified IAM user, including the user's creation date, path, unique ID, and ARN.

If you do not specify a user name, IAM determines the user name implicitly based on the AWS access key ID used to sign the request to this API.

" + "documentation":"

Retrieves information about the specified IAM user, including the user's creation date, path, unique ID, and ARN.

If you do not specify a user name, IAM determines the user name implicitly based on the AWS access key ID used to sign the request to this operation.

" }, "GetUserPolicy":{ "name":"GetUserPolicy", @@ -1270,7 +1277,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves the specified inline policy document that is embedded in the specified IAM user.

Policies returned by this API are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

An IAM user can also have managed policies attached to it. To retrieve a managed policy document that is attached to a user, use GetPolicy to determine the policy's default version. Then use GetPolicyVersion to retrieve the policy document.

For more information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

Retrieves the specified inline policy document that is embedded in the specified IAM user.

Policies returned by this operation are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

An IAM user can also have managed policies attached to it. To retrieve a managed policy document that is attached to a user, use GetPolicy to determine the policy's default version. Then use GetPolicyVersion to retrieve the policy document.

For more information about policies, see Managed policies and inline policies in the IAM User Guide.

" }, "ListAccessKeys":{ "name":"ListAccessKeys", @@ -1303,7 +1310,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the account alias associated with the AWS account (Note: you can have only one). For information about using an AWS account alias, see Using an Alias for Your AWS Account ID in the IAM User Guide.

" + "documentation":"

Lists the account alias associated with the AWS account (Note: you can have only one). For information about using an AWS account alias, see Using an alias for your AWS account ID in the IAM User Guide.

" }, "ListAttachedGroupPolicies":{ "name":"ListAttachedGroupPolicies", @@ -1321,7 +1328,7 @@ {"shape":"InvalidInputException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists all managed policies that are attached to the specified IAM group.

An IAM group can also have inline policies embedded with it. To list the inline policies for a group, use the ListGroupPolicies API. For information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

You can paginate the results using the MaxItems and Marker parameters. You can use the PathPrefix parameter to limit the list of policies to only those matching the specified path prefix. If there are no policies attached to the specified group (or none that match the specified path prefix), the operation returns an empty list.

" + "documentation":"

Lists all managed policies that are attached to the specified IAM group.

An IAM group can also have inline policies embedded with it. To list the inline policies for a group, use ListGroupPolicies. For information about policies, see Managed policies and inline policies in the IAM User Guide.

You can paginate the results using the MaxItems and Marker parameters. You can use the PathPrefix parameter to limit the list of policies to only those matching the specified path prefix. If there are no policies attached to the specified group (or none that match the specified path prefix), the operation returns an empty list.

" }, "ListAttachedRolePolicies":{ "name":"ListAttachedRolePolicies", @@ -1339,7 +1346,7 @@ {"shape":"InvalidInputException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists all managed policies that are attached to the specified IAM role.

An IAM role can also have inline policies embedded with it. To list the inline policies for a role, use the ListRolePolicies API. For information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

You can paginate the results using the MaxItems and Marker parameters. You can use the PathPrefix parameter to limit the list of policies to only those matching the specified path prefix. If there are no policies attached to the specified role (or none that match the specified path prefix), the operation returns an empty list.

" + "documentation":"

Lists all managed policies that are attached to the specified IAM role.

An IAM role can also have inline policies embedded with it. To list the inline policies for a role, use ListRolePolicies. For information about policies, see Managed policies and inline policies in the IAM User Guide.

You can paginate the results using the MaxItems and Marker parameters. You can use the PathPrefix parameter to limit the list of policies to only those matching the specified path prefix. If there are no policies attached to the specified role (or none that match the specified path prefix), the operation returns an empty list.

" }, "ListAttachedUserPolicies":{ "name":"ListAttachedUserPolicies", @@ -1357,7 +1364,7 @@ {"shape":"InvalidInputException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists all managed policies that are attached to the specified IAM user.

An IAM user can also have inline policies embedded with it. To list the inline policies for a user, use the ListUserPolicies API. For information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

You can paginate the results using the MaxItems and Marker parameters. You can use the PathPrefix parameter to limit the list of policies to only those matching the specified path prefix. If there are no policies attached to the specified group (or none that match the specified path prefix), the operation returns an empty list.

" + "documentation":"

Lists all managed policies that are attached to the specified IAM user.

An IAM user can also have inline policies embedded with it. To list the inline policies for a user, use ListUserPolicies. For information about policies, see Managed policies and inline policies in the IAM User Guide.

You can paginate the results using the MaxItems and Marker parameters. You can use the PathPrefix parameter to limit the list of policies to only those matching the specified path prefix. If there are no policies attached to the specified user (or none that match the specified path prefix), the operation returns an empty list.

" }, "ListEntitiesForPolicy":{ "name":"ListEntitiesForPolicy", @@ -1392,7 +1399,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the names of the inline policies that are embedded in the specified IAM group.

An IAM group can also have managed policies attached to it. To list the managed policies that are attached to a group, use ListAttachedGroupPolicies. For more information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

You can paginate the results using the MaxItems and Marker parameters. If there are no inline policies embedded with the specified group, the operation returns an empty list.

" + "documentation":"

Lists the names of the inline policies that are embedded in the specified IAM group.

An IAM group can also have managed policies attached to it. To list the managed policies that are attached to a group, use ListAttachedGroupPolicies. For more information about policies, see Managed policies and inline policies in the IAM User Guide.

You can paginate the results using the MaxItems and Marker parameters. If there are no inline policies embedded with the specified group, the operation returns an empty list.

" }, "ListGroups":{ "name":"ListGroups", @@ -1427,6 +1434,23 @@ ], "documentation":"

Lists the IAM groups that the specified IAM user belongs to.

You can paginate the results using the MaxItems and Marker parameters.

" }, + "ListInstanceProfileTags":{ + "name":"ListInstanceProfileTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListInstanceProfileTagsRequest"}, + "output":{ + "shape":"ListInstanceProfileTagsResponse", + "resultWrapper":"ListInstanceProfileTagsResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Lists the tags that are attached to the specified IAM instance profile. The returned list of tags is sorted by tag key. For more information about tagging, see Tagging IAM resources in the IAM User Guide.
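
Since this operation is new in this revision of the model, a short boto3 sketch with a placeholder instance profile name may help; the response carries the tags under the Tags key.

    import boto3

    iam = boto3.client("iam")
    resp = iam.list_instance_profile_tags(
        InstanceProfileName="example-instance-profile")  # placeholder
    for tag in resp["Tags"]:
        print(tag["Key"], "=", tag["Value"])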

" + }, "ListInstanceProfiles":{ "name":"ListInstanceProfiles", "http":{ @@ -1441,7 +1465,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the instance profiles that have the specified path prefix. If there are none, the operation returns an empty list. For more information about instance profiles, go to About Instance Profiles.

You can paginate the results using the MaxItems and Marker parameters.

" + "documentation":"

Lists the instance profiles that have the specified path prefix. If there are none, the operation returns an empty list. For more information about instance profiles, see About instance profiles.

IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view all of the information for an instance profile, see GetInstanceProfile.

You can paginate the results using the MaxItems and Marker parameters.

" }, "ListInstanceProfilesForRole":{ "name":"ListInstanceProfilesForRole", @@ -1458,7 +1482,25 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the instance profiles that have the specified associated IAM role. If there are none, the operation returns an empty list. For more information about instance profiles, go to About Instance Profiles.

You can paginate the results using the MaxItems and Marker parameters.

" + "documentation":"

Lists the instance profiles that have the specified associated IAM role. If there are none, the operation returns an empty list. For more information about instance profiles, go to About instance profiles.

You can paginate the results using the MaxItems and Marker parameters.

" + }, + "ListMFADeviceTags":{ + "name":"ListMFADeviceTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListMFADeviceTagsRequest"}, + "output":{ + "shape":"ListMFADeviceTagsResponse", + "resultWrapper":"ListMFADeviceTagsResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Lists the tags that are attached to the specified IAM virtual multi-factor authentication (MFA) device. The returned list of tags is sorted by tag key. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" }, "ListMFADevices":{ "name":"ListMFADevices", @@ -1475,7 +1517,25 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the MFA devices for an IAM user. If the request includes a IAM user name, then this operation lists all the MFA devices associated with the specified user. If you do not specify a user name, IAM determines the user name implicitly based on the AWS access key ID signing the request for this API.

You can paginate the results using the MaxItems and Marker parameters.

" + "documentation":"

Lists the MFA devices for an IAM user. If the request includes an IAM user name, then this operation lists all the MFA devices associated with the specified user. If you do not specify a user name, IAM determines the user name implicitly based on the AWS access key ID signing the request for this operation.

You can paginate the results using the MaxItems and Marker parameters.

" + }, + "ListOpenIDConnectProviderTags":{ + "name":"ListOpenIDConnectProviderTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListOpenIDConnectProviderTagsRequest"}, + "output":{ + "shape":"ListOpenIDConnectProviderTagsResponse", + "resultWrapper":"ListOpenIDConnectProviderTagsResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Lists the tags that are attached to the specified OpenID Connect (OIDC)-compatible identity provider. The returned list of tags is sorted by tag key. For more information, see About web identity federation.

For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" }, "ListOpenIDConnectProviders":{ "name":"ListOpenIDConnectProviders", @@ -1491,7 +1551,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

Lists information about the IAM OpenID Connect (OIDC) provider resource objects defined in the AWS account.

" + "documentation":"

Lists information about the IAM OpenID Connect (OIDC) provider resource objects defined in the AWS account.

IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view all of the information for an OIDC provider, see GetOpenIDConnectProvider.

" }, "ListPolicies":{ "name":"ListPolicies", @@ -1507,7 +1567,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

Lists all the managed policies that are available in your AWS account, including your own customer-defined managed policies and all AWS managed policies.

You can filter the list of policies that is returned using the optional OnlyAttached, Scope, and PathPrefix parameters. For example, to list only the customer managed policies in your AWS account, set Scope to Local. To list only AWS managed policies, set Scope to AWS.

You can paginate the results using the MaxItems and Marker parameters.

For more information about managed policies, see Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

Lists all the managed policies that are available in your AWS account, including your own customer-defined managed policies and all AWS managed policies.

You can filter the list of policies that is returned using the optional OnlyAttached, Scope, and PathPrefix parameters. For example, to list only the customer managed policies in your AWS account, set Scope to Local. To list only AWS managed policies, set Scope to AWS.

You can paginate the results using the MaxItems and Marker parameters.

For more information about managed policies, see Managed policies and inline policies in the IAM User Guide.

IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view all of the information for a customer managed policy, see GetPolicy.
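
For the filtering and pagination behavior described here, a short sketch using the paginator that botocore generates from the paginators file; Scope="Local" restricts the listing to customer managed policies. The values are illustrative only.

    import boto3

    iam = boto3.client("iam")
    paginator = iam.get_paginator("list_policies")
    for page in paginator.paginate(Scope="Local", OnlyAttached=False):
        for policy in page["Policies"]:
            print(policy["PolicyName"], policy["Arn"])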

" }, "ListPoliciesGrantingServiceAccess":{ "name":"ListPoliciesGrantingServiceAccess", @@ -1524,7 +1584,25 @@ {"shape":"NoSuchEntityException"}, {"shape":"InvalidInputException"} ], - "documentation":"

Retrieves a list of policies that the IAM identity (user, group, or role) can use to access each specified service.

This operation does not use other policy types when determining whether a resource could access a service. These other policy types include resource-based policies, access control lists, AWS Organizations policies, IAM permissions boundaries, and AWS STS assume role policies. It only applies permissions policy logic. For more about the evaluation of policy types, see Evaluating Policies in the IAM User Guide.

The list of policies returned by the operation depends on the ARN of the identity that you provide.

For each managed policy, this operation returns the ARN and policy name. For each inline policy, it returns the policy name and the entity to which it is attached. Inline policies do not have an ARN. For more information about these policy types, see Managed Policies and Inline Policies in the IAM User Guide.

Policies that are attached to users and roles as permissions boundaries are not returned. To view which managed policy is currently used to set the permissions boundary for a user or role, use the GetUser or GetRole operations.

" + "documentation":"

Retrieves a list of policies that the IAM identity (user, group, or role) can use to access each specified service.

This operation does not use other policy types when determining whether a resource could access a service. These other policy types include resource-based policies, access control lists, AWS Organizations policies, IAM permissions boundaries, and AWS STS assume role policies. It only applies permissions policy logic. For more about the evaluation of policy types, see Evaluating policies in the IAM User Guide.

The list of policies returned by the operation depends on the ARN of the identity that you provide.

For each managed policy, this operation returns the ARN and policy name. For each inline policy, it returns the policy name and the entity to which it is attached. Inline policies do not have an ARN. For more information about these policy types, see Managed policies and inline policies in the IAM User Guide.

Policies that are attached to users and roles as permissions boundaries are not returned. To view which managed policy is currently used to set the permissions boundary for a user or role, use the GetUser or GetRole operations.
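
A hedged sketch of the call, with a placeholder user ARN and an arbitrary pair of service namespaces; each entry in the response groups the matching policies by namespace.

    import boto3

    iam = boto3.client("iam")
    resp = iam.list_policies_granting_service_access(
        Arn="arn:aws:iam::123456789012:user/example-user",  # placeholder
        ServiceNamespaces=["s3", "ec2"],
    )
    for entry in resp["PoliciesGrantingServiceAccess"]:
        for policy in entry["Policies"]:
            print(entry["ServiceNamespace"],
                  policy["PolicyType"], policy["PolicyName"])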

" + }, + "ListPolicyTags":{ + "name":"ListPolicyTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPolicyTagsRequest"}, + "output":{ + "shape":"ListPolicyTagsResponse", + "resultWrapper":"ListPolicyTagsResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Lists the tags that are attached to the specified IAM customer managed policy. The returned list of tags is sorted by tag key. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" }, "ListPolicyVersions":{ "name":"ListPolicyVersions", @@ -1542,7 +1620,7 @@ {"shape":"InvalidInputException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists information about the versions of the specified managed policy, including the version that is currently set as the policy's default version.

For more information about managed policies, see Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

Lists information about the versions of the specified managed policy, including the version that is currently set as the policy's default version.

For more information about managed policies, see Managed policies and inline policies in the IAM User Guide.

" }, "ListRolePolicies":{ "name":"ListRolePolicies", @@ -1559,7 +1637,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the names of the inline policies that are embedded in the specified IAM role.

An IAM role can also have managed policies attached to it. To list the managed policies that are attached to a role, use ListAttachedRolePolicies. For more information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

You can paginate the results using the MaxItems and Marker parameters. If there are no inline policies embedded with the specified role, the operation returns an empty list.

" + "documentation":"

Lists the names of the inline policies that are embedded in the specified IAM role.

An IAM role can also have managed policies attached to it. To list the managed policies that are attached to a role, use ListAttachedRolePolicies. For more information about policies, see Managed policies and inline policies in the IAM User Guide.

You can paginate the results using the MaxItems and Marker parameters. If there are no inline policies embedded with the specified role, the operation returns an empty list.

" }, "ListRoleTags":{ "name":"ListRoleTags", @@ -1576,7 +1654,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the tags that are attached to the specified role. The returned list of tags is sorted by tag key. For more information about tagging, see Tagging IAM Identities in the IAM User Guide.

" + "documentation":"

Lists the tags that are attached to the specified role. The returned list of tags is sorted by tag key. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" }, "ListRoles":{ "name":"ListRoles", @@ -1592,7 +1670,25 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the IAM roles that have the specified path prefix. If there are none, the operation returns an empty list. For more information about roles, go to Working with Roles.

You can paginate the results using the MaxItems and Marker parameters.

" + "documentation":"

Lists the IAM roles that have the specified path prefix. If there are none, the operation returns an empty list. For more information about roles, see Working with roles.

IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view all of the information for a role, see GetRole.

You can paginate the results using the MaxItems and Marker parameters.

" + }, + "ListSAMLProviderTags":{ + "name":"ListSAMLProviderTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSAMLProviderTagsRequest"}, + "output":{ + "shape":"ListSAMLProviderTagsResponse", + "resultWrapper":"ListSAMLProviderTagsResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Lists the tags that are attached to the specified Security Assertion Markup Language (SAML) identity provider. The returned list of tags is sorted by tag key. For more information, see About SAML 2.0-based federation.

For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" }, "ListSAMLProviders":{ "name":"ListSAMLProviders", @@ -1608,7 +1704,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the SAML provider resource objects defined in IAM in the account.

This operation requires Signature Version 4.

" + "documentation":"

Lists the SAML provider resource objects defined in IAM in the account. IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view all of the information for a SAML provider, see GetSAMLProvider.

This operation requires Signature Version 4.

" }, "ListSSHPublicKeys":{ "name":"ListSSHPublicKeys", @@ -1624,7 +1720,24 @@ "errors":[ {"shape":"NoSuchEntityException"} ], - "documentation":"

Returns information about the SSH public keys associated with the specified IAM user. If none exists, the operation returns an empty list.

The SSH public keys returned by this operation are used only for authenticating the IAM user to an AWS CodeCommit repository. For more information about using SSH keys to authenticate to an AWS CodeCommit repository, see Set up AWS CodeCommit for SSH Connections in the AWS CodeCommit User Guide.

Although each user is limited to a small number of keys, you can still paginate the results using the MaxItems and Marker parameters.

" + "documentation":"

Returns information about the SSH public keys associated with the specified IAM user. If none exists, the operation returns an empty list.

The SSH public keys returned by this operation are used only for authenticating the IAM user to an AWS CodeCommit repository. For more information about using SSH keys to authenticate to an AWS CodeCommit repository, see Set up AWS CodeCommit for SSH connections in the AWS CodeCommit User Guide.

Although each user is limited to a small number of keys, you can still paginate the results using the MaxItems and Marker parameters.

" + }, + "ListServerCertificateTags":{ + "name":"ListServerCertificateTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListServerCertificateTagsRequest"}, + "output":{ + "shape":"ListServerCertificateTagsResponse", + "resultWrapper":"ListServerCertificateTagsResult" + }, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Lists the tags that are attached to the specified IAM server certificate. The returned list of tags is sorted by tag key. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

For certificates in a Region supported by AWS Certificate Manager (ACM), we recommend that you don't use IAM server certificates. Instead, use ACM to provision, manage, and deploy your server certificates. For more information about IAM server certificates, see Working with server certificates in the IAM User Guide.

" }, "ListServerCertificates":{ "name":"ListServerCertificates", @@ -1640,7 +1753,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the server certificates stored in IAM that have the specified path prefix. If none exist, the operation returns an empty list.

You can paginate the results using the MaxItems and Marker parameters.

For more information about working with server certificates, see Working with Server Certificates in the IAM User Guide. This topic also includes a list of AWS services that can use the server certificates that you manage with IAM.

" + "documentation":"

Lists the server certificates stored in IAM that have the specified path prefix. If none exist, the operation returns an empty list.

You can paginate the results using the MaxItems and Marker parameters.

For more information about working with server certificates, see Working with server certificates in the IAM User Guide. This topic also includes a list of AWS services that can use the server certificates that you manage with IAM.

IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view all of the information for a server certificate, see GetServerCertificate.

" }, "ListServiceSpecificCredentials":{ "name":"ListServiceSpecificCredentials", @@ -1657,7 +1770,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceNotSupportedException"} ], - "documentation":"

Returns information about the service-specific credentials associated with the specified IAM user. If none exists, the operation returns an empty list. The service-specific credentials returned by this operation are used only for authenticating the IAM user to a specific service. For more information about using service-specific credentials to authenticate to an AWS service, see Set Up service-specific credentials in the AWS CodeCommit User Guide.

" + "documentation":"

Returns information about the service-specific credentials associated with the specified IAM user. If none exists, the operation returns an empty list. The service-specific credentials returned by this operation are used only for authenticating the IAM user to a specific service. For more information about using service-specific credentials to authenticate to an AWS service, see Set up service-specific credentials in the AWS CodeCommit User Guide.

" }, "ListSigningCertificates":{ "name":"ListSigningCertificates", @@ -1674,7 +1787,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Returns information about the signing certificates associated with the specified IAM user. If none exists, the operation returns an empty list.

Although each user is limited to a small number of signing certificates, you can still paginate the results using the MaxItems and Marker parameters.

If the UserName field is not specified, the user name is determined implicitly based on the AWS access key ID used to sign the request for this API. This operation works for access keys under the AWS account. Consequently, you can use this operation to manage AWS account root user credentials even if the AWS account has no associated users.

" + "documentation":"

Returns information about the signing certificates associated with the specified IAM user. If none exists, the operation returns an empty list.

Although each user is limited to a small number of signing certificates, you can still paginate the results using the MaxItems and Marker parameters.

If the UserName field is not specified, the user name is determined implicitly based on the AWS access key ID used to sign the request for this operation. This operation works for access keys under the AWS account. Consequently, you can use this operation to manage AWS account root user credentials even if the AWS account has no associated users.

" }, "ListUserPolicies":{ "name":"ListUserPolicies", @@ -1691,7 +1804,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the names of the inline policies embedded in the specified IAM user.

An IAM user can also have managed policies attached to it. To list the managed policies that are attached to a user, use ListAttachedUserPolicies. For more information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

You can paginate the results using the MaxItems and Marker parameters. If there are no inline policies embedded with the specified user, the operation returns an empty list.

" + "documentation":"

Lists the names of the inline policies embedded in the specified IAM user.

An IAM user can also have managed policies attached to it. To list the managed policies that are attached to a user, use ListAttachedUserPolicies. For more information about policies, see Managed policies and inline policies in the IAM User Guide.

You can paginate the results using the MaxItems and Marker parameters. If there are no inline policies embedded with the specified user, the operation returns an empty list.

" }, "ListUserTags":{ "name":"ListUserTags", @@ -1708,7 +1821,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the tags that are attached to the specified user. The returned list of tags is sorted by tag key. For more information about tagging, see Tagging IAM Identities in the IAM User Guide.

" + "documentation":"

Lists the tags that are attached to the specified IAM user. The returned list of tags is sorted by tag key. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" }, "ListUsers":{ "name":"ListUsers", @@ -1724,7 +1837,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the IAM users that have the specified path prefix. If no path prefix is specified, the operation returns all users in the AWS account. If there are none, the operation returns an empty list.

You can paginate the results using the MaxItems and Marker parameters.

" + "documentation":"

Lists the IAM users that have the specified path prefix. If no path prefix is specified, the operation returns all users in the AWS account. If there are none, the operation returns an empty list.

IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view all of the information for a user, see GetUser.

You can paginate the results using the MaxItems and Marker parameters.

" }, "ListVirtualMFADevices":{ "name":"ListVirtualMFADevices", @@ -1737,7 +1850,7 @@ "shape":"ListVirtualMFADevicesResponse", "resultWrapper":"ListVirtualMFADevicesResult" }, - "documentation":"

Lists the virtual MFA devices defined in the AWS account by assignment status. If you do not specify an assignment status, the operation returns a list of all virtual MFA devices. Assignment status can be Assigned, Unassigned, or Any.

You can paginate the results using the MaxItems and Marker parameters.

" + "documentation":"

Lists the virtual MFA devices defined in the AWS account by assignment status. If you do not specify an assignment status, the operation returns a list of all virtual MFA devices. Assignment status can be Assigned, Unassigned, or Any.

IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view all of the information for a virtual MFA device, see ListVirtualMFADevices.

You can paginate the results using the MaxItems and Marker parameters.
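
A small sketch of paging through the assigned devices with the generated paginator; AssignmentStatus is optional and, when omitted, behaves like Any.

    import boto3

    iam = boto3.client("iam")
    paginator = iam.get_paginator("list_virtual_mfa_devices")
    for page in paginator.paginate(AssignmentStatus="Assigned"):
        for device in page["VirtualMFADevices"]:
            print(device["SerialNumber"])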

" }, "PutGroupPolicy":{ "name":"PutGroupPolicy", @@ -1752,7 +1865,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Adds or updates an inline policy document that is embedded in the specified IAM group.

A user can also have managed policies attached to it. To attach a managed policy to a group, use AttachGroupPolicy. To create a new managed policy, use CreatePolicy. For information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

For information about limits on the number of inline policies that you can embed in a group, see Limitations on IAM Entities in the IAM User Guide.

Because policy documents can be large, you should use POST rather than GET when calling PutGroupPolicy. For general information about using the Query API with IAM, go to Making Query Requests in the IAM User Guide.

" + "documentation":"

Adds or updates an inline policy document that is embedded in the specified IAM group.

A group can also have managed policies attached to it. To attach a managed policy to a group, use AttachGroupPolicy. To create a new managed policy, use CreatePolicy. For information about policies, see Managed policies and inline policies in the IAM User Guide.

For information about the maximum number of inline policies that you can embed in a group, see IAM and STS quotas in the IAM User Guide.

Because policy documents can be large, you should use POST rather than GET when calling PutGroupPolicy. For general information about using the Query API with IAM, see Making query requests in the IAM User Guide.

" }, "PutRolePermissionsBoundary":{ "name":"PutRolePermissionsBoundary", @@ -1768,7 +1881,7 @@ {"shape":"PolicyNotAttachableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Adds or updates the policy that is specified as the IAM role's permissions boundary. You can use an AWS managed policy or a customer managed policy to set the boundary for a role. Use the boundary to control the maximum permissions that the role can have. Setting a permissions boundary is an advanced feature that can affect the permissions for the role.

You cannot set the boundary for a service-linked role.

Policies used as permissions boundaries do not provide permissions. You must also attach a permissions policy to the role. To learn how the effective permissions for a role are evaluated, see IAM JSON Policy Evaluation Logic in the IAM User Guide.

" + "documentation":"

Adds or updates the policy that is specified as the IAM role's permissions boundary. You can use an AWS managed policy or a customer managed policy to set the boundary for a role. Use the boundary to control the maximum permissions that the role can have. Setting a permissions boundary is an advanced feature that can affect the permissions for the role.

You cannot set the boundary for a service-linked role.

Policies used as permissions boundaries do not provide permissions. You must also attach a permissions policy to the role. To learn how the effective permissions for a role are evaluated, see IAM JSON policy evaluation logic in the IAM User Guide.
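
The boundary is passed by ARN, as in this sketch; both the role name and the boundary policy ARN below are placeholders, and the boundary policy must already exist.

    import boto3

    iam = boto3.client("iam")
    iam.put_role_permissions_boundary(
        RoleName="example-role",  # placeholder
        PermissionsBoundary="arn:aws:iam::123456789012:policy/ExampleBoundary",
    )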

" }, "PutRolePolicy":{ "name":"PutRolePolicy", @@ -1784,7 +1897,7 @@ {"shape":"UnmodifiableEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Adds or updates an inline policy document that is embedded in the specified IAM role.

When you embed an inline policy in a role, the inline policy is used as part of the role's access (permissions) policy. The role's trust policy is created at the same time as the role, using CreateRole. You can update a role's trust policy using UpdateAssumeRolePolicy. For more information about IAM roles, go to Using Roles to Delegate Permissions and Federate Identities.

A role can also have a managed policy attached to it. To attach a managed policy to a role, use AttachRolePolicy. To create a new managed policy, use CreatePolicy. For information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

For information about limits on the number of inline policies that you can embed with a role, see Limitations on IAM Entities in the IAM User Guide.

Because policy documents can be large, you should use POST rather than GET when calling PutRolePolicy. For general information about using the Query API with IAM, go to Making Query Requests in the IAM User Guide.

" + "documentation":"

Adds or updates an inline policy document that is embedded in the specified IAM role.

When you embed an inline policy in a role, the inline policy is used as part of the role's access (permissions) policy. The role's trust policy is created at the same time as the role, using CreateRole. You can update a role's trust policy using UpdateAssumeRolePolicy. For more information about IAM roles, see Using roles to delegate permissions and federate identities.

A role can also have a managed policy attached to it. To attach a managed policy to a role, use AttachRolePolicy. To create a new managed policy, use CreatePolicy. For information about policies, see Managed policies and inline policies in the IAM User Guide.

For information about the maximum number of inline policies that you can embed with a role, see IAM and STS quotas in the IAM User Guide.

Because policy documents can be large, you should use POST rather than GET when calling PutRolePolicy. For general information about using the Query API with IAM, see Making query requests in the IAM User Guide.
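
In the Python SDK the POST concern is handled for you, since botocore already sends query-protocol requests as POST. The sketch below, with placeholder names, shows the document being serialized to a JSON string before it is embedded in the role.

    import json

    import boto3

    iam = boto3.client("iam")
    policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Action": "s3:ListBucket", "Resource": "*"},
        ],
    }
    iam.put_role_policy(
        RoleName="example-role",              # placeholder
        PolicyName="example-inline-policy",   # placeholder
        PolicyDocument=json.dumps(policy),
    )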

" }, "PutUserPermissionsBoundary":{ "name":"PutUserPermissionsBoundary", @@ -1799,7 +1912,7 @@ {"shape":"PolicyNotAttachableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Adds or updates the policy that is specified as the IAM user's permissions boundary. You can use an AWS managed policy or a customer managed policy to set the boundary for a user. Use the boundary to control the maximum permissions that the user can have. Setting a permissions boundary is an advanced feature that can affect the permissions for the user.

Policies that are used as permissions boundaries do not provide permissions. You must also attach a permissions policy to the user. To learn how the effective permissions for a user are evaluated, see IAM JSON Policy Evaluation Logic in the IAM User Guide.

" + "documentation":"

Adds or updates the policy that is specified as the IAM user's permissions boundary. You can use an AWS managed policy or a customer managed policy to set the boundary for a user. Use the boundary to control the maximum permissions that the user can have. Setting a permissions boundary is an advanced feature that can affect the permissions for the user.

Policies that are used as permissions boundaries do not provide permissions. You must also attach a permissions policy to the user. To learn how the effective permissions for a user are evaluated, see IAM JSON policy evaluation logic in the IAM User Guide.

" }, "PutUserPolicy":{ "name":"PutUserPolicy", @@ -1814,7 +1927,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Adds or updates an inline policy document that is embedded in the specified IAM user.

An IAM user can also have a managed policy attached to it. To attach a managed policy to a user, use AttachUserPolicy. To create a new managed policy, use CreatePolicy. For information about policies, see Managed Policies and Inline Policies in the IAM User Guide.

For information about limits on the number of inline policies that you can embed in a user, see Limitations on IAM Entities in the IAM User Guide.

Because policy documents can be large, you should use POST rather than GET when calling PutUserPolicy. For general information about using the Query API with IAM, go to Making Query Requests in the IAM User Guide.

" + "documentation":"

Adds or updates an inline policy document that is embedded in the specified IAM user.

An IAM user can also have a managed policy attached to it. To attach a managed policy to a user, use AttachUserPolicy. To create a new managed policy, use CreatePolicy. For information about policies, see Managed policies and inline policies in the IAM User Guide.

For information about the maximum number of inline policies that you can embed in a user, see IAM and STS quotas in the IAM User Guide.

Because policy documents can be large, you should use POST rather than GET when calling PutUserPolicy. For general information about using the Query API with IAM, see Making query requests in the IAM User Guide.
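
As a rough usage sketch (boto3 surfaces this operation as put_user_policy); the user name, policy name, and policy statement are placeholders:

    import json
    import boto3

    iam = boto3.client("iam")

    # Hypothetical inline policy scoped to a single SQS queue.
    policy = json.dumps({
        "Version": "2012-10-17",
        "Statement": [{"Effect": "Allow", "Action": "sqs:SendMessage",
                       "Resource": "arn:aws:sqs:us-east-1:123456789012:example-queue"}],
    })

    # Embed the inline policy directly in the user, as opposed to attaching a managed policy.
    iam.put_user_policy(
        UserName="example-user",           # placeholder
        PolicyName="ExampleInlinePolicy",  # placeholder
        PolicyDocument=policy,
    )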

" }, "RemoveClientIDFromOpenIDConnectProvider":{ "name":"RemoveClientIDFromOpenIDConnectProvider", @@ -1843,7 +1956,7 @@ {"shape":"UnmodifiableEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Removes the specified IAM role from the specified EC2 instance profile.

Make sure that you do not have any Amazon EC2 instances running with the role you are about to remove from the instance profile. Removing a role from an instance profile that is associated with a running instance might break any applications running on the instance.

For more information about IAM roles, go to Working with Roles. For more information about instance profiles, go to About Instance Profiles.

" + "documentation":"

Removes the specified IAM role from the specified EC2 instance profile.

Make sure that you do not have any Amazon EC2 instances running with the role you are about to remove from the instance profile. Removing a role from an instance profile that is associated with a running instance might break any applications running on the instance.

For more information about IAM roles, see Working with roles. For more information about instance profiles, see About instance profiles.

" }, "RemoveUserFromGroup":{ "name":"RemoveUserFromGroup", @@ -1888,7 +2001,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Synchronizes the specified MFA device with its IAM resource object on the AWS servers.

For more information about creating and working with virtual MFA devices, go to Using a Virtual MFA Device in the IAM User Guide.

" + "documentation":"

Synchronizes the specified MFA device with its IAM resource object on the AWS servers.

For more information about creating and working with virtual MFA devices, see Using a virtual MFA device in the IAM User Guide.

" }, "SetDefaultPolicyVersion":{ "name":"SetDefaultPolicyVersion", @@ -1903,7 +2016,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Sets the specified version of the specified policy as the policy's default (operative) version.

This operation affects all users, groups, and roles that the policy is attached to. To list the users, groups, and roles that the policy is attached to, use the ListEntitiesForPolicy API.

For information about managed policies, see Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

Sets the specified version of the specified policy as the policy's default (operative) version.

This operation affects all users, groups, and roles that the policy is attached to. To list the users, groups, and roles that the policy is attached to, use ListEntitiesForPolicy.

For information about managed policies, see Managed policies and inline policies in the IAM User Guide.
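
A minimal boto3 sketch of promoting a policy version; the policy ARN and version ID are placeholders:

    import boto3

    iam = boto3.client("iam")

    # Make version v2 of a customer managed policy the operative (default) version.
    iam.set_default_policy_version(
        PolicyArn="arn:aws:iam::123456789012:policy/ExamplePolicy",  # placeholder ARN
        VersionId="v2",                                              # placeholder version id
    )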

" }, "SetSecurityTokenServicePreferences":{ "name":"SetSecurityTokenServicePreferences", @@ -1915,7 +2028,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

Sets the specified version of the global endpoint token as the token version used for the AWS account.

By default, AWS Security Token Service (STS) is available as a global service, and all STS requests go to a single endpoint at https://sts.amazonaws.com. AWS recommends using Regional STS endpoints to reduce latency, build in redundancy, and increase session token availability. For information about Regional endpoints for STS, see AWS Regions and Endpoints in the AWS General Reference.

If you make an STS call to the global endpoint, the resulting session tokens might be valid in some Regions but not others. It depends on the version that is set in this operation. Version 1 tokens are valid only in AWS Regions that are available by default. These tokens do not work in manually enabled Regions, such as Asia Pacific (Hong Kong). Version 2 tokens are valid in all Regions. However, version 2 tokens are longer and might affect systems where you temporarily store tokens. For information, see Activating and Deactivating STS in an AWS Region in the IAM User Guide.

To view the current session token version, see the GlobalEndpointTokenVersion entry in the response of the GetAccountSummary operation.

" + "documentation":"

Sets the specified version of the global endpoint token as the token version used for the AWS account.

By default, AWS Security Token Service (STS) is available as a global service, and all STS requests go to a single endpoint at https://sts.amazonaws.com. AWS recommends using Regional STS endpoints to reduce latency, build in redundancy, and increase session token availability. For information about Regional endpoints for STS, see AWS Security Token Service endpoints and quotas in the AWS General Reference.

If you make an STS call to the global endpoint, the resulting session tokens might be valid in some Regions but not others. It depends on the version that is set in this operation. Version 1 tokens are valid only in AWS Regions that are available by default. These tokens do not work in manually enabled Regions, such as Asia Pacific (Hong Kong). Version 2 tokens are valid in all Regions. However, version 2 tokens are longer and might affect systems where you temporarily store tokens. For information, see Activating and deactivating STS in an AWS region in the IAM User Guide.

To view the current session token version, see the GlobalEndpointTokenVersion entry in the response of the GetAccountSummary operation.
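
A hedged boto3 sketch, assuming the standard v1Token/v2Token values for this setting; reading the current value back through GetAccountSummary is shown for illustration:

    import boto3

    iam = boto3.client("iam")

    # Switch the account to version 2 session tokens, which are valid in all Regions
    # (including manually enabled ones) but are longer than version 1 tokens.
    iam.set_security_token_service_preferences(GlobalEndpointTokenVersion="v2Token")

    # The current setting is reported by GetAccountSummary.
    summary = iam.get_account_summary()
    print(summary["SummaryMap"].get("GlobalEndpointTokenVersion"))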

" }, "SimulateCustomPolicy":{ "name":"SimulateCustomPolicy", @@ -1932,7 +2045,7 @@ {"shape":"InvalidInputException"}, {"shape":"PolicyEvaluationException"} ], - "documentation":"

Simulate how a set of IAM policies and optionally a resource-based policy works with a list of API operations and AWS resources to determine the policies' effective permissions. The policies are provided as strings.

The simulation does not perform the API operations; it only checks the authorization to determine if the simulated policies allow or deny the operations.

If you want to simulate existing policies that are attached to an IAM user, group, or role, use SimulatePrincipalPolicy instead.

Context keys are variables that are maintained by AWS and its services and which provide details about the context of an API query request. You can use the Condition element of an IAM policy to evaluate context keys. To get the list of context keys that the policies require for correct simulation, use GetContextKeysForCustomPolicy.

If the output is long, you can use MaxItems and Marker parameters to paginate the results.

" + "documentation":"

Simulate how a set of IAM policies and optionally a resource-based policy works with a list of API operations and AWS resources to determine the policies' effective permissions. The policies are provided as strings.

The simulation does not perform the API operations; it only checks the authorization to determine if the simulated policies allow or deny the operations. You can simulate resources that don't exist in your account.

If you want to simulate existing policies that are attached to an IAM user, group, or role, use SimulatePrincipalPolicy instead.

Context keys are variables that are maintained by AWS and its services and which provide details about the context of an API query request. You can use the Condition element of an IAM policy to evaluate context keys. To get the list of context keys that the policies require for correct simulation, use GetContextKeysForCustomPolicy.

If the output is long, you can use MaxItems and Marker parameters to paginate the results.

For more information about using the policy simulator, see Testing IAM policies with the IAM policy simulator in the IAM User Guide.
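
A minimal boto3 sketch of a custom-policy simulation; the policy string, actions, and resource ARN are placeholders, and the resources do not need to exist:

    import json
    import boto3

    iam = boto3.client("iam")

    # Hypothetical policy to evaluate, supplied as a string.
    policy = json.dumps({
        "Version": "2012-10-17",
        "Statement": [{"Effect": "Allow", "Action": "s3:GetObject",
                       "Resource": "arn:aws:s3:::example-bucket/*"}],
    })

    # Ask the simulator whether the policy allows the listed actions on the resource.
    result = iam.simulate_custom_policy(
        PolicyInputList=[policy],
        ActionNames=["s3:GetObject", "s3:PutObject"],
        ResourceArns=["arn:aws:s3:::example-bucket/report.csv"],
    )
    for evaluation in result["EvaluationResults"]:
        print(evaluation["EvalActionName"], evaluation["EvalDecision"])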

" }, "SimulatePrincipalPolicy":{ "name":"SimulatePrincipalPolicy", @@ -1950,7 +2063,71 @@ {"shape":"InvalidInputException"}, {"shape":"PolicyEvaluationException"} ], - "documentation":"

Simulate how a set of IAM policies attached to an IAM entity works with a list of API operations and AWS resources to determine the policies' effective permissions. The entity can be an IAM user, group, or role. If you specify a user, then the simulation also includes all of the policies that are attached to groups that the user belongs to.

You can optionally include a list of one or more additional policies specified as strings to include in the simulation. If you want to simulate only policies specified as strings, use SimulateCustomPolicy instead.

You can also optionally include one resource-based policy to be evaluated with each of the resources included in the simulation.

The simulation does not perform the API operations; it only checks the authorization to determine if the simulated policies allow or deny the operations.

Note: This API discloses information about the permissions granted to other users. If you do not want users to see other user's permissions, then consider allowing them to use SimulateCustomPolicy instead.

Context keys are variables maintained by AWS and its services that provide details about the context of an API query request. You can use the Condition element of an IAM policy to evaluate context keys. To get the list of context keys that the policies require for correct simulation, use GetContextKeysForPrincipalPolicy.

If the output is long, you can use the MaxItems and Marker parameters to paginate the results.

" + "documentation":"

Simulate how a set of IAM policies attached to an IAM entity works with a list of API operations and AWS resources to determine the policies' effective permissions. The entity can be an IAM user, group, or role. If you specify a user, then the simulation also includes all of the policies that are attached to groups that the user belongs to. You can simulate resources that don't exist in your account.

You can optionally include a list of one or more additional policies specified as strings to include in the simulation. If you want to simulate only policies specified as strings, use SimulateCustomPolicy instead.

You can also optionally include one resource-based policy to be evaluated with each of the resources included in the simulation.

The simulation does not perform the API operations; it only checks the authorization to determine if the simulated policies allow or deny the operations.

Note: This operation discloses information about the permissions granted to other users. If you do not want users to see other users' permissions, then consider allowing them to use SimulateCustomPolicy instead.

Context keys are variables maintained by AWS and its services that provide details about the context of an API query request. You can use the Condition element of an IAM policy to evaluate context keys. To get the list of context keys that the policies require for correct simulation, use GetContextKeysForPrincipalPolicy.

If the output is long, you can use the MaxItems and Marker parameters to paginate the results.

For more information about using the policy simulator, see Testing IAM policies with the IAM policy simulator in the IAM User Guide.

" + }, + "TagInstanceProfile":{ + "name":"TagInstanceProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagInstanceProfileRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Adds one or more tags to an IAM instance profile. If a tag with the same key name already exists, then that tag is overwritten with the new value.

Each tag consists of a key name and an associated value. By assigning tags to your resources, you can do the following:

" + }, + "TagMFADevice":{ + "name":"TagMFADevice", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagMFADeviceRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Adds one or more tags to an IAM virtual multi-factor authentication (MFA) device. If a tag with the same key name already exists, then that tag is overwritten with the new value.

A tag consists of a key name and an associated value. By assigning tags to your resources, you can do the following:

" + }, + "TagOpenIDConnectProvider":{ + "name":"TagOpenIDConnectProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagOpenIDConnectProviderRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidInputException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Adds one or more tags to an OpenID Connect (OIDC)-compatible identity provider. For more information about these providers, see About web identity federation. If a tag with the same key name already exists, then that tag is overwritten with the new value.

A tag consists of a key name and an associated value. By assigning tags to your resources, you can do the following:

" + }, + "TagPolicy":{ + "name":"TagPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagPolicyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidInputException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Adds one or more tags to an IAM customer managed policy. If a tag with the same key name already exists, then that tag is overwritten with the new value.

A tag consists of a key name and an associated value. By assigning tags to your resources, you can do the following:

" }, "TagRole":{ "name":"TagRole", @@ -1966,7 +2143,39 @@ {"shape":"ConcurrentModificationException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Adds one or more tags to an IAM role. The role can be a regular role or a service-linked role. If a tag with the same key name already exists, then that tag is overwritten with the new value.

A tag consists of a key name and an associated value. By assigning tags to your resources, you can do the following:

For more information about tagging, see Tagging IAM Identities in the IAM User Guide.

" + "documentation":"

Adds one or more tags to an IAM role. The role can be a regular role or a service-linked role. If a tag with the same key name already exists, then that tag is overwritten with the new value.

A tag consists of a key name and an associated value. By assigning tags to your resources, you can do the following:

For more information about tagging, see Tagging IAM identities in the IAM User Guide.
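
A hedged boto3 sketch of tagging and later untagging a role; the role name and tag keys/values are placeholders:

    import boto3

    iam = boto3.client("iam")

    # Add (or overwrite) tags on a role; a duplicate key replaces the existing value.
    iam.tag_role(
        RoleName="ExampleRole",  # placeholder
        Tags=[{"Key": "team", "Value": "payments"}, {"Key": "env", "Value": "prod"}],
    )

    # Remove a tag later by key with the corresponding untag operation.
    iam.untag_role(RoleName="ExampleRole", TagKeys=["env"])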

" + }, + "TagSAMLProvider":{ + "name":"TagSAMLProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagSAMLProviderRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidInputException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Adds one or more tags to a Security Assertion Markup Language (SAML) identity provider. For more information about these providers, see About SAML 2.0-based federation. If a tag with the same key name already exists, then that tag is overwritten with the new value.

A tag consists of a key name and an associated value. By assigning tags to your resources, you can do the following:

" + }, + "TagServerCertificate":{ + "name":"TagServerCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagServerCertificateRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Adds one or more tags to an IAM server certificate. If a tag with the same key name already exists, then that tag is overwritten with the new value.

For certificates in a Region supported by AWS Certificate Manager (ACM), we recommend that you don't use IAM server certificates. Instead, use ACM to provision, manage, and deploy your server certificates. For more information about IAM server certificates, see Working with server certificates in the IAM User Guide.

A tag consists of a key name and an associated value. By assigning tags to your resources, you can do the following:

" }, "TagUser":{ "name":"TagUser", @@ -1982,7 +2191,67 @@ {"shape":"ConcurrentModificationException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Adds one or more tags to an IAM user. If a tag with the same key name already exists, then that tag is overwritten with the new value.

A tag consists of a key name and an associated value. By assigning tags to your resources, you can do the following:

For more information about tagging, see Tagging IAM Identities in the IAM User Guide.

" + "documentation":"

Adds one or more tags to an IAM user. If a tag with the same key name already exists, then that tag is overwritten with the new value.

A tag consists of a key name and an associated value. By assigning tags to your resources, you can do the following:

For more information about tagging, see Tagging IAM identities in the IAM User Guide.

" + }, + "UntagInstanceProfile":{ + "name":"UntagInstanceProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagInstanceProfileRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Removes the specified tags from the IAM instance profile. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" + }, + "UntagMFADevice":{ + "name":"UntagMFADevice", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagMFADeviceRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Removes the specified tags from the IAM virtual multi-factor authentication (MFA) device. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" + }, + "UntagOpenIDConnectProvider":{ + "name":"UntagOpenIDConnectProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagOpenIDConnectProviderRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Removes the specified tags from the specified OpenID Connect (OIDC)-compatible identity provider in IAM. For more information about OIDC providers, see About web identity federation. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" + }, + "UntagPolicy":{ + "name":"UntagPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagPolicyRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Removes the specified tags from the customer managed policy. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" }, "UntagRole":{ "name":"UntagRole", @@ -1996,7 +2265,37 @@ {"shape":"ConcurrentModificationException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Removes the specified tags from the role. For more information about tagging, see Tagging IAM Identities in the IAM User Guide.

" + "documentation":"

Removes the specified tags from the role. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" + }, + "UntagSAMLProvider":{ + "name":"UntagSAMLProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagSAMLProviderRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Removes the specified tags from the specified Security Assertion Markup Language (SAML) identity provider in IAM. For more information about these providers, see About SAML 2.0-based federation. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" + }, + "UntagServerCertificate":{ + "name":"UntagServerCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagServerCertificateRequest"}, + "errors":[ + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Removes the specified tags from the IAM server certificate. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

For certificates in a Region supported by AWS Certificate Manager (ACM), we recommend that you don't use IAM server certificates. Instead, use ACM to provision, manage, and deploy your server certificates. For more information about IAM server certificates, see Working with server certificates in the IAM User Guide.

" }, "UntagUser":{ "name":"UntagUser", @@ -2010,7 +2309,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Removes the specified tags from the user. For more information about tagging, see Tagging IAM Identities in the IAM User Guide.

" + "documentation":"

Removes the specified tags from the user. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" }, "UpdateAccessKey":{ "name":"UpdateAccessKey", @@ -2024,7 +2323,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Changes the status of the specified access key from Active to Inactive, or vice versa. This operation can be used to disable a user's key as part of a key rotation workflow.

If the UserName is not specified, the user name is determined implicitly based on the AWS access key ID used to sign the request. This operation works for access keys under the AWS account. Consequently, you can use this operation to manage AWS account root user credentials even if the AWS account has no associated users.

For information about rotating keys, see Managing Keys and Certificates in the IAM User Guide.

" + "documentation":"

Changes the status of the specified access key from Active to Inactive, or vice versa. This operation can be used to disable a user's key as part of a key rotation workflow.

If the UserName is not specified, the user name is determined implicitly based on the AWS access key ID used to sign the request. This operation works for access keys under the AWS account. Consequently, you can use this operation to manage AWS account root user credentials even if the AWS account has no associated users.

For information about rotating keys, see Managing keys and certificates in the IAM User Guide.
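
A minimal boto3 sketch of deactivating a key during rotation; the user name and access key ID are placeholders:

    import boto3

    iam = boto3.client("iam")

    # Deactivate the old key; it remains listed but can no longer sign requests.
    iam.update_access_key(
        UserName="example-user",             # omit to act on the requesting credentials' owner
        AccessKeyId="AKIAIOSFODNN7EXAMPLE",  # placeholder key id
        Status="Inactive",
    )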

" }, "UpdateAccountPasswordPolicy":{ "name":"UpdateAccountPasswordPolicy", @@ -2039,7 +2338,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Updates the password policy settings for the AWS account.

For more information about using a password policy, see Managing an IAM Password Policy in the IAM User Guide.

" + "documentation":"

Updates the password policy settings for the AWS account.

For more information about using a password policy, see Managing an IAM password policy in the IAM User Guide.

" }, "UpdateAssumeRolePolicy":{ "name":"UpdateAssumeRolePolicy", @@ -2055,7 +2354,7 @@ {"shape":"UnmodifiableEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Updates the policy that grants an IAM entity permission to assume a role. This is typically referred to as the \"role trust policy\". For more information about roles, go to Using Roles to Delegate Permissions and Federate Identities.

" + "documentation":"

Updates the policy that grants an IAM entity permission to assume a role. This is typically referred to as the \"role trust policy\". For more information about roles, see Using roles to delegate permissions and federate identities.
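
A hedged boto3 sketch of replacing a role trust policy; the role name and the trust policy below are placeholders:

    import json
    import boto3

    iam = boto3.client("iam")

    # Hypothetical trust policy letting EC2 instances assume the role.
    trust_policy = {
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": {"Service": "ec2.amazonaws.com"},
            "Action": "sts:AssumeRole",
        }],
    }

    # Replace the role's trust policy (the policy that controls who may assume it).
    iam.update_assume_role_policy(
        RoleName="ExampleRole",                  # placeholder
        PolicyDocument=json.dumps(trust_policy),
    )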

" }, "UpdateGroup":{ "name":"UpdateGroup", @@ -2070,7 +2369,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Updates the name and/or the path of the specified IAM group.

You should understand the implications of changing a group's path or name. For more information, see Renaming Users and Groups in the IAM User Guide.

The person making the request (the principal), must have permission to change the role group with the old name and the new name. For example, to change the group named Managers to MGRs, the principal must have a policy that allows them to update both groups. If the principal has permission to update the Managers group, but not the MGRs group, then the update fails. For more information about permissions, see Access Management.

" + "documentation":"

Updates the name and/or the path of the specified IAM group.

You should understand the implications of changing a group's path or name. For more information, see Renaming users and groups in the IAM User Guide.

The person making the request (the principal) must have permission to change the group with the old name and the new name. For example, to change the group named Managers to MGRs, the principal must have a policy that allows them to update both groups. If the principal has permission to update the Managers group, but not the MGRs group, then the update fails. For more information about permissions, see Access management.

" }, "UpdateLoginProfile":{ "name":"UpdateLoginProfile", @@ -2086,7 +2385,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Changes the password for the specified IAM user.

IAM users can change their own passwords by calling ChangePassword. For more information about modifying passwords, see Managing Passwords in the IAM User Guide.

" + "documentation":"

Changes the password for the specified IAM user. You can use the AWS CLI, the AWS API, or the Users page in the IAM console to change the password for any IAM user. Use ChangePassword to change your own password in the My Security Credentials page in the AWS Management Console.

For more information about modifying passwords, see Managing passwords in the IAM User Guide.

" }, "UpdateOpenIDConnectProviderThumbprint":{ "name":"UpdateOpenIDConnectProviderThumbprint", @@ -2167,7 +2466,7 @@ "errors":[ {"shape":"NoSuchEntityException"} ], - "documentation":"

Sets the status of an IAM user's SSH public key to active or inactive. SSH public keys that are inactive cannot be used for authentication. This operation can be used to disable a user's SSH public key as part of a key rotation work flow.

The SSH public key affected by this operation is used only for authenticating the associated IAM user to an AWS CodeCommit repository. For more information about using SSH keys to authenticate to an AWS CodeCommit repository, see Set up AWS CodeCommit for SSH Connections in the AWS CodeCommit User Guide.

" + "documentation":"

Sets the status of an IAM user's SSH public key to active or inactive. SSH public keys that are inactive cannot be used for authentication. This operation can be used to disable a user's SSH public key as part of a key rotation workflow.

The SSH public key affected by this operation is used only for authenticating the associated IAM user to an AWS CodeCommit repository. For more information about using SSH keys to authenticate to an AWS CodeCommit repository, see Set up AWS CodeCommit for SSH connections in the AWS CodeCommit User Guide.

" }, "UpdateServerCertificate":{ "name":"UpdateServerCertificate", @@ -2182,7 +2481,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Updates the name and/or the path of the specified server certificate stored in IAM.

For more information about working with server certificates, see Working with Server Certificates in the IAM User Guide. This topic also includes a list of AWS services that can use the server certificates that you manage with IAM.

You should understand the implications of changing a server certificate's path or name. For more information, see Renaming a Server Certificate in the IAM User Guide.

The person making the request (the principal), must have permission to change the server certificate with the old name and the new name. For example, to change the certificate named ProductionCert to ProdCert, the principal must have a policy that allows them to update both certificates. If the principal has permission to update the ProductionCert group, but not the ProdCert certificate, then the update fails. For more information about permissions, see Access Management in the IAM User Guide.

" + "documentation":"

Updates the name and/or the path of the specified server certificate stored in IAM.

For more information about working with server certificates, see Working with server certificates in the IAM User Guide. This topic also includes a list of AWS services that can use the server certificates that you manage with IAM.

You should understand the implications of changing a server certificate's path or name. For more information, see Renaming a server certificate in the IAM User Guide.

The person making the request (the principal) must have permission to change the server certificate with the old name and the new name. For example, to change the certificate named ProductionCert to ProdCert, the principal must have a policy that allows them to update both certificates. If the principal has permission to update the ProductionCert certificate, but not the ProdCert certificate, then the update fails. For more information about permissions, see Access management in the IAM User Guide.

" }, "UpdateServiceSpecificCredential":{ "name":"UpdateServiceSpecificCredential", @@ -2225,7 +2524,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Updates the name and/or the path of the specified IAM user.

You should understand the implications of changing an IAM user's path or name. For more information, see Renaming an IAM User and Renaming an IAM Group in the IAM User Guide.

To change a user name, the requester must have appropriate permissions on both the source object and the target object. For example, to change Bob to Robert, the entity making the request must have permission on Bob and Robert, or must have permission on all (*). For more information about permissions, see Permissions and Policies.

" + "documentation":"

Updates the name and/or the path of the specified IAM user.

You should understand the implications of changing an IAM user's path or name. For more information, see Renaming an IAM user and Renaming an IAM group in the IAM User Guide.

To change a user name, the requester must have appropriate permissions on both the source object and the target object. For example, to change Bob to Robert, the entity making the request must have permission on Bob and Robert, or must have permission on all (*). For more information about permissions, see Permissions and policies.

" }, "UploadSSHPublicKey":{ "name":"UploadSSHPublicKey", @@ -2245,7 +2544,7 @@ {"shape":"DuplicateSSHPublicKeyException"}, {"shape":"UnrecognizedPublicKeyEncodingException"} ], - "documentation":"

Uploads an SSH public key and associates it with the specified IAM user.

The SSH public key uploaded by this operation can be used only for authenticating the associated IAM user to an AWS CodeCommit repository. For more information about using SSH keys to authenticate to an AWS CodeCommit repository, see Set up AWS CodeCommit for SSH Connections in the AWS CodeCommit User Guide.

" + "documentation":"

Uploads an SSH public key and associates it with the specified IAM user.

The SSH public key uploaded by this operation can be used only for authenticating the associated IAM user to an AWS CodeCommit repository. For more information about using SSH keys to authenticate to an AWS CodeCommit repository, see Set up AWS CodeCommit for SSH connections in the AWS CodeCommit User Guide.
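
A minimal boto3 sketch of associating an SSH public key with a user for CodeCommit; the user name and key file path are placeholders:

    import boto3

    iam = boto3.client("iam")

    # Read an existing OpenSSH public key from disk and attach it to the user.
    with open("id_rsa.pub") as key_file:      # placeholder path
        iam.upload_ssh_public_key(
            UserName="example-user",          # placeholder
            SSHPublicKeyBody=key_file.read(),
        )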

" }, "UploadServerCertificate":{ "name":"UploadServerCertificate", @@ -2260,12 +2559,14 @@ }, "errors":[ {"shape":"LimitExceededException"}, + {"shape":"InvalidInputException"}, {"shape":"EntityAlreadyExistsException"}, {"shape":"MalformedCertificateException"}, {"shape":"KeyPairMismatchException"}, + {"shape":"ConcurrentModificationException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Uploads a server certificate entity for the AWS account. The server certificate entity includes a public key certificate, a private key, and an optional certificate chain, which should all be PEM-encoded.

We recommend that you use AWS Certificate Manager to provision, manage, and deploy your server certificates. With ACM you can request a certificate, deploy it to AWS resources, and let ACM handle certificate renewals for you. Certificates provided by ACM are free. For more information about using ACM, see the AWS Certificate Manager User Guide.

For more information about working with server certificates, see Working with Server Certificates in the IAM User Guide. This topic includes a list of AWS services that can use the server certificates that you manage with IAM.

For information about the number of server certificates you can upload, see Limitations on IAM Entities and Objects in the IAM User Guide.

Because the body of the public key certificate, private key, and the certificate chain can be large, you should use POST rather than GET when calling UploadServerCertificate. For information about setting up signatures and authorization through the API, go to Signing AWS API Requests in the AWS General Reference. For general information about using the Query API with IAM, go to Calling the API by Making HTTP Query Requests in the IAM User Guide.

" + "documentation":"

Uploads a server certificate entity for the AWS account. The server certificate entity includes a public key certificate, a private key, and an optional certificate chain, which should all be PEM-encoded.

We recommend that you use AWS Certificate Manager to provision, manage, and deploy your server certificates. With ACM you can request a certificate, deploy it to AWS resources, and let ACM handle certificate renewals for you. Certificates provided by ACM are free. For more information about using ACM, see the AWS Certificate Manager User Guide.

For more information about working with server certificates, see Working with server certificates in the IAM User Guide. This topic includes a list of AWS services that can use the server certificates that you manage with IAM.

For information about the number of server certificates you can upload, see IAM and STS quotas in the IAM User Guide.

Because the body of the public key certificate, private key, and the certificate chain can be large, you should use POST rather than GET when calling UploadServerCertificate. For information about setting up signatures and authorization through the API, see Signing AWS API requests in the AWS General Reference. For general information about using the Query API with IAM, see Calling the API by making HTTP query requests in the IAM User Guide.
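
A hedged boto3 sketch of uploading PEM-encoded certificate material; the certificate name and file paths are placeholders, and the chain is optional:

    import boto3

    iam = boto3.client("iam")

    # Read PEM-encoded material from local files and upload it as one server certificate entity.
    with open("cert.pem") as cert, open("key.pem") as key, open("chain.pem") as chain:
        iam.upload_server_certificate(
            ServerCertificateName="example-cert",  # placeholder name
            CertificateBody=cert.read(),
            PrivateKey=key.read(),
            CertificateChain=chain.read(),         # optional intermediate chain
        )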

" }, "UploadSigningCertificate":{ "name":"UploadSigningCertificate", @@ -2287,7 +2588,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Uploads an X.509 signing certificate and associates it with the specified IAM user. Some AWS services use X.509 signing certificates to validate requests that are signed with a corresponding private key. When you upload the certificate, its default status is Active.

If the UserName is not specified, the IAM user name is determined implicitly based on the AWS access key ID used to sign the request. This operation works for access keys under the AWS account. Consequently, you can use this operation to manage AWS account root user credentials even if the AWS account has no associated users.

Because the body of an X.509 certificate can be large, you should use POST rather than GET when calling UploadSigningCertificate. For information about setting up signatures and authorization through the API, go to Signing AWS API Requests in the AWS General Reference. For general information about using the Query API with IAM, go to Making Query Requests in the IAM User Guide.

" + "documentation":"

Uploads an X.509 signing certificate and associates it with the specified IAM user. Some AWS services require you to use certificates to validate requests that are signed with a corresponding private key. When you upload the certificate, its default status is Active.

For information about when you would use an X.509 signing certificate, see Managing server certificates in IAM in the IAM User Guide.

If the UserName is not specified, the IAM user name is determined implicitly based on the AWS access key ID used to sign the request. This operation works for access keys under the AWS account. Consequently, you can use this operation to manage AWS account root user credentials even if the AWS account has no associated users.

Because the body of an X.509 certificate can be large, you should use POST rather than GET when calling UploadSigningCertificate. For information about setting up signatures and authorization through the API, see Signing AWS API requests in the AWS General Reference. For general information about using the Query API with IAM, see Making query requests in the IAM User Guide.

" } }, "shapes":{ @@ -2311,7 +2612,7 @@ }, "ServiceNamespace":{ "shape":"serviceNamespaceType", - "documentation":"

The namespace of the service in which access was attempted.

To learn the service namespace of a service, go to Actions, Resources, and Condition Keys for AWS Services in the IAM User Guide. Choose the name of the service to view details for that service. In the first paragraph, find the service prefix. For example, (service prefix: a4b). For more information about service namespaces, see AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The namespace of the service in which access was attempted.

To learn the service namespace of a service, see Actions, resources, and condition keys for AWS services in the Service Authorization Reference. Choose the name of the service to view details for that service. In the first paragraph, find the service prefix. For example, (service prefix: a4b). For more information about service namespaces, see AWS service namespaces in the AWS General Reference.

" }, "Region":{ "shape":"stringType", @@ -2386,7 +2687,7 @@ }, "Region":{ "shape":"stringType", - "documentation":"

The AWS Region where this access key was most recently used. The value for this field is \"N/A\" in the following situations:

For more information about AWS Regions, see Regions and Endpoints in the Amazon Web Services General Reference.

" + "documentation":"

The AWS Region where this access key was most recently used. The value for this field is \"N/A\" in the following situations:

For more information about AWS Regions, see Regions and endpoints in the Amazon Web Services General Reference.

" } }, "documentation":"

Contains information about the last time an AWS access key was used since IAM began tracking this information on April 22, 2015.

This data type is used as a response element in the GetAccessKeyLastUsed operation.

" @@ -2490,7 +2791,7 @@ }, "PolicyArn":{ "shape":"arnType", - "documentation":"

The Amazon Resource Name (ARN) of the IAM policy you want to attach.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM policy you want to attach.

For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.

" } } }, @@ -2507,7 +2808,7 @@ }, "PolicyArn":{ "shape":"arnType", - "documentation":"

The Amazon Resource Name (ARN) of the IAM policy you want to attach.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM policy you want to attach.

For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.

" } } }, @@ -2524,7 +2825,7 @@ }, "PolicyArn":{ "shape":"arnType", - "documentation":"

The Amazon Resource Name (ARN) of the IAM policy you want to attach.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM policy you want to attach.

For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.

" } } }, @@ -2540,7 +2841,7 @@ "documentation":"

The ARN of the policy used to set the permissions boundary for the user or role.

" } }, - "documentation":"

Contains information about an attached permissions boundary.

An attached permissions boundary is a managed policy that has been attached to a user or role to set the permissions boundary.

For more information about permissions boundaries, see Permissions Boundaries for IAM Identities in the IAM User Guide.

" + "documentation":"

Contains information about an attached permissions boundary.

An attached permissions boundary is a managed policy that has been attached to a user or role to set the permissions boundary.

For more information about permissions boundaries, see Permissions boundaries for IAM identities in the IAM User Guide.

" }, "AttachedPolicy":{ "type":"structure", @@ -2551,7 +2852,7 @@ }, "PolicyArn":{"shape":"arnType"} }, - "documentation":"

Contains information about an attached policy.

An attached policy is a managed policy that has been attached to a user, group, or role. This data type is used as a response element in the ListAttachedGroupPolicies, ListAttachedRolePolicies, ListAttachedUserPolicies, and GetAccountAuthorizationDetails operations.

For more information about managed policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

Contains information about an attached policy.

An attached policy is a managed policy that has been attached to a user, group, or role. This data type is used as a response element in the ListAttachedGroupPolicies, ListAttachedRolePolicies, ListAttachedUserPolicies, and GetAccountAuthorizationDetails operations.

For more information about managed policies, refer to Managed policies and inline policies in the IAM User Guide.

" }, "BootstrapDatum":{ "type":"blob", @@ -2678,7 +2979,7 @@ "members":{ "Path":{ "shape":"pathType", - "documentation":"

The path to the group. For more information about paths, see IAM Identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/).

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" + "documentation":"

The path to the group. For more information about paths, see IAM identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/).

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" }, "GroupName":{ "shape":"groupNameType", @@ -2708,6 +3009,10 @@ "Path":{ "shape":"pathType", "documentation":"

The path to the instance profile. For more information about paths, see IAM Identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/).

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" + }, + "Tags":{ + "shape":"tagListType", + "documentation":"

A list of tags that you want to attach to the newly created IAM instance profile. Each tag consists of a key name and an associated value. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

If any one of the tags is invalid or if you exceed the allowed maximum number of tags, then the entire request fails and the resource is not created.

" } } }, @@ -2771,7 +3076,11 @@ }, "ThumbprintList":{ "shape":"thumbprintListType", - "documentation":"

A list of server certificate thumbprints for the OpenID Connect (OIDC) identity provider's server certificates. Typically this list includes only one entry. However, IAM lets you have up to five thumbprints for an OIDC provider. This lets you maintain multiple thumbprints if the identity provider is rotating certificates.

The server certificate thumbprint is the hex-encoded SHA-1 hash value of the X.509 certificate used by the domain where the OpenID Connect provider makes its keys available. It is always a 40-character string.

You must provide at least one thumbprint when creating an IAM OIDC provider. For example, assume that the OIDC provider is server.example.com and the provider stores its keys at https://keys.server.example.com/openid-connect. In that case, the thumbprint string would be the hex-encoded SHA-1 hash value of the certificate used by https://keys.server.example.com.

For more information about obtaining the OIDC provider's thumbprint, see Obtaining the Thumbprint for an OpenID Connect Provider in the IAM User Guide.

" + "documentation":"

A list of server certificate thumbprints for the OpenID Connect (OIDC) identity provider's server certificates. Typically this list includes only one entry. However, IAM lets you have up to five thumbprints for an OIDC provider. This lets you maintain multiple thumbprints if the identity provider is rotating certificates.

The server certificate thumbprint is the hex-encoded SHA-1 hash value of the X.509 certificate used by the domain where the OpenID Connect provider makes its keys available. It is always a 40-character string.

You must provide at least one thumbprint when creating an IAM OIDC provider. For example, assume that the OIDC provider is server.example.com and the provider stores its keys at https://keys.server.example.com/openid-connect. In that case, the thumbprint string would be the hex-encoded SHA-1 hash value of the certificate used by https://keys.server.example.com.

For more information about obtaining the OIDC provider's thumbprint, see Obtaining the thumbprint for an OpenID Connect provider in the IAM User Guide.

" + }, + "Tags":{ + "shape":"tagListType", + "documentation":"

A list of tags that you want to attach to the new IAM OpenID Connect (OIDC) provider. Each tag consists of a key name and an associated value. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

If any one of the tags is invalid or if you exceed the allowed maximum number of tags, then the entire request fails and the resource is not created.
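
A hedged boto3 sketch of creating an OIDC provider with tags applied at creation, reflecting the Tags member added here; the URL, client ID, thumbprint, and tag values are placeholders (the thumbprint must be the 40-character SHA-1 hash of the provider's certificate):

    import boto3

    iam = boto3.client("iam")

    response = iam.create_open_id_connect_provider(
        Url="https://server.example.com",                              # placeholder provider URL
        ClientIDList=["my-application-id"],                            # placeholder audience
        ThumbprintList=["0123456789abcdef0123456789abcdef01234567"],   # placeholder thumbprint
        Tags=[{"Key": "env", "Value": "prod"}],
    )
    print(response["OpenIDConnectProviderArn"])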

" } } }, @@ -2781,6 +3090,10 @@ "OpenIDConnectProviderArn":{ "shape":"arnType", "documentation":"

The Amazon Resource Name (ARN) of the new IAM OpenID Connect provider that is created. For more information, see OpenIDConnectProviderListEntry.

" + }, + "Tags":{ + "shape":"tagListType", + "documentation":"

A list of tags that are attached to the new IAM OIDC provider. The returned list of tags is sorted by tag key. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" } }, "documentation":"

Contains the response to a successful CreateOpenIDConnectProvider request.

" @@ -2798,7 +3111,7 @@ }, "Path":{ "shape":"policyPathType", - "documentation":"

The path for the policy.

For more information about paths, see IAM Identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/).

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" + "documentation":"

The path for the policy.

For more information about paths, see IAM identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/).

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" }, "PolicyDocument":{ "shape":"policyDocumentType", @@ -2807,6 +3120,10 @@ "Description":{ "shape":"policyDescriptionType", "documentation":"

A friendly description of the policy.

Typically used to store information about the permissions defined in the policy. For example, \"Grants access to production DynamoDB tables.\"

The policy description is immutable. After a value is assigned, it cannot be changed.

" + }, + "Tags":{ + "shape":"tagListType", + "documentation":"

A list of tags that you want to attach to the new IAM customer managed policy. Each tag consists of a key name and an associated value. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

If any one of the tags is invalid or if you exceed the allowed maximum number of tags, then the entire request fails and the resource is not created.
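
A minimal boto3 sketch of creating a customer managed policy and tagging it in the same call, reflecting the Tags member added here; the policy name, statement, and tags are placeholders:

    import json
    import boto3

    iam = boto3.client("iam")

    policy_document = json.dumps({
        "Version": "2012-10-17",
        "Statement": [{"Effect": "Allow", "Action": "dynamodb:GetItem",
                       "Resource": "arn:aws:dynamodb:*:*:table/example-*"}],
    })

    iam.create_policy(
        PolicyName="ExampleReadPolicy",   # placeholder
        PolicyDocument=policy_document,
        Description="Grants read access to example DynamoDB tables.",
        Tags=[{"Key": "owner", "Value": "data-team"}],
    )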

" } } }, @@ -2829,7 +3146,7 @@ "members":{ "PolicyArn":{ "shape":"arnType", - "documentation":"

The Amazon Resource Name (ARN) of the IAM policy to which you want to add a new version.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM policy to which you want to add a new version.

For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.

" }, "PolicyDocument":{ "shape":"policyDocumentType", @@ -2837,7 +3154,7 @@ }, "SetAsDefault":{ "shape":"booleanType", - "documentation":"

Specifies whether to set this version as the policy's default version.

When this parameter is true, the new policy version becomes the operative version. That is, it becomes the version that is in effect for the IAM users, groups, and roles that the policy is attached to.

For more information about managed policy versions, see Versioning for Managed Policies in the IAM User Guide.

" + "documentation":"

Specifies whether to set this version as the policy's default version.

When this parameter is true, the new policy version becomes the operative version. That is, it becomes the version that is in effect for the IAM users, groups, and roles that the policy is attached to.

For more information about managed policy versions, see Versioning for managed policies in the IAM User Guide.

" } } }, @@ -2876,7 +3193,7 @@ }, "MaxSessionDuration":{ "shape":"roleMaxSessionDurationType", - "documentation":"

The maximum session duration (in seconds) that you want to set for the specified role. If you do not specify a value for this setting, the default maximum of one hour is applied. This setting can have a value from 1 hour to 12 hours.

Anyone who assumes the role from the AWS CLI or API can use the DurationSeconds API parameter or the duration-seconds CLI parameter to request a longer session. The MaxSessionDuration setting determines the maximum duration that can be requested using the DurationSeconds parameter. If users don't specify a value for the DurationSeconds parameter, their security credentials are valid for one hour by default. This applies when you use the AssumeRole* API operations or the assume-role* CLI operations but does not apply when you use those operations to create a console URL. For more information, see Using IAM Roles in the IAM User Guide.

" + "documentation":"

The maximum session duration (in seconds) that you want to set for the specified role. If you do not specify a value for this setting, the default maximum of one hour is applied. This setting can have a value from 1 hour to 12 hours.

Anyone who assumes the role from the AWS CLI or API can use the DurationSeconds API parameter or the duration-seconds CLI parameter to request a longer session. The MaxSessionDuration setting determines the maximum duration that can be requested using the DurationSeconds parameter. If users don't specify a value for the DurationSeconds parameter, their security credentials are valid for one hour by default. This applies when you use the AssumeRole* API operations or the assume-role* CLI operations but does not apply when you use those operations to create a console URL. For more information, see Using IAM roles in the IAM User Guide.

" }, "PermissionsBoundary":{ "shape":"arnType", @@ -2884,7 +3201,7 @@ }, "Tags":{ "shape":"tagListType", - "documentation":"

A list of tags that you want to attach to the newly created role. Each tag consists of a key name and an associated value. For more information about tagging, see Tagging IAM Identities in the IAM User Guide.

If any one of the tags is invalid or if you exceed the allowed number of tags per role, then the entire request fails and the role is not created.

" + "documentation":"

A list of tags that you want to attach to the new role. Each tag consists of a key name and an associated value. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

If any one of the tags is invalid or if you exceed the allowed maximum number of tags, then the entire request fails and the resource is not created.
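
A hedged boto3 sketch of creating a role with tags and a longer maximum session duration, combining the members described above; the role name, trust policy, and tag values are placeholders:

    import json
    import boto3

    iam = boto3.client("iam")

    trust_policy = json.dumps({
        "Version": "2012-10-17",
        "Statement": [{"Effect": "Allow",
                       "Principal": {"Service": "lambda.amazonaws.com"},
                       "Action": "sts:AssumeRole"}],
    })

    # Create a role with a 4-hour maximum session duration and tags applied at creation.
    iam.create_role(
        RoleName="ExampleLambdaRole",          # placeholder
        AssumeRolePolicyDocument=trust_policy,
        MaxSessionDuration=4 * 3600,           # seconds; the default maximum is 1 hour
        Tags=[{"Key": "team", "Value": "platform"}],
    )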

" } } }, @@ -2908,11 +3225,15 @@ "members":{ "SAMLMetadataDocument":{ "shape":"SAMLMetadataDocumentType", - "documentation":"

An XML document generated by an identity provider (IdP) that supports SAML 2.0. The document includes the issuer's name, expiration information, and keys that can be used to validate the SAML authentication response (assertions) that are received from the IdP. You must generate the metadata document using the identity management software that is used as your organization's IdP.

For more information, see About SAML 2.0-based Federation in the IAM User Guide

" + "documentation":"

An XML document generated by an identity provider (IdP) that supports SAML 2.0. The document includes the issuer's name, expiration information, and keys that can be used to validate the SAML authentication response (assertions) that are received from the IdP. You must generate the metadata document using the identity management software that is used as your organization's IdP.

For more information, see About SAML 2.0-based federation in the IAM User Guide.

" }, "Name":{ "shape":"SAMLProviderNameType", "documentation":"

The name of the provider to create.

This parameter allows (through its regex pattern) a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: _+=,.@-

" + }, + "Tags":{ + "shape":"tagListType", + "documentation":"

A list of tags that you want to attach to the new IAM SAML provider. Each tag consists of a key name and an associated value. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

If any one of the tags is invalid or if you exceed the allowed maximum number of tags, then the entire request fails and the resource is not created.

" } } }, @@ -2922,6 +3243,10 @@ "SAMLProviderArn":{ "shape":"arnType", "documentation":"

The Amazon Resource Name (ARN) of the new SAML provider resource in IAM.

" + }, + "Tags":{ + "shape":"tagListType", + "documentation":"

A list of tags that are attached to the new IAM SAML provider. The returned list of tags is sorted by tag key. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" } }, "documentation":"

Contains the response to a successful CreateSAMLProvider request.

" @@ -2932,7 +3257,7 @@ "members":{ "AWSServiceName":{ "shape":"groupNameType", - "documentation":"

The service principal for the AWS service to which this role is attached. You use a string similar to a URL but without the http:// in front. For example: elasticbeanstalk.amazonaws.com.

Service principals are unique and case-sensitive. To find the exact service principal for your service-linked role, see AWS Services That Work with IAM in the IAM User Guide. Look for the services that have Yes in the Service-Linked Role column. Choose the Yes link to view the service-linked role documentation for that service.

" + "documentation":"

The service principal for the AWS service to which this role is attached. You use a string similar to a URL but without the http:// in front. For example: elasticbeanstalk.amazonaws.com.

Service principals are unique and case-sensitive. To find the exact service principal for your service-linked role, see AWS services that work with IAM in the IAM User Guide. Look for the services that have Yes in the Service-Linked Role column. Choose the Yes link to view the service-linked role documentation for that service.

" }, "Description":{ "shape":"roleDescriptionType", @@ -2985,7 +3310,7 @@ "members":{ "Path":{ "shape":"pathType", - "documentation":"

The path for the user name. For more information about paths, see IAM Identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/).

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" + "documentation":"

The path for the user name. For more information about paths, see IAM identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/).

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" }, "UserName":{ "shape":"userNameType", @@ -2997,7 +3322,7 @@ }, "Tags":{ "shape":"tagListType", - "documentation":"

A list of tags that you want to attach to the newly created user. Each tag consists of a key name and an associated value. For more information about tagging, see Tagging IAM Identities in the IAM User Guide.

If any one of the tags is invalid or if you exceed the allowed number of tags per user, then the entire request fails and the user is not created.

" + "documentation":"

A list of tags that you want to attach to the new user. Each tag consists of a key name and an associated value. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

If any one of the tags is invalid or if you exceed the allowed maximum number of tags, then the entire request fails and the resource is not created.

" } } }, @@ -3017,11 +3342,15 @@ "members":{ "Path":{ "shape":"pathType", - "documentation":"

The path for the virtual MFA device. For more information about paths, see IAM Identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/).

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" + "documentation":"

The path for the virtual MFA device. For more information about paths, see IAM identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/).

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

" }, "VirtualMFADeviceName":{ "shape":"virtualMFADeviceName", "documentation":"

The name of the virtual MFA device. Use with path to uniquely identify a virtual MFA device.

This parameter allows (through its regex pattern) a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: _+=,.@-

" + }, + "Tags":{ + "shape":"tagListType", + "documentation":"

A list of tags that you want to attach to the new IAM virtual MFA device. Each tag consists of a key name and an associated value. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

If any one of the tags is invalid or if you exceed the allowed maximum number of tags, then the entire request fails and the resource is not created.

" } } }, @@ -3041,7 +3370,7 @@ "members":{ "message":{"shape":"credentialReportExpiredExceptionMessage"} }, - "documentation":"

The request was rejected because the most recent credential report has expired. To generate a new credential report, use GenerateCredentialReport. For more information about credential report expiration, see Getting Credential Reports in the IAM User Guide.

", + "documentation":"

The request was rejected because the most recent credential report has expired. To generate a new credential report, use GenerateCredentialReport. For more information about credential report expiration, see Getting credential reports in the IAM User Guide.

", "error":{ "code":"ReportExpired", "httpStatusCode":410, @@ -3192,7 +3521,7 @@ "members":{ "PolicyArn":{ "shape":"arnType", - "documentation":"

The Amazon Resource Name (ARN) of the IAM policy you want to delete.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM policy you want to delete.

For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.

" } } }, @@ -3205,11 +3534,11 @@ "members":{ "PolicyArn":{ "shape":"arnType", - "documentation":"

The Amazon Resource Name (ARN) of the IAM policy from which you want to delete a version.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM policy from which you want to delete a version.

For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.

" }, "VersionId":{ "shape":"policyVersionIdType", - "documentation":"

The policy version to delete.

This parameter allows (through its regex pattern) a string of characters that consists of the lowercase letter 'v' followed by one or two digits, and optionally followed by a period '.' and a string of letters and digits.

For more information about managed policy versions, see Versioning for Managed Policies in the IAM User Guide.

" + "documentation":"

The policy version to delete.

This parameter allows (through its regex pattern) a string of characters that consists of the lowercase letter 'v' followed by one or two digits, and optionally followed by a period '.' and a string of letters and digits.

For more information about managed policy versions, see Versioning for managed policies in the IAM User Guide.

" } } }, @@ -3423,7 +3752,7 @@ }, "PolicyArn":{ "shape":"arnType", - "documentation":"

The Amazon Resource Name (ARN) of the IAM policy you want to detach.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM policy you want to detach.

For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.

" } } }, @@ -3440,7 +3769,7 @@ }, "PolicyArn":{ "shape":"arnType", - "documentation":"

The Amazon Resource Name (ARN) of the IAM policy you want to detach.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM policy you want to detach.

For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.

" } } }, @@ -3457,7 +3786,7 @@ }, "PolicyArn":{ "shape":"arnType", - "documentation":"

The Amazon Resource Name (ARN) of the IAM policy you want to detach.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM policy you want to detach.

For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.

" } } }, @@ -3566,7 +3895,7 @@ }, "Path":{ "shape":"pathType", - "documentation":"

The path to the entity (user or role). For more information about paths, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The path to the entity (user or role). For more information about paths, see IAM identifiers in the IAM User Guide.

" } }, "documentation":"

Contains details about the specified entity (user or role).

This data type is an element of the EntityDetails object.

" @@ -3659,7 +3988,7 @@ }, "EvalDecisionDetails":{ "shape":"EvalDecisionDetailsType", - "documentation":"

Additional details about the results of the cross-account evaluation decision. This parameter is populated for only cross-account simulations. It contains a brief summary of how each policy type contributes to the final evaluation decision.

If the simulation evaluates policies within the same account and includes a resource ARN, then the parameter is present but the response is empty. If the simulation evaluates policies within the same account and specifies all resources (*), then the parameter is not returned.

When you make a cross-account request, AWS evaluates the request in the trusting account and the trusted account. The request is allowed only if both evaluations return true. For more information about how policies are evaluated, see Evaluating Policies Within a Single Account.

If an AWS Organizations SCP included in the evaluation denies access, the simulation ends. In this case, policy evaluation does not proceed any further and this parameter is not returned.

" + "documentation":"

Additional details about the results of the cross-account evaluation decision. This parameter is populated for only cross-account simulations. It contains a brief summary of how each policy type contributes to the final evaluation decision.

If the simulation evaluates policies within the same account and includes a resource ARN, then the parameter is present but the response is empty. If the simulation evaluates policies within the same account and specifies all resources (*), then the parameter is not returned.

When you make a cross-account request, AWS evaluates the request in the trusting account and the trusted account. The request is allowed only if both evaluations return true. For more information about how policies are evaluated, see Evaluating policies within a single account.

If an AWS Organizations SCP included in the evaluation denies access, the simulation ends. In this case, policy evaluation does not proceed any further and this parameter is not returned.

" }, "ResourceSpecificResults":{ "shape":"ResourceSpecificResultListType", @@ -3850,7 +4179,7 @@ "members":{ "PolicySourceArn":{ "shape":"arnType", - "documentation":"

The ARN of a user, group, or role whose policies contain the context keys that you want listed. If you specify a user, the list includes context keys that are found in all policies that are attached to the user. The list also includes all groups that the user is a member of. If you pick a group or a role, then it includes only those context keys that are found in policies attached to that entity. Note that all parameters are shown in unencoded form here for clarity, but must be URL encoded to be included as a part of a real HTML request.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of a user, group, or role whose policies contain the context keys that you want listed. If you specify a user, the list includes context keys that are found in all policies that are attached to the user. The list also includes all groups that the user is a member of. If you pick a group or a role, then it includes only those context keys that are found in policies attached to that entity. Note that all parameters are shown in unencoded form here for clarity, but must be URL encoded to be included as a part of a real HTTP request.

For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.

" }, "PolicyInputList":{ "shape":"SimulationPolicyListType", @@ -4008,7 +4337,7 @@ "members":{ "OpenIDConnectProviderArn":{ "shape":"arnType", - "documentation":"

The Amazon Resource Name (ARN) of the OIDC provider resource object in IAM to get information for. You can get a list of OIDC provider resource ARNs by using the ListOpenIDConnectProviders operation.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The Amazon Resource Name (ARN) of the OIDC provider resource object in IAM to get information for. You can get a list of OIDC provider resource ARNs by using the ListOpenIDConnectProviders operation.

For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.

" } } }, @@ -4030,6 +4359,10 @@ "CreateDate":{ "shape":"dateType", "documentation":"

The date and time when the IAM OIDC provider resource object was created in the AWS account.

" + }, + "Tags":{ + "shape":"tagListType", + "documentation":"

A list of tags that are attached to the specified IAM OIDC provider. The returned list of tags is sorted by tag key. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" } }, "documentation":"

Contains the response to a successful GetOpenIDConnectProvider request.

" @@ -4104,7 +4437,7 @@ "members":{ "PolicyArn":{ "shape":"arnType", - "documentation":"

The Amazon Resource Name (ARN) of the managed policy that you want information about.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The Amazon Resource Name (ARN) of the managed policy that you want information about.

For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.

" } } }, @@ -4127,7 +4460,7 @@ "members":{ "PolicyArn":{ "shape":"arnType", - "documentation":"

The Amazon Resource Name (ARN) of the managed policy that you want information about.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The Amazon Resource Name (ARN) of the managed policy that you want information about.

For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.

" }, "VersionId":{ "shape":"policyVersionIdType", @@ -4212,7 +4545,7 @@ "members":{ "SAMLProviderArn":{ "shape":"arnType", - "documentation":"

The Amazon Resource Name (ARN) of the SAML provider resource object in IAM to get information about.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The Amazon Resource Name (ARN) of the SAML provider resource object in IAM to get information about.

For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.

" } } }, @@ -4230,6 +4563,10 @@ "ValidUntil":{ "shape":"dateType", "documentation":"

The expiration date and time for the SAML provider.

" + }, + "Tags":{ + "shape":"tagListType", + "documentation":"

A list of tags that are attached to the specified IAM SAML provider. The returned list of tags is sorted by tag key. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" } }, "documentation":"

Contains the response to a successful GetSAMLProvider request.

" @@ -4361,7 +4698,7 @@ }, "ServiceNamespace":{ "shape":"serviceNamespaceType", - "documentation":"

The service namespace for an AWS service. Provide the service namespace to learn when the IAM entity last attempted to access the specified service.

To learn the service namespace for a service, go to Actions, Resources, and Condition Keys for AWS Services in the IAM User Guide. Choose the name of the service to view details for that service. In the first paragraph, find the service prefix. For example, (service prefix: a4b). For more information about service namespaces, see AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The service namespace for an AWS service. Provide the service namespace to learn when the IAM entity last attempted to access the specified service.

To learn the service namespace for a service, see Actions, resources, and condition keys for AWS services in the IAM User Guide. Choose the name of the service to view details for that service. In the first paragraph, find the service prefix. For example, (service prefix: a4b). For more information about service namespaces, see AWS service namespaces in the AWS General Reference.

" }, "MaxItems":{ "shape":"maxItemsType", @@ -4491,7 +4828,7 @@ "members":{ "User":{ "shape":"User", - "documentation":"

A structure containing details about the IAM user.

Due to a service issue, password last used data does not include password use from May 3, 2018 22:50 PDT to May 23, 2018 14:08 PDT. This affects last sign-in dates shown in the IAM console and password last used dates in the IAM credential report, and returned by this GetUser API. If users signed in during the affected time, the password last used date that is returned is the date the user last signed in before May 3, 2018. For users that signed in after May 23, 2018 14:08 PDT, the returned password last used date is accurate.

You can use password last used information to identify unused credentials for deletion. For example, you might delete users who did not sign in to AWS in the last 90 days. In cases like this, we recommend that you adjust your evaluation window to include dates after May 23, 2018. Alternatively, if your users use access keys to access AWS programmatically you can refer to access key last used information because it is accurate for all dates.

" + "documentation":"

A structure containing details about the IAM user.

Due to a service issue, password last used data does not include password use from May 3, 2018 22:50 PDT to May 23, 2018 14:08 PDT. This affects last sign-in dates shown in the IAM console and password last used dates in the IAM credential report, and returned by this operation. If users signed in during the affected time, the password last used date that is returned is the date the user last signed in before May 3, 2018. For users that signed in after May 23, 2018 14:08 PDT, the returned password last used date is accurate.

You can use password last used information to identify unused credentials for deletion. For example, you might delete users who did not sign in to AWS in the last 90 days. In cases like this, we recommend that you adjust your evaluation window to include dates after May 23, 2018. Alternatively, if your users use access keys to access AWS programmatically you can refer to access key last used information because it is accurate for all dates.

" } }, "documentation":"

Contains the response to a successful GetUser request.

" @@ -4508,7 +4845,7 @@ "members":{ "Path":{ "shape":"pathType", - "documentation":"

The path to the group. For more information about paths, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The path to the group. For more information about paths, see IAM identifiers in the IAM User Guide.

" }, "GroupName":{ "shape":"groupNameType", @@ -4516,11 +4853,11 @@ }, "GroupId":{ "shape":"idType", - "documentation":"

The stable and unique string identifying the group. For more information about IDs, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The stable and unique string identifying the group. For more information about IDs, see IAM identifiers in the IAM User Guide.

" }, "Arn":{ "shape":"arnType", - "documentation":"

The Amazon Resource Name (ARN) specifying the group. For more information about ARNs and how to use them in policies, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) specifying the group. For more information about ARNs and how to use them in policies, see IAM identifiers in the IAM User Guide.

" }, "CreateDate":{ "shape":"dateType", @@ -4534,7 +4871,7 @@ "members":{ "Path":{ "shape":"pathType", - "documentation":"

The path to the group. For more information about paths, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The path to the group. For more information about paths, see IAM identifiers in the IAM User Guide.

" }, "GroupName":{ "shape":"groupNameType", @@ -4542,7 +4879,7 @@ }, "GroupId":{ "shape":"idType", - "documentation":"

The stable and unique string identifying the group. For more information about IDs, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The stable and unique string identifying the group. For more information about IDs, see IAM identifiers in the IAM User Guide.

" }, "Arn":{"shape":"arnType"}, "CreateDate":{ @@ -4573,7 +4910,7 @@ "members":{ "Path":{ "shape":"pathType", - "documentation":"

The path to the instance profile. For more information about paths, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The path to the instance profile. For more information about paths, see IAM identifiers in the IAM User Guide.

" }, "InstanceProfileName":{ "shape":"instanceProfileNameType", @@ -4581,11 +4918,11 @@ }, "InstanceProfileId":{ "shape":"idType", - "documentation":"

The stable and unique string identifying the instance profile. For more information about IDs, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The stable and unique string identifying the instance profile. For more information about IDs, see IAM identifiers in the IAM User Guide.

" }, "Arn":{ "shape":"arnType", - "documentation":"

The Amazon Resource Name (ARN) specifying the instance profile. For more information about ARNs and how to use them in policies, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) specifying the instance profile. For more information about ARNs and how to use them in policies, see IAM identifiers in the IAM User Guide.

" }, "CreateDate":{ "shape":"dateType", @@ -4594,6 +4931,10 @@ "Roles":{ "shape":"roleListType", "documentation":"

The role associated with the instance profile.

" + }, + "Tags":{ + "shape":"tagListType", + "documentation":"

A list of tags that are attached to the instance profile. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" } }, "documentation":"

Contains information about an instance profile.

This data type is used as a response element in the following operations:

" @@ -4681,7 +5022,7 @@ "members":{ "message":{"shape":"limitExceededMessage"} }, - "documentation":"

The request was rejected because it attempted to create resources beyond the current AWS account limitations. The error message describes the limit exceeded.

", + "documentation":"

The request was rejected because it attempted to create resources beyond the current AWS account limits. The error message describes the limit exceeded.

", "error":{ "code":"LimitExceeded", "httpStatusCode":409, @@ -4884,7 +5225,7 @@ "members":{ "PolicyArn":{ "shape":"arnType", - "documentation":"

The Amazon Resource Name (ARN) of the IAM policy for which you want the versions.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM policy for which you want the versions.

For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.

" }, "EntityFilter":{ "shape":"EntityType", @@ -5044,6 +5385,42 @@ }, "documentation":"

Contains the response to a successful ListGroups request.

" }, + "ListInstanceProfileTagsRequest":{ + "type":"structure", + "required":["InstanceProfileName"], + "members":{ + "InstanceProfileName":{ + "shape":"instanceProfileNameType", + "documentation":"

The name of the IAM instance profile whose tags you want to see.

This parameter accepts (through its regex pattern) a string of characters that consist of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

" + }, + "Marker":{ + "shape":"markerType", + "documentation":"

Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

" + }, + "MaxItems":{ + "shape":"maxItemsType", + "documentation":"

(Optional) Use this only when paginating results to indicate the maximum number of items that you want in the response. If additional items exist beyond the maximum that you specify, the IsTruncated response element is true.

If you do not include this parameter, it defaults to 100. Note that IAM might return fewer results, even when more results are available. In that case, the IsTruncated response element returns true, and Marker contains a value to include in the subsequent call that tells the service where to continue from.

" + } + } + }, + "ListInstanceProfileTagsResponse":{ + "type":"structure", + "required":["Tags"], + "members":{ + "Tags":{ + "shape":"tagListType", + "documentation":"

The list of tags that are currently attached to the IAM instance profile. Each tag consists of a key name and an associated value. If no tags are attached to the specified resource, the response contains an empty list.

" + }, + "IsTruncated":{ + "shape":"booleanType", + "documentation":"

A flag that indicates whether there are more items to return. If your results were truncated, you can use the Marker request parameter to make a subsequent pagination request that retrieves more items. Note that IAM might return fewer than the MaxItems number of results even when more results are available. Check IsTruncated after every call to ensure that you receive all of your results.

" + }, + "Marker":{ + "shape":"responseMarkerType", + "documentation":"

When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

" + } + } + }, "ListInstanceProfilesForRoleRequest":{ "type":"structure", "required":["RoleName"], @@ -5117,6 +5494,42 @@ }, "documentation":"

Contains the response to a successful ListInstanceProfiles request.

" }, + "ListMFADeviceTagsRequest":{ + "type":"structure", + "required":["SerialNumber"], + "members":{ + "SerialNumber":{ + "shape":"serialNumberType", + "documentation":"

The unique identifier for the IAM virtual MFA device whose tags you want to see. For virtual MFA devices, the serial number is the same as the ARN.

This parameter accepts (through its regex pattern) a string of characters that consist of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

" + }, + "Marker":{ + "shape":"markerType", + "documentation":"

Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

" + }, + "MaxItems":{ + "shape":"maxItemsType", + "documentation":"

(Optional) Use this only when paginating results to indicate the maximum number of items that you want in the response. If additional items exist beyond the maximum that you specify, the IsTruncated response element is true.

If you do not include this parameter, it defaults to 100. Note that IAM might return fewer results, even when more results are available. In that case, the IsTruncated response element returns true, and Marker contains a value to include in the subsequent call that tells the service where to continue from.

" + } + } + }, + "ListMFADeviceTagsResponse":{ + "type":"structure", + "required":["Tags"], + "members":{ + "Tags":{ + "shape":"tagListType", + "documentation":"

The list of tags that are currently attached to the virtual MFA device. Each tag consists of a key name and an associated value. If no tags are attached to the specified resource, the response contains an empty list.

" + }, + "IsTruncated":{ + "shape":"booleanType", + "documentation":"

A flag that indicates whether there are more items to return. If your results were truncated, you can use the Marker request parameter to make a subsequent pagination request that retrieves more items. Note that IAM might return fewer than the MaxItems number of results even when more results are available. Check IsTruncated after every call to ensure that you receive all of your results.

" + }, + "Marker":{ + "shape":"responseMarkerType", + "documentation":"

When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

" + } + } + }, "ListMFADevicesRequest":{ "type":"structure", "members":{ @@ -5153,6 +5566,42 @@ }, "documentation":"

Contains the response to a successful ListMFADevices request.

" }, + "ListOpenIDConnectProviderTagsRequest":{ + "type":"structure", + "required":["OpenIDConnectProviderArn"], + "members":{ + "OpenIDConnectProviderArn":{ + "shape":"arnType", + "documentation":"

The ARN of the OpenID Connect (OIDC) identity provider whose tags you want to see.

This parameter accepts (through its regex pattern) a string of characters that consist of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

" + }, + "Marker":{ + "shape":"markerType", + "documentation":"

Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

" + }, + "MaxItems":{ + "shape":"maxItemsType", + "documentation":"

(Optional) Use this only when paginating results to indicate the maximum number of items that you want in the response. If additional items exist beyond the maximum that you specify, the IsTruncated response element is true.

If you do not include this parameter, it defaults to 100. Note that IAM might return fewer results, even when more results are available. In that case, the IsTruncated response element returns true, and Marker contains a value to include in the subsequent call that tells the service where to continue from.

" + } + } + }, + "ListOpenIDConnectProviderTagsResponse":{ + "type":"structure", + "required":["Tags"], + "members":{ + "Tags":{ + "shape":"tagListType", + "documentation":"

The list of tags that are currently attached to the OpenID Connect (OIDC) identity provider. Each tag consists of a key name and an associated value. If no tags are attached to the specified resource, the response contains an empty list.

" + }, + "IsTruncated":{ + "shape":"booleanType", + "documentation":"

A flag that indicates whether there are more items to return. If your results were truncated, you can use the Marker request parameter to make a subsequent pagination request that retrieves more items. Note that IAM might return fewer than the MaxItems number of results even when more results are available. Check IsTruncated after every call to ensure that you receive all of your results.

" + }, + "Marker":{ + "shape":"responseMarkerType", + "documentation":"

When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

" + } + } + }, "ListOpenIDConnectProvidersRequest":{ "type":"structure", "members":{ @@ -5173,7 +5622,7 @@ "members":{ "ServiceNamespace":{ "shape":"serviceNamespaceType", - "documentation":"

The namespace of the service that was accessed.

To learn the service namespace of a service, go to Actions, Resources, and Condition Keys for AWS Services in the IAM User Guide. Choose the name of the service to view details for that service. In the first paragraph, find the service prefix. For example, (service prefix: a4b). For more information about service namespaces, see AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The namespace of the service that was accessed.

To learn the service namespace of a service, see Actions, resources, and condition keys for AWS services in the Service Authorization Reference. Choose the name of the service to view details for that service. In the first paragraph, find the service prefix. For example, (service prefix: a4b). For more information about service namespaces, see AWS service namespaces in the AWS General Reference.

" }, "Policies":{ "shape":"policyGrantingServiceAccessListType", @@ -5199,7 +5648,7 @@ }, "ServiceNamespaces":{ "shape":"serviceNamespaceListType", - "documentation":"

The service namespace for the AWS services whose policies you want to list.

To learn the service namespace for a service, go to Actions, Resources, and Condition Keys for AWS Services in the IAM User Guide. Choose the name of the service to view details for that service. In the first paragraph, find the service prefix. For example, (service prefix: a4b). For more information about service namespaces, see AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The service namespace for the AWS services whose policies you want to list.

To learn the service namespace for a service, see Actions, resources, and condition keys for AWS services in the IAM User Guide. Choose the name of the service to view details for that service. In the first paragraph, find the service prefix. For example, (service prefix: a4b). For more information about service namespaces, see AWS service namespaces in the AWS General Reference.

" } } }, @@ -5268,13 +5717,49 @@ }, "documentation":"

Contains the response to a successful ListPolicies request.

" }, + "ListPolicyTagsRequest":{ + "type":"structure", + "required":["PolicyArn"], + "members":{ + "PolicyArn":{ + "shape":"arnType", + "documentation":"

The ARN of the IAM customer managed policy whose tags you want to see.

This parameter accepts (through its regex pattern) a string of characters that consist of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

" + }, + "Marker":{ + "shape":"markerType", + "documentation":"

Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

" + }, + "MaxItems":{ + "shape":"maxItemsType", + "documentation":"

(Optional) Use this only when paginating results to indicate the maximum number of items that you want in the response. If additional items exist beyond the maximum that you specify, the IsTruncated response element is true.

If you do not include this parameter, it defaults to 100. Note that IAM might return fewer results, even when more results are available. In that case, the IsTruncated response element returns true, and Marker contains a value to include in the subsequent call that tells the service where to continue from.

" + } + } + }, + "ListPolicyTagsResponse":{ + "type":"structure", + "required":["Tags"], + "members":{ + "Tags":{ + "shape":"tagListType", + "documentation":"

The list of tags that are currently attached to the IAM customer managed policy. Each tag consists of a key name and an associated value. If no tags are attached to the specified resource, the response contains an empty list.

" + }, + "IsTruncated":{ + "shape":"booleanType", + "documentation":"

A flag that indicates whether there are more items to return. If your results were truncated, you can use the Marker request parameter to make a subsequent pagination request that retrieves more items. Note that IAM might return fewer than the MaxItems number of results even when more results are available. Check IsTruncated after every call to ensure that you receive all of your results.

" + }, + "Marker":{ + "shape":"responseMarkerType", + "documentation":"

When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

" + } + } + }, "ListPolicyVersionsRequest":{ "type":"structure", "required":["PolicyArn"], "members":{ "PolicyArn":{ "shape":"arnType", - "documentation":"

The Amazon Resource Name (ARN) of the IAM policy for which you want the versions.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM policy for which you want the versions.

For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.

" }, "Marker":{ "shape":"markerType", @@ -5291,7 +5776,7 @@ "members":{ "Versions":{ "shape":"policyDocumentVersionListType", - "documentation":"

A list of policy versions.

For more information about managed policy versions, see Versioning for Managed Policies in the IAM User Guide.

" + "documentation":"

A list of policy versions.

For more information about managed policy versions, see Versioning for managed policies in the IAM User Guide.

" }, "IsTruncated":{ "shape":"booleanType", @@ -5365,7 +5850,7 @@ "members":{ "Tags":{ "shape":"tagListType", - "documentation":"

The list of tags currently that is attached to the role. Each tag consists of a key name and an associated value. If no tags are attached to the specified role, the response contains an empty list.

" + "documentation":"

The list of tags that are currently attached to the role. Each tag consists of a key name and an associated value. If no tags are attached to the specified resource, the response contains an empty list.

" }, "IsTruncated":{ "shape":"booleanType", @@ -5413,6 +5898,42 @@ }, "documentation":"

Contains the response to a successful ListRoles request.

" }, + "ListSAMLProviderTagsRequest":{ + "type":"structure", + "required":["SAMLProviderArn"], + "members":{ + "SAMLProviderArn":{ + "shape":"arnType", + "documentation":"

The ARN of the Security Assertion Markup Language (SAML) identity provider whose tags you want to see.

This parameter accepts (through its regex pattern) a string of characters that consist of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

" + }, + "Marker":{ + "shape":"markerType", + "documentation":"

Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

" + }, + "MaxItems":{ + "shape":"maxItemsType", + "documentation":"

(Optional) Use this only when paginating results to indicate the maximum number of items that you want in the response. If additional items exist beyond the maximum that you specify, the IsTruncated response element is true.

If you do not include this parameter, it defaults to 100. Note that IAM might return fewer results, even when more results are available. In that case, the IsTruncated response element returns true, and Marker contains a value to include in the subsequent call that tells the service where to continue from.

" + } + } + }, + "ListSAMLProviderTagsResponse":{ + "type":"structure", + "required":["Tags"], + "members":{ + "Tags":{ + "shape":"tagListType", + "documentation":"

The list of tags that are currently attached to the Security Assertion Markup Language (SAML) identity provider. Each tag consists of a key name and an associated value. If no tags are attached to the specified resource, the response contains an empty list.

" + }, + "IsTruncated":{ + "shape":"booleanType", + "documentation":"

A flag that indicates whether there are more items to return. If your results were truncated, you can use the Marker request parameter to make a subsequent pagination request that retrieves more items. Note that IAM might return fewer than the MaxItems number of results even when more results are available. Check IsTruncated after every call to ensure that you receive all of your results.

" + }, + "Marker":{ + "shape":"responseMarkerType", + "documentation":"

When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

" + } + } + }, "ListSAMLProvidersRequest":{ "type":"structure", "members":{ @@ -5463,6 +5984,42 @@ }, "documentation":"

Contains the response to a successful ListSSHPublicKeys request.

" }, + "ListServerCertificateTagsRequest":{ + "type":"structure", + "required":["ServerCertificateName"], + "members":{ + "ServerCertificateName":{ + "shape":"serverCertificateNameType", + "documentation":"

The name of the IAM server certificate whose tags you want to see.

This parameter accepts (through its regex pattern) a string of characters that consist of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

" + }, + "Marker":{ + "shape":"markerType", + "documentation":"

Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.

" + }, + "MaxItems":{ + "shape":"maxItemsType", + "documentation":"

(Optional) Use this only when paginating results to indicate the maximum number of items that you want in the response. If additional items exist beyond the maximum that you specify, the IsTruncated response element is true.

If you do not include this parameter, it defaults to 100. Note that IAM might return fewer results, even when more results are available. In that case, the IsTruncated response element returns true, and Marker contains a value to include in the subsequent call that tells the service where to continue from.

" + } + } + }, + "ListServerCertificateTagsResponse":{ + "type":"structure", + "required":["Tags"], + "members":{ + "Tags":{ + "shape":"tagListType", + "documentation":"

The list of tags that are currently attached to the IAM server certificate. Each tag consists of a key name and an associated value. If no tags are attached to the specified resource, the response contains an empty list.

" + }, + "IsTruncated":{ + "shape":"booleanType", + "documentation":"

A flag that indicates whether there are more items to return. If your results were truncated, you can use the Marker request parameter to make a subsequent pagination request that retrieves more items. Note that IAM might return fewer than the MaxItems number of results even when more results are available. Check IsTruncated after every call to ensure that you receive all of your results.

" + }, + "Marker":{ + "shape":"responseMarkerType", + "documentation":"

When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

" + } + } + }, "ListServerCertificatesRequest":{ "type":"structure", "members":{ @@ -5618,7 +6175,7 @@ "members":{ "Tags":{ "shape":"tagListType", - "documentation":"

The list of tags that are currently attached to the user. Each tag consists of a key name and an associated value. If no tags are attached to the specified user, the response contains an empty list.

" + "documentation":"

The list of tags that are currently attached to the user. Each tag consists of a key name and an associated value. If no tags are attached to the specified resource, the response contains an empty list.

" }, "IsTruncated":{ "shape":"booleanType", @@ -5782,16 +6339,16 @@ }, "PolicyId":{ "shape":"idType", - "documentation":"

The stable and unique string identifying the policy.

For more information about IDs, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The stable and unique string identifying the policy.

For more information about IDs, see IAM identifiers in the IAM User Guide.

" }, "Arn":{"shape":"arnType"}, "Path":{ "shape":"policyPathType", - "documentation":"

The path to the policy.

For more information about paths, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The path to the policy.

For more information about paths, see IAM identifiers in the IAM User Guide.

" }, "DefaultVersionId":{ "shape":"policyVersionIdType", - "documentation":"

The identifier for the version of the policy that is set as the default (operative) version.

For more information about policy versions, see Versioning for Managed Policies in the IAM User Guide.

" + "documentation":"

The identifier for the version of the policy that is set as the default (operative) version.

For more information about policy versions, see Versioning for managed policies in the IAM User Guide.

" }, "AttachmentCount":{ "shape":"attachmentCountType", @@ -5799,7 +6356,7 @@ }, "PermissionsBoundaryUsageCount":{ "shape":"attachmentCountType", - "documentation":"

The number of entities (users and roles) for which the policy is used as the permissions boundary.

For more information about permissions boundaries, see Permissions Boundaries for IAM Identities in the IAM User Guide.

" + "documentation":"

The number of entities (users and roles) for which the policy is used as the permissions boundary.

For more information about permissions boundaries, see Permissions boundaries for IAM identities in the IAM User Guide.

" }, "IsAttachable":{ "shape":"booleanType", @@ -5822,7 +6379,7 @@ "documentation":"

A list containing information about the versions of the policy.

" } }, - "documentation":"

Contains information about a managed policy, including the policy's ARN, versions, and the number of principal entities (users, groups, and roles) that the policy is attached to.

This data type is used as a response element in the GetAccountAuthorizationDetails operation.

For more information about managed policies, see Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

Contains information about a managed policy, including the policy's ARN, versions, and the number of principal entities (users, groups, and roles) that the policy is attached to.

This data type is used as a response element in the GetAccountAuthorizationDetails operation.

For more information about managed policies, see Managed policies and inline policies in the IAM User Guide.

" }, "ManagedPolicyDetailListType":{ "type":"list", @@ -5878,19 +6435,19 @@ }, "RequireSymbols":{ "shape":"booleanType", - "documentation":"

Specifies whether to require symbols for IAM user passwords.

" + "documentation":"

Specifies whether IAM user passwords must contain at least one of the following symbols:

! @ # $ % ^ & * ( ) _ + - = [ ] { } | '

" }, "RequireNumbers":{ "shape":"booleanType", - "documentation":"

Specifies whether to require numbers for IAM user passwords.

" + "documentation":"

Specifies whether IAM user passwords must contain at least one numeric character (0 to 9).

" }, "RequireUppercaseCharacters":{ "shape":"booleanType", - "documentation":"

Specifies whether to require uppercase characters for IAM user passwords.

" + "documentation":"

Specifies whether IAM user passwords must contain at least one uppercase character (A to Z).

" }, "RequireLowercaseCharacters":{ "shape":"booleanType", - "documentation":"

Specifies whether to require lowercase characters for IAM user passwords.

" + "documentation":"

Specifies whether IAM user passwords must contain at least one lowercase character (a to z).

" }, "AllowUsersToChangePassword":{ "shape":"booleanType", @@ -5951,12 +6508,12 @@ }, "PolicyId":{ "shape":"idType", - "documentation":"

The stable and unique string identifying the policy.

For more information about IDs, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The stable and unique string identifying the policy.

For more information about IDs, see IAM identifiers in the IAM User Guide.

" }, "Arn":{"shape":"arnType"}, "Path":{ "shape":"policyPathType", - "documentation":"

The path to the policy.

For more information about paths, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The path to the policy.

For more information about paths, see IAM identifiers in the IAM User Guide.

" }, "DefaultVersionId":{ "shape":"policyVersionIdType", @@ -5968,7 +6525,7 @@ }, "PermissionsBoundaryUsageCount":{ "shape":"attachmentCountType", - "documentation":"

The number of entities (users and roles) for which the policy is used to set the permissions boundary.

For more information about permissions boundaries, see Permissions Boundaries for IAM Identities in the IAM User Guide.

" + "documentation":"

The number of entities (users and roles) for which the policy is used to set the permissions boundary.

For more information about permissions boundaries, see Permissions boundaries for IAM identities in the IAM User Guide.

" }, "IsAttachable":{ "shape":"booleanType", @@ -5985,9 +6542,13 @@ "UpdateDate":{ "shape":"dateType", "documentation":"

The date and time, in ISO 8601 date-time format, when the policy was last updated.

When a policy has only one version, this field contains the date and time when the policy was created. When a policy has more than one version, this field contains the date and time when the most recent policy version was created.

" + }, + "Tags":{ + "shape":"tagListType", + "documentation":"

A list of tags that are attached to the policy. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" } }, - "documentation":"

Contains information about a managed policy.

This data type is used as a response element in the CreatePolicy, GetPolicy, and ListPolicies operations.

For more information about managed policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

Contains information about a managed policy.

This data type is used as a response element in the CreatePolicy, GetPolicy, and ListPolicies operations.

For more information about managed policies, refer to Managed policies and inline policies in the IAM User Guide.

" }, "PolicyDetail":{ "type":"structure", @@ -6036,16 +6597,16 @@ }, "PolicyType":{ "shape":"policyType", - "documentation":"

The policy type. For more information about these policy types, see Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

The policy type. For more information about these policy types, see Managed policies and inline policies in the IAM User Guide.

" }, "PolicyArn":{"shape":"arnType"}, "EntityType":{ "shape":"policyOwnerEntityType", - "documentation":"

The type of entity (user or role) that used the policy to access the service to which the inline policy is attached.

This field is null for managed policies. For more information about these policy types, see Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

The type of entity (user or role) that used the policy to access the service to which the inline policy is attached.

This field is null for managed policies. For more information about these policy types, see Managed policies and inline policies in the IAM User Guide.

" }, "EntityName":{ "shape":"entityNameType", - "documentation":"

The name of the entity (user or role) to which the inline policy is attached.

This field is null for managed policies. For more information about these policy types, see Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

The name of the entity (user or role) to which the inline policy is attached.

This field is null for managed policies. For more information about these policy types, see Managed policies and inline policies in the IAM User Guide.

" } }, "documentation":"

Contains details about the permissions policies that are attached to the specified identity (user, group, or role).

This data type is an element of the ListPoliciesGrantingServiceAccessEntry object.

" @@ -6059,10 +6620,10 @@ }, "GroupId":{ "shape":"idType", - "documentation":"

The stable and unique string identifying the group. For more information about IDs, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The stable and unique string identifying the group. For more information about IDs, see IAM identifiers in the IAM User Guide.

" } }, - "documentation":"

Contains information about a group that a managed policy is attached to.

This data type is used as a response element in the ListEntitiesForPolicy operation.

For more information about managed policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

Contains information about a group that a managed policy is attached to.

This data type is used as a response element in the ListEntitiesForPolicy operation.

For more information about managed policies, refer to Managed policies and inline policies in the IAM User Guide.

" }, "PolicyGroupListType":{ "type":"list", @@ -6091,10 +6652,10 @@ }, "RoleId":{ "shape":"idType", - "documentation":"

The stable and unique string identifying the role. For more information about IDs, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The stable and unique string identifying the role. For more information about IDs, see IAM identifiers in the IAM User Guide.

" } }, - "documentation":"

Contains information about a role that a managed policy is attached to.

This data type is used as a response element in the ListEntitiesForPolicy operation.

For more information about managed policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

Contains information about a role that a managed policy is attached to.

This data type is used as a response element in the ListEntitiesForPolicy operation.

For more information about managed policies, refer to Managed policies and inline policies in the IAM User Guide.

" }, "PolicyRoleListType":{ "type":"list", @@ -6114,7 +6675,7 @@ }, "PolicyUsageType":{ "type":"string", - "documentation":"

The policy usage type that indicates whether the policy is used as a permissions policy or as the permissions boundary for an entity.

For more information about permissions boundaries, see Permissions Boundaries for IAM Identities in the IAM User Guide.

", + "documentation":"

The policy usage type that indicates whether the policy is used as a permissions policy or as the permissions boundary for an entity.

For more information about permissions boundaries, see Permissions boundaries for IAM identities in the IAM User Guide.

", "enum":[ "PermissionsPolicy", "PermissionsBoundary" @@ -6129,10 +6690,10 @@ }, "UserId":{ "shape":"idType", - "documentation":"

The stable and unique string identifying the user. For more information about IDs, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The stable and unique string identifying the user. For more information about IDs, see IAM identifiers in the IAM User Guide.

" } }, - "documentation":"

Contains information about a user that a managed policy is attached to.

This data type is used as a response element in the ListEntitiesForPolicy operation.

For more information about managed policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

Contains information about a user that a managed policy is attached to.

This data type is used as a response element in the ListEntitiesForPolicy operation.

For more information about managed policies, refer to Managed policies and inline policies in the IAM User Guide.

" }, "PolicyUserListType":{ "type":"list", @@ -6158,7 +6719,7 @@ "documentation":"

The date and time, in ISO 8601 date-time format, when the policy version was created.

" } }, - "documentation":"

Contains information about a version of a managed policy.

This data type is used as a response element in the CreatePolicyVersion, GetPolicyVersion, ListPolicyVersions, and GetAccountAuthorizationDetails operations.

For more information about managed policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

" + "documentation":"

Contains information about a version of a managed policy.

This data type is used as a response element in the CreatePolicyVersion, GetPolicyVersion, ListPolicyVersions, and GetAccountAuthorizationDetails operations.

For more information about managed policies, refer to Managed policies and inline policies in the IAM User Guide.

" }, "Position":{ "type":"structure", @@ -6292,7 +6853,7 @@ "members":{ "OpenIDConnectProviderArn":{ "shape":"arnType", - "documentation":"

The Amazon Resource Name (ARN) of the IAM OIDC provider resource to remove the client ID from. You can get a list of OIDC provider ARNs by using the ListOpenIDConnectProviders operation.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM OIDC provider resource to remove the client ID from. You can get a list of OIDC provider ARNs by using the ListOpenIDConnectProviders operation.

For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.

" }, "ClientID":{ "shape":"clientIDType", @@ -6475,7 +7036,7 @@ "members":{ "Path":{ "shape":"pathType", - "documentation":"

The path to the role. For more information about paths, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The path to the role. For more information about paths, see IAM identifiers in the IAM User Guide.

" }, "RoleName":{ "shape":"roleNameType", @@ -6483,11 +7044,11 @@ }, "RoleId":{ "shape":"idType", - "documentation":"

The stable and unique string identifying the role. For more information about IDs, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The stable and unique string identifying the role. For more information about IDs, see IAM identifiers in the IAM User Guide.

" }, "Arn":{ "shape":"arnType", - "documentation":"

The Amazon Resource Name (ARN) specifying the role. For more information about ARNs and how to use them in policies, see IAM Identifiers in the IAM User Guide guide.

" + "documentation":"

The Amazon Resource Name (ARN) specifying the role. For more information about ARNs and how to use them in policies, see IAM identifiers in the IAM User Guide.

" }, "CreateDate":{ "shape":"dateType", @@ -6507,15 +7068,15 @@ }, "PermissionsBoundary":{ "shape":"AttachedPermissionsBoundary", - "documentation":"

The ARN of the policy used to set the permissions boundary for the role.

For more information about permissions boundaries, see Permissions Boundaries for IAM Identities in the IAM User Guide.

" + "documentation":"

The ARN of the policy used to set the permissions boundary for the role.

For more information about permissions boundaries, see Permissions boundaries for IAM identities in the IAM User Guide.

" }, "Tags":{ "shape":"tagListType", - "documentation":"

A list of tags that are attached to the specified role. For more information about tagging, see Tagging IAM Identities in the IAM User Guide.

" + "documentation":"

A list of tags that are attached to the role. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" }, "RoleLastUsed":{ "shape":"RoleLastUsed", - "documentation":"

Contains information about the last time that an IAM role was used. This includes the date and time and the Region in which the role was last used. Activity is only reported for the trailing 400 days. This period can be shorter if your Region began supporting these features within the last year. The role might have been used more than 400 days ago. For more information, see Regions Where Data Is Tracked in the IAM User Guide.

" + "documentation":"

Contains information about the last time that an IAM role was used. This includes the date and time and the Region in which the role was last used. Activity is only reported for the trailing 400 days. This period can be shorter if your Region began supporting these features within the last year. The role might have been used more than 400 days ago. For more information, see Regions where data is tracked in the IAM User Guide.

" } }, "documentation":"

Contains information about an IAM role. This structure is returned as a response element in several API operations that interact with roles.

" @@ -6525,7 +7086,7 @@ "members":{ "Path":{ "shape":"pathType", - "documentation":"

The path to the role. For more information about paths, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The path to the role. For more information about paths, see IAM identifiers in the IAM User Guide.

" }, "RoleName":{ "shape":"roleNameType", @@ -6533,7 +7094,7 @@ }, "RoleId":{ "shape":"idType", - "documentation":"

The stable and unique string identifying the role. For more information about IDs, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The stable and unique string identifying the role. For more information about IDs, see IAM identifiers in the IAM User Guide.

" }, "Arn":{"shape":"arnType"}, "CreateDate":{ @@ -6558,15 +7119,15 @@ }, "PermissionsBoundary":{ "shape":"AttachedPermissionsBoundary", - "documentation":"

The ARN of the policy used to set the permissions boundary for the role.

For more information about permissions boundaries, see Permissions Boundaries for IAM Identities in the IAM User Guide.

" + "documentation":"

The ARN of the policy used to set the permissions boundary for the role.

For more information about permissions boundaries, see Permissions boundaries for IAM identities in the IAM User Guide.

" }, "Tags":{ "shape":"tagListType", - "documentation":"

A list of tags that are attached to the specified role. For more information about tagging, see Tagging IAM Identities in the IAM User Guide.

" + "documentation":"

A list of tags that are attached to the role. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" }, "RoleLastUsed":{ "shape":"RoleLastUsed", - "documentation":"

Contains information about the last time that an IAM role was used. This includes the date and time and the Region in which the role was last used. Activity is only reported for the trailing 400 days. This period can be shorter if your Region began supporting these features within the last year. The role might have been used more than 400 days ago. For more information, see Regions Where Data Is Tracked in the IAM User Guide.

" + "documentation":"

Contains information about the last time that an IAM role was used. This includes the date and time and the Region in which the role was last used. Activity is only reported for the trailing 400 days. This period can be shorter if your Region began supporting these features within the last year. The role might have been used more than 400 days ago. For more information, see Regions where data is tracked in the IAM User Guide.

" } }, "documentation":"

Contains information about an IAM role, including all of the role's policies.

This data type is used as a response element in the GetAccountAuthorizationDetails operation.

" @@ -6576,14 +7137,14 @@ "members":{ "LastUsedDate":{ "shape":"dateType", - "documentation":"

The date and time, in ISO 8601 date-time format that the role was last used.

This field is null if the role has not been used within the IAM tracking period. For more information about the tracking period, see Regions Where Data Is Tracked in the IAM User Guide.

" + "documentation":"

The date and time, in ISO 8601 date-time format, when the role was last used.

This field is null if the role has not been used within the IAM tracking period. For more information about the tracking period, see Regions where data is tracked in the IAM User Guide.

" }, "Region":{ "shape":"stringType", "documentation":"

The name of the AWS Region in which the role was last used.

" } }, - "documentation":"

Contains information about the last time that an IAM role was used. This includes the date and time and the Region in which the role was last used. Activity is only reported for the trailing 400 days. This period can be shorter if your Region began supporting these features within the last year. The role might have been used more than 400 days ago. For more information, see Regions Where Data Is Tracked in the IAM User Guide.

This data type is returned as a response element in the GetRole and GetAccountAuthorizationDetails operations.

" + "documentation":"

Contains information about the last time that an IAM role was used. This includes the date and time and the Region in which the role was last used. Activity is only reported for the trailing 400 days. This period can be shorter if your Region began supporting these features within the last year. The role might have been used more than 400 days ago. For more information, see Regions where data is tracked in the IAM User Guide.

This data type is returned as a response element in the GetRole and GetAccountAuthorizationDetails operations.

" }, "RoleUsageListType":{ "type":"list", @@ -6723,6 +7284,10 @@ "CertificateChain":{ "shape":"certificateChainType", "documentation":"

The contents of the public key certificate chain.

" + }, + "Tags":{ + "shape":"tagListType", + "documentation":"

A list of tags that are attached to the server certificate. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" } }, "documentation":"

Contains information about a server certificate.

This data type is used as a response element in the GetServerCertificate operation.

" @@ -6738,7 +7303,7 @@ "members":{ "Path":{ "shape":"pathType", - "documentation":"

The path to the server certificate. For more information about paths, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The path to the server certificate. For more information about paths, see IAM identifiers in the IAM User Guide.

" }, "ServerCertificateName":{ "shape":"serverCertificateNameType", @@ -6746,11 +7311,11 @@ }, "ServerCertificateId":{ "shape":"idType", - "documentation":"

The stable and unique string identifying the server certificate. For more information about IDs, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The stable and unique string identifying the server certificate. For more information about IDs, see IAM identifiers in the IAM User Guide.

" }, "Arn":{ "shape":"arnType", - "documentation":"

The Amazon Resource Name (ARN) specifying the server certificate. For more information about ARNs and how to use them in policies, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) specifying the server certificate. For more information about ARNs and how to use them in policies, see IAM identifiers in the IAM User Guide.

" }, "UploadDate":{ "shape":"dateType", @@ -6792,7 +7357,7 @@ }, "ServiceNamespace":{ "shape":"serviceNamespaceType", - "documentation":"

The namespace of the service in which access was attempted.

To learn the service namespace of a service, go to Actions, Resources, and Condition Keys for AWS Services in the IAM User Guide. Choose the name of the service to view details for that service. In the first paragraph, find the service prefix. For example, (service prefix: a4b). For more information about service namespaces, see AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The namespace of the service in which access was attempted.

To learn the service namespace of a service, see Actions, resources, and condition keys for AWS services in the Service Authorization Reference. Choose the name of the service to view details for that service. In the first paragraph, find the service prefix. For example, (service prefix: a4b). For more information about service namespaces, see AWS Service Namespaces in the AWS General Reference.

" }, "LastAuthenticatedEntity":{ "shape":"arnType", @@ -6924,11 +7489,11 @@ "members":{ "PolicyArn":{ "shape":"arnType", - "documentation":"

The Amazon Resource Name (ARN) of the IAM policy whose default version you want to set.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM policy whose default version you want to set.

For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.

" }, "VersionId":{ "shape":"policyVersionIdType", - "documentation":"

The version of the policy to set as the default (operative) version.

For more information about managed policy versions, see Versioning for Managed Policies in the IAM User Guide.

" + "documentation":"

The version of the policy to set as the default (operative) version.

For more information about managed policy versions, see Versioning for managed policies in the IAM User Guide.

" } } }, @@ -6938,7 +7503,7 @@ "members":{ "GlobalEndpointTokenVersion":{ "shape":"globalEndpointTokenVersion", - "documentation":"

The version of the global endpoint token. Version 1 tokens are valid only in AWS Regions that are available by default. These tokens do not work in manually enabled Regions, such as Asia Pacific (Hong Kong). Version 2 tokens are valid in all Regions. However, version 2 tokens are longer and might affect systems where you temporarily store tokens.

For information, see Activating and Deactivating STS in an AWS Region in the IAM User Guide.

" + "documentation":"

The version of the global endpoint token. Version 1 tokens are valid only in AWS Regions that are available by default. These tokens do not work in manually enabled Regions, such as Asia Pacific (Hong Kong). Version 2 tokens are valid in all Regions. However, version 2 tokens are longer and might affect systems where you temporarily store tokens.

For information, see Activating and deactivating STS in an AWS region in the IAM User Guide.

" } } }, @@ -6987,7 +7552,7 @@ }, "PermissionsBoundaryPolicyInputList":{ "shape":"SimulationPolicyListType", - "documentation":"

The IAM permissions boundary policy to simulate. The permissions boundary sets the maximum permissions that an IAM entity can have. You can input only one permissions boundary when you pass a policy to this operation. For more information about permissions boundaries, see Permissions Boundaries for IAM Entities in the IAM User Guide. The policy input is specified as a string that contains the complete, valid JSON text of a permissions boundary policy.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

" + "documentation":"

The IAM permissions boundary policy to simulate. The permissions boundary sets the maximum permissions that an IAM entity can have. You can input only one permissions boundary when you pass a policy to this operation. For more information about permissions boundaries, see Permissions boundaries for IAM entities in the IAM User Guide. The policy input is specified as a string that contains the complete, valid JSON text of a permissions boundary policy.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

" }, "ActionNames":{ "shape":"ActionNameListType", @@ -6995,7 +7560,7 @@ }, "ResourceArns":{ "shape":"ResourceNameListType", - "documentation":"

A list of ARNs of AWS resources to include in the simulation. If this parameter is not provided, then the value defaults to * (all resources). Each API in the ActionNames parameter is evaluated for each resource in this list. The simulation determines the access result (allowed or denied) of each combination and reports it in the response.

The simulation does not automatically retrieve policies for the specified resources. If you want to include a resource policy in the simulation, then you must include the policy as a string in the ResourcePolicy parameter.

If you include a ResourcePolicy, then it must be applicable to all of the resources included in the simulation or you receive an invalid input error.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

A list of ARNs of AWS resources to include in the simulation. If this parameter is not provided, then the value defaults to * (all resources). Each API in the ActionNames parameter is evaluated for each resource in this list. The simulation determines the access result (allowed or denied) of each combination and reports it in the response. You can simulate resources that don't exist in your account.

The simulation does not automatically retrieve policies for the specified resources. If you want to include a resource policy in the simulation, then you must include the policy as a string in the ResourcePolicy parameter.

If you include a ResourcePolicy, then it must be applicable to all of the resources included in the simulation or you receive an invalid input error.

For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.

" }, "ResourcePolicy":{ "shape":"policyDocumentType", @@ -7015,7 +7580,7 @@ }, "ResourceHandlingOption":{ "shape":"ResourceHandlingOptionType", - "documentation":"

Specifies the type of simulation to run. Different API operations that support resource-based policies require different combinations of resources. By specifying the type of simulation to run, you enable the policy simulator to enforce the presence of the required resources to ensure reliable simulation results. If your simulation does not match one of the following scenarios, then you can omit this parameter. The following list shows each of the supported scenario values and the resources that you must define to run the simulation.

Each of the EC2 scenarios requires that you specify instance, image, and security-group resources. If your scenario includes an EBS volume, then you must specify that volume as a resource. If the EC2 scenario includes VPC, then you must supply the network-interface resource. If it includes an IP subnet, then you must specify the subnet resource. For more information on the EC2 scenario options, see Supported Platforms in the Amazon EC2 User Guide.

" + "documentation":"

Specifies the type of simulation to run. Different API operations that support resource-based policies require different combinations of resources. By specifying the type of simulation to run, you enable the policy simulator to enforce the presence of the required resources to ensure reliable simulation results. If your simulation does not match one of the following scenarios, then you can omit this parameter. The following list shows each of the supported scenario values and the resources that you must define to run the simulation.

Each of the EC2 scenarios requires that you specify instance, image, and security-group resources. If your scenario includes an EBS volume, then you must specify that volume as a resource. If the EC2 scenario includes VPC, then you must supply the network-interface resource. If it includes an IP subnet, then you must specify the subnet resource. For more information on the EC2 scenario options, see Supported platforms in the Amazon EC2 User Guide.

" }, "MaxItems":{ "shape":"maxItemsType", @@ -7054,7 +7619,7 @@ "members":{ "PolicySourceArn":{ "shape":"arnType", - "documentation":"

The Amazon Resource Name (ARN) of a user, group, or role whose policies you want to include in the simulation. If you specify a user, group, or role, the simulation includes all policies that are associated with that entity. If you specify a user, the simulation also includes all policies that are attached to any groups the user belongs to.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The Amazon Resource Name (ARN) of a user, group, or role whose policies you want to include in the simulation. If you specify a user, group, or role, the simulation includes all policies that are associated with that entity. If you specify a user, the simulation also includes all policies that are attached to any groups the user belongs to.

For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.

" }, "PolicyInputList":{ "shape":"SimulationPolicyListType", @@ -7062,7 +7627,7 @@ }, "PermissionsBoundaryPolicyInputList":{ "shape":"SimulationPolicyListType", - "documentation":"

The IAM permissions boundary policy to simulate. The permissions boundary sets the maximum permissions that the entity can have. You can input only one permissions boundary when you pass a policy to this operation. An IAM entity can only have one permissions boundary in effect at a time. For example, if a permissions boundary is attached to an entity and you pass in a different permissions boundary policy using this parameter, then the new permissions boundary policy is used for the simulation. For more information about permissions boundaries, see Permissions Boundaries for IAM Entities in the IAM User Guide. The policy input is specified as a string containing the complete, valid JSON text of a permissions boundary policy.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

" + "documentation":"

The IAM permissions boundary policy to simulate. The permissions boundary sets the maximum permissions that the entity can have. You can input only one permissions boundary when you pass a policy to this operation. An IAM entity can only have one permissions boundary in effect at a time. For example, if a permissions boundary is attached to an entity and you pass in a different permissions boundary policy using this parameter, then the new permissions boundary policy is used for the simulation. For more information about permissions boundaries, see Permissions boundaries for IAM entities in the IAM User Guide. The policy input is specified as a string containing the complete, valid JSON text of a permissions boundary policy.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

" }, "ActionNames":{ "shape":"ActionNameListType", @@ -7070,7 +7635,7 @@ }, "ResourceArns":{ "shape":"ResourceNameListType", - "documentation":"

A list of ARNs of AWS resources to include in the simulation. If this parameter is not provided, then the value defaults to * (all resources). Each API in the ActionNames parameter is evaluated for each resource in this list. The simulation determines the access result (allowed or denied) of each combination and reports it in the response.

The simulation does not automatically retrieve policies for the specified resources. If you want to include a resource policy in the simulation, then you must include the policy as a string in the ResourcePolicy parameter.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

A list of ARNs of AWS resources to include in the simulation. If this parameter is not provided, then the value defaults to * (all resources). Each API in the ActionNames parameter is evaluated for each resource in this list. The simulation determines the access result (allowed or denied) of each combination and reports it in the response. You can simulate resources that don't exist in your account.

The simulation does not automatically retrieve policies for the specified resources. If you want to include a resource policy in the simulation, then you must include the policy as a string in the ResourcePolicy parameter.

For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.

" }, "ResourcePolicy":{ "shape":"policyDocumentType", @@ -7082,7 +7647,7 @@ }, "CallerArn":{ "shape":"ResourceNameType", - "documentation":"

The ARN of the IAM user that you want to specify as the simulated caller of the API operations. If you do not specify a CallerArn, it defaults to the ARN of the user that you specify in PolicySourceArn, if you specified a user. If you include both a PolicySourceArn (for example, arn:aws:iam::123456789012:user/David) and a CallerArn (for example, arn:aws:iam::123456789012:user/Bob), the result is that you simulate calling the API operations as Bob, as if Bob had David's policies.

You can specify only the ARN of an IAM user. You cannot specify the ARN of an assumed role, federated user, or a service principal.

CallerArn is required if you include a ResourcePolicy and the PolicySourceArn is not the ARN for an IAM user. This is required so that the resource-based policy's Principal element has a value to use in evaluating the policy.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The ARN of the IAM user that you want to specify as the simulated caller of the API operations. If you do not specify a CallerArn, it defaults to the ARN of the user that you specify in PolicySourceArn, if you specified a user. If you include both a PolicySourceArn (for example, arn:aws:iam::123456789012:user/David) and a CallerArn (for example, arn:aws:iam::123456789012:user/Bob), the result is that you simulate calling the API operations as Bob, as if Bob had David's policies.

You can specify only the ARN of an IAM user. You cannot specify the ARN of an assumed role, federated user, or a service principal.

CallerArn is required if you include a ResourcePolicy and the PolicySourceArn is not the ARN for an IAM user. This is required so that the resource-based policy's Principal element has a value to use in evaluating the policy.

For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.

" }, "ContextEntries":{ "shape":"ContextEntryListType", @@ -7090,7 +7655,7 @@ }, "ResourceHandlingOption":{ "shape":"ResourceHandlingOptionType", - "documentation":"

Specifies the type of simulation to run. Different API operations that support resource-based policies require different combinations of resources. By specifying the type of simulation to run, you enable the policy simulator to enforce the presence of the required resources to ensure reliable simulation results. If your simulation does not match one of the following scenarios, then you can omit this parameter. The following list shows each of the supported scenario values and the resources that you must define to run the simulation.

Each of the EC2 scenarios requires that you specify instance, image, and security group resources. If your scenario includes an EBS volume, then you must specify that volume as a resource. If the EC2 scenario includes VPC, then you must supply the network interface resource. If it includes an IP subnet, then you must specify the subnet resource. For more information on the EC2 scenario options, see Supported Platforms in the Amazon EC2 User Guide.

" + "documentation":"

Specifies the type of simulation to run. Different API operations that support resource-based policies require different combinations of resources. By specifying the type of simulation to run, you enable the policy simulator to enforce the presence of the required resources to ensure reliable simulation results. If your simulation does not match one of the following scenarios, then you can omit this parameter. The following list shows each of the supported scenario values and the resources that you must define to run the simulation.

Each of the EC2 scenarios requires that you specify instance, image, and security group resources. If your scenario includes an EBS volume, then you must specify that volume as a resource. If the EC2 scenario includes VPC, then you must supply the network interface resource. If it includes an IP subnet, then you must specify the subnet resource. For more information on the EC2 scenario options, see Supported platforms in the Amazon EC2 User Guide.

" }, "MaxItems":{ "shape":"maxItemsType", @@ -7148,7 +7713,75 @@ "documentation":"

The value associated with this tag. For example, tags with a key name of Department could have values such as Human Resources, Accounting, and Support. Tags with a key name of Cost Center might have values that consist of the number associated with the different cost centers in your company. Typically, many resources have tags with the same key name but with different values.

AWS always interprets the tag Value as a single string. If you need to store an array, you can store comma-separated values in the string. However, you must interpret the value in your code.

" } }, - "documentation":"

A structure that represents user-provided metadata that can be associated with a resource such as an IAM user or role. For more information about tagging, see Tagging IAM Identities in the IAM User Guide.

" + "documentation":"

A structure that represents user-provided metadata that can be associated with an IAM resource. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" + }, + "TagInstanceProfileRequest":{ + "type":"structure", + "required":[ + "InstanceProfileName", + "Tags" + ], + "members":{ + "InstanceProfileName":{ + "shape":"instanceProfileNameType", + "documentation":"

The name of the IAM instance profile to which you want to add tags.

This parameter accepts (through its regex pattern) a string of characters that consist of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

" + }, + "Tags":{ + "shape":"tagListType", + "documentation":"

The list of tags that you want to attach to the IAM instance profile. Each tag consists of a key name and an associated value.

" + } + } + }, + "TagMFADeviceRequest":{ + "type":"structure", + "required":[ + "SerialNumber", + "Tags" + ], + "members":{ + "SerialNumber":{ + "shape":"serialNumberType", + "documentation":"

The unique identifier for the IAM virtual MFA device to which you want to add tags. For virtual MFA devices, the serial number is the same as the ARN.

This parameter accepts (through its regex pattern) a string of characters that consist of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

" + }, + "Tags":{ + "shape":"tagListType", + "documentation":"

The list of tags that you want to attach to the IAM virtual MFA device. Each tag consists of a key name and an associated value.

" + } + } + }, + "TagOpenIDConnectProviderRequest":{ + "type":"structure", + "required":[ + "OpenIDConnectProviderArn", + "Tags" + ], + "members":{ + "OpenIDConnectProviderArn":{ + "shape":"arnType", + "documentation":"

The ARN of the OIDC identity provider in IAM to which you want to add tags.

This parameter accepts (through its regex pattern) a string of characters that consist of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

" + }, + "Tags":{ + "shape":"tagListType", + "documentation":"

The list of tags that you want to attach to the OIDC identity provider in IAM. Each tag consists of a key name and an associated value.

" + } + } + }, + "TagPolicyRequest":{ + "type":"structure", + "required":[ + "PolicyArn", + "Tags" + ], + "members":{ + "PolicyArn":{ + "shape":"arnType", + "documentation":"

The ARN of the IAM customer managed policy to which you want to add tags.

This parameter accepts (through its regex pattern) a string of characters that consist of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

" + }, + "Tags":{ + "shape":"tagListType", + "documentation":"

The list of tags that you want to attach to the IAM customer managed policy. Each tag consists of a key name and an associated value.

" + } + } }, "TagRoleRequest":{ "type":"structure", @@ -7159,11 +7792,45 @@ "members":{ "RoleName":{ "shape":"roleNameType", - "documentation":"

The name of the role that you want to add tags to.

This parameter accepts (through its regex pattern) a string of characters that consist of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: _+=,.@-

" + "documentation":"

The name of the IAM role to which you want to add tags.

This parameter accepts (through its regex pattern) a string of characters that consist of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: _+=,.@-

" }, "Tags":{ "shape":"tagListType", - "documentation":"

The list of tags that you want to attach to the role. Each tag consists of a key name and an associated value. You can specify this with a JSON string.

" + "documentation":"

The list of tags that you want to attach to the IAM role. Each tag consists of a key name and an associated value.

" + } + } + }, + "TagSAMLProviderRequest":{ + "type":"structure", + "required":[ + "SAMLProviderArn", + "Tags" + ], + "members":{ + "SAMLProviderArn":{ + "shape":"arnType", + "documentation":"

The ARN of the SAML identity provider in IAM to which you want to add tags.

This parameter accepts (through its regex pattern) a string of characters that consist of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

" + }, + "Tags":{ + "shape":"tagListType", + "documentation":"

The list of tags that you want to attach to the SAML identity provider in IAM. Each tag consists of a key name and an associated value.

" + } + } + }, + "TagServerCertificateRequest":{ + "type":"structure", + "required":[ + "ServerCertificateName", + "Tags" + ], + "members":{ + "ServerCertificateName":{ + "shape":"serverCertificateNameType", + "documentation":"

The name of the IAM server certificate to which you want to add tags.

This parameter accepts (through its regex pattern) a string of characters that consist of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

" + }, + "Tags":{ + "shape":"tagListType", + "documentation":"

The list of tags that you want to attach to the IAM server certificate. Each tag consists of a key name and an associated value.

" } } }, @@ -7176,11 +7843,11 @@ "members":{ "UserName":{ "shape":"existingUserNameType", - "documentation":"

The name of the user that you want to add tags to.

This parameter accepts (through its regex pattern) a string of characters that consist of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

" + "documentation":"

The name of the IAM user to which you want to add tags.

This parameter accepts (through its regex pattern) a string of characters that consist of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

" }, "Tags":{ "shape":"tagListType", - "documentation":"

The list of tags that you want to attach to the user. Each tag consists of a key name and an associated value.

" + "documentation":"

The list of tags that you want to attach to the IAM user. Each tag consists of a key name and an associated value.

" } } }, @@ -7233,6 +7900,74 @@ }, "exception":true }, + "UntagInstanceProfileRequest":{ + "type":"structure", + "required":[ + "InstanceProfileName", + "TagKeys" + ], + "members":{ + "InstanceProfileName":{ + "shape":"instanceProfileNameType", + "documentation":"

The name of the IAM instance profile from which you want to remove tags.

This parameter accepts (through its regex pattern) a string of characters that consist of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

" + }, + "TagKeys":{ + "shape":"tagKeyListType", + "documentation":"

A list of key names as a simple array of strings. The tags with matching keys are removed from the specified instance profile.

" + } + } + }, + "UntagMFADeviceRequest":{ + "type":"structure", + "required":[ + "SerialNumber", + "TagKeys" + ], + "members":{ + "SerialNumber":{ + "shape":"serialNumberType", + "documentation":"

The unique identifier for the IAM virtual MFA device from which you want to remove tags. For virtual MFA devices, the serial number is the same as the ARN.

This parameter accepts (through its regex pattern) a string of characters that consist of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

" + }, + "TagKeys":{ + "shape":"tagKeyListType", + "documentation":"

A list of key names as a simple array of strings. The tags with matching keys are removed from the specified virtual MFA device.

" + } + } + }, + "UntagOpenIDConnectProviderRequest":{ + "type":"structure", + "required":[ + "OpenIDConnectProviderArn", + "TagKeys" + ], + "members":{ + "OpenIDConnectProviderArn":{ + "shape":"arnType", + "documentation":"

The ARN of the OIDC provider in IAM from which you want to remove tags.

This parameter accepts (through its regex pattern) a string of characters that consist of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

" + }, + "TagKeys":{ + "shape":"tagKeyListType", + "documentation":"

A list of key names as a simple array of strings. The tags with matching keys are removed from the specified OIDC provider.

" + } + } + }, + "UntagPolicyRequest":{ + "type":"structure", + "required":[ + "PolicyArn", + "TagKeys" + ], + "members":{ + "PolicyArn":{ + "shape":"arnType", + "documentation":"

The ARN of the IAM customer managed policy from which you want to remove tags.

This parameter accepts (through its regex pattern) a string of characters that consist of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

" + }, + "TagKeys":{ + "shape":"tagKeyListType", + "documentation":"

A list of key names as a simple array of strings. The tags with matching keys are removed from the specified policy.

" + } + } + }, "UntagRoleRequest":{ "type":"structure", "required":[ @@ -7250,6 +7985,40 @@ } } }, + "UntagSAMLProviderRequest":{ + "type":"structure", + "required":[ + "SAMLProviderArn", + "TagKeys" + ], + "members":{ + "SAMLProviderArn":{ + "shape":"arnType", + "documentation":"

The ARN of the SAML identity provider in IAM from which you want to remove tags.

This parameter accepts (through its regex pattern) a string of characters that consist of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

" + }, + "TagKeys":{ + "shape":"tagKeyListType", + "documentation":"

A list of key names as a simple array of strings. The tags with matching keys are removed from the specified SAML identity provider.

" + } + } + }, + "UntagServerCertificateRequest":{ + "type":"structure", + "required":[ + "ServerCertificateName", + "TagKeys" + ], + "members":{ + "ServerCertificateName":{ + "shape":"serverCertificateNameType", + "documentation":"

The name of the IAM server certificate from which you want to remove tags.

This parameter accepts (through its regex pattern) a string of characters that consist of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-

" + }, + "TagKeys":{ + "shape":"tagKeyListType", + "documentation":"

A list of key names as a simple array of strings. The tags with matching keys are removed from the specified IAM server certificate.

" + } + } + }, "UntagUserRequest":{ "type":"structure", "required":[ @@ -7284,7 +8053,7 @@ }, "Status":{ "shape":"statusType", - "documentation":"

The status you want to assign to the secret access key. Active means that the key can be used for API calls to AWS, while Inactive means that the key cannot be used.

" + "documentation":"

The status you want to assign to the secret access key. Active means that the key can be used for programmatic calls to AWS, while Inactive means that the key cannot be used.

" } } }, @@ -7313,7 +8082,7 @@ }, "AllowUsersToChangePassword":{ "shape":"booleanType", - "documentation":"

Allows all IAM users in your account to use the AWS Management Console to change their own passwords. For more information, see Letting IAM Users Change Their Own Passwords in the IAM User Guide.

If you do not specify a value for this parameter, then the operation uses the default value of false. The result is that IAM users in the account do not automatically have permissions to change their own password.

" + "documentation":"

Allows all IAM users in your account to use the AWS Management Console to change their own passwords. For more information, see Letting IAM users change their own passwords in the IAM User Guide.

If you do not specify a value for this parameter, then the operation uses the default value of false. The result is that IAM users in the account do not automatically have permissions to change their own password.

" }, "MaxPasswordAge":{ "shape":"maxPasswordAgeType", @@ -7391,7 +8160,7 @@ "members":{ "OpenIDConnectProviderArn":{ "shape":"arnType", - "documentation":"

The Amazon Resource Name (ARN) of the IAM OIDC provider resource object for which you want to update the thumbprint. You can get a list of OIDC provider ARNs by using the ListOpenIDConnectProviders operation.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM OIDC provider resource object for which you want to update the thumbprint. You can get a list of OIDC provider ARNs by using the ListOpenIDConnectProviders operation.

For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.

" }, "ThumbprintList":{ "shape":"thumbprintListType", @@ -7439,7 +8208,7 @@ }, "MaxSessionDuration":{ "shape":"roleMaxSessionDurationType", - "documentation":"

The maximum session duration (in seconds) that you want to set for the specified role. If you do not specify a value for this setting, the default maximum of one hour is applied. This setting can have a value from 1 hour to 12 hours.

Anyone who assumes the role from the AWS CLI or API can use the DurationSeconds API parameter or the duration-seconds CLI parameter to request a longer session. The MaxSessionDuration setting determines the maximum duration that can be requested using the DurationSeconds parameter. If users don't specify a value for the DurationSeconds parameter, their security credentials are valid for one hour by default. This applies when you use the AssumeRole* API operations or the assume-role* CLI operations but does not apply when you use those operations to create a console URL. For more information, see Using IAM Roles in the IAM User Guide.

" + "documentation":"

The maximum session duration (in seconds) that you want to set for the specified role. If you do not specify a value for this setting, the default maximum of one hour is applied. This setting can have a value from 1 hour to 12 hours.

Anyone who assumes the role from the AWS CLI or API can use the DurationSeconds API parameter or the duration-seconds CLI parameter to request a longer session. The MaxSessionDuration setting determines the maximum duration that can be requested using the DurationSeconds parameter. If users don't specify a value for the DurationSeconds parameter, their security credentials are valid for one hour by default. This applies when you use the AssumeRole* API operations or the assume-role* CLI operations but does not apply when you use those operations to create a console URL. For more information, see Using IAM roles in the IAM User Guide.

" } } }, @@ -7461,7 +8230,7 @@ }, "SAMLProviderArn":{ "shape":"arnType", - "documentation":"

The Amazon Resource Name (ARN) of the SAML provider to update.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

The Amazon Resource Name (ARN) of the SAML provider to update.

For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference.

" } } }, @@ -7553,7 +8322,7 @@ }, "Status":{ "shape":"statusType", - "documentation":"

The status you want to assign to the certificate. Active means that the certificate can be used for API calls to AWS Inactive means that the certificate cannot be used.

" + "documentation":"

The status you want to assign to the certificate. Active means that the certificate can be used for programmatic calls to AWS. Inactive means that the certificate cannot be used.

" } } }, @@ -7612,7 +8381,7 @@ "members":{ "Path":{ "shape":"pathType", - "documentation":"

The path for the server certificate. For more information about paths, see IAM Identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/). This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

If you are uploading a server certificate specifically for use with Amazon CloudFront distributions, you must specify a path using the path parameter. The path must begin with /cloudfront and must include a trailing slash (for example, /cloudfront/test/).

" + "documentation":"

The path for the server certificate. For more information about paths, see IAM identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/). This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

If you are uploading a server certificate specifically for use with Amazon CloudFront distributions, you must specify a path using the path parameter. The path must begin with /cloudfront and must include a trailing slash (for example, /cloudfront/test/).

" }, "ServerCertificateName":{ "shape":"serverCertificateNameType", @@ -7629,6 +8398,10 @@ "CertificateChain":{ "shape":"certificateChainType", "documentation":"

The contents of the certificate chain. This is typically a concatenation of the PEM-encoded public key certificates of the chain.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

" + }, + "Tags":{ + "shape":"tagListType", + "documentation":"

A list of tags that you want to attach to the new IAM server certificate resource. Each tag consists of a key name and an associated value. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

If any one of the tags is invalid or if you exceed the allowed maximum number of tags, then the entire request fails and the resource is not created.

" } } }, @@ -7638,6 +8411,10 @@ "ServerCertificateMetadata":{ "shape":"ServerCertificateMetadata", "documentation":"

The meta information of the uploaded server certificate without its certificate body, certificate chain, and private key.

" + }, + "Tags":{ + "shape":"tagListType", + "documentation":"

A list of tags that are attached to the new IAM server certificate. The returned list of tags is sorted by tag key. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" } }, "documentation":"

Contains the response to a successful UploadServerCertificate request.

" @@ -7679,7 +8456,7 @@ "members":{ "Path":{ "shape":"pathType", - "documentation":"

The path to the user. For more information about paths, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The path to the user. For more information about paths, see IAM identifiers in the IAM User Guide.

" }, "UserName":{ "shape":"userNameType", @@ -7687,7 +8464,7 @@ }, "UserId":{ "shape":"idType", - "documentation":"

The stable and unique string identifying the user. For more information about IDs, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The stable and unique string identifying the user. For more information about IDs, see IAM identifiers in the IAM User Guide.

" }, "Arn":{ "shape":"arnType", @@ -7699,15 +8476,15 @@ }, "PasswordLastUsed":{ "shape":"dateType", - "documentation":"

The date and time, in ISO 8601 date-time format, when the user's password was last used to sign in to an AWS website. For a list of AWS websites that capture a user's last sign-in time, see the Credential Reports topic in the IAM User Guide. If a password is used more than once in a five-minute span, only the first use is returned in this field. If the field is null (no value), then it indicates that they never signed in with a password. This can be because:

A null value does not mean that the user never had a password. Also, if the user does not currently have a password but had one in the past, then this field contains the date and time the most recent password was used.

This value is returned only in the GetUser and ListUsers operations.

" + "documentation":"

The date and time, in ISO 8601 date-time format, when the user's password was last used to sign in to an AWS website. For a list of AWS websites that capture a user's last sign-in time, see the Credential reports topic in the IAM User Guide. If a password is used more than once in a five-minute span, only the first use is returned in this field. If the field is null (no value), then it indicates that they never signed in with a password. This can be because:

A null value does not mean that the user never had a password. Also, if the user does not currently have a password but had one in the past, then this field contains the date and time the most recent password was used.

This value is returned only in the GetUser and ListUsers operations.

" }, "PermissionsBoundary":{ "shape":"AttachedPermissionsBoundary", - "documentation":"

The ARN of the policy used to set the permissions boundary for the user.

For more information about permissions boundaries, see Permissions Boundaries for IAM Identities in the IAM User Guide.

" + "documentation":"

The ARN of the policy used to set the permissions boundary for the user.

For more information about permissions boundaries, see Permissions boundaries for IAM identities in the IAM User Guide.

" }, "Tags":{ "shape":"tagListType", - "documentation":"

A list of tags that are associated with the specified user. For more information about tagging, see Tagging IAM Identities in the IAM User Guide.

" + "documentation":"

A list of tags that are associated with the user. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" } }, "documentation":"

Contains information about an IAM user entity.

This data type is used as a response element in the following operations:

" @@ -7717,7 +8494,7 @@ "members":{ "Path":{ "shape":"pathType", - "documentation":"

The path to the user. For more information about paths, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The path to the user. For more information about paths, see IAM identifiers in the IAM User Guide.

" }, "UserName":{ "shape":"userNameType", @@ -7725,7 +8502,7 @@ }, "UserId":{ "shape":"idType", - "documentation":"

The stable and unique string identifying the user. For more information about IDs, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The stable and unique string identifying the user. For more information about IDs, see IAM identifiers in the IAM User Guide.

" }, "Arn":{"shape":"arnType"}, "CreateDate":{ @@ -7746,11 +8523,11 @@ }, "PermissionsBoundary":{ "shape":"AttachedPermissionsBoundary", - "documentation":"

The ARN of the policy used to set the permissions boundary for the user.

For more information about permissions boundaries, see Permissions Boundaries for IAM Identities in the IAM User Guide.

" + "documentation":"

The ARN of the policy used to set the permissions boundary for the user.

For more information about permissions boundaries, see Permissions boundaries for IAM identities in the IAM User Guide.

" }, "Tags":{ "shape":"tagListType", - "documentation":"

A list of tags that are associated with the specified user. For more information about tagging, see Tagging IAM Identities in the IAM User Guide.

" + "documentation":"

A list of tags that are associated with the user. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" } }, "documentation":"

Contains information about an IAM user, including all the user's policies and all the IAM groups the user is in.

This data type is used as a response element in the GetAccountAuthorizationDetails operation.

" @@ -7778,6 +8555,10 @@ "EnableDate":{ "shape":"dateType", "documentation":"

The date and time on which the virtual MFA device was enabled.

" + }, + "Tags":{ + "shape":"tagListType", + "documentation":"

A list of tags that are attached to the virtual MFA device. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

" } }, "documentation":"

Contains information about a virtual MFA device.

" @@ -7809,7 +8590,7 @@ }, "arnType":{ "type":"string", - "documentation":"

The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources.

For more information about ARNs, go to Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

", + "documentation":"

The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources.

For more information about ARNs, go to Amazon Resource Names (ARNs) in the AWS General Reference.

", "max":2048, "min":20 }, diff --git a/botocore/data/imagebuilder/2019-12-02/service-2.json b/botocore/data/imagebuilder/2019-12-02/service-2.json index 97d10b7e..9be54020 100644 --- a/botocore/data/imagebuilder/2019-12-02/service-2.json +++ b/botocore/data/imagebuilder/2019-12-02/service-2.json @@ -638,6 +638,26 @@ ], "documentation":"

Returns a list of image build versions.

" }, + "ListImagePackages":{ + "name":"ListImagePackages", + "http":{ + "method":"POST", + "requestUri":"/ListImagePackages" + }, + "input":{"shape":"ListImagePackagesRequest"}, + "output":{"shape":"ListImagePackagesResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidPaginationTokenException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"CallRateLimitExceededException"} + ], + "documentation":"

Lists the packages that are associated with an image build version, as determined by AWS Systems Manager Inventory at build time.

" + }, "ListImagePipelineImages":{ "name":"ListImagePipelineImages", "http":{ @@ -2279,6 +2299,7 @@ "io1", "io2", "gp2", + "gp3", "sc1", "st1" ] @@ -2704,6 +2725,24 @@ "type":"string", "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:\\d{12}|aws):(?:image-recipe|container-recipe|infrastructure-configuration|distribution-configuration|component|image|image-pipeline)/[a-z0-9-_]+(?:/(?:(?:x|\\d+)\\.(?:x|\\d+)\\.(?:x|\\d+))(?:/\\d+)?)?$" }, + "ImagePackage":{ + "type":"structure", + "members":{ + "packageName":{ + "shape":"NonEmptyString", + "documentation":"

The name of the package as reported to the operating system package manager.

" + }, + "packageVersion":{ + "shape":"NonEmptyString", + "documentation":"

The version of the package as reported to the operating system package manager.

" + } + }, + "documentation":"

Represents a package installed on an Image Builder image.

" + }, + "ImagePackageList":{ + "type":"list", + "member":{"shape":"ImagePackage"} + }, "ImagePipeline":{ "type":"structure", "members":{ @@ -3145,7 +3184,7 @@ "documentation":"

The instance types of the infrastructure configuration.

" }, "instanceProfileName":{ - "shape":"NonEmptyString", + "shape":"InstanceProfileNameType", "documentation":"

The instance profile of the infrastructure configuration.

" }, "securityGroupIds":{ @@ -3225,6 +3264,14 @@ "tags":{ "shape":"TagMap", "documentation":"

The tags of the infrastructure configuration.

" + }, + "instanceTypes":{ + "shape":"InstanceTypeList", + "documentation":"

The instance types of the infrastructure configuration.

" + }, + "instanceProfileName":{ + "shape":"InstanceProfileNameType", + "documentation":"

The instance profile of the infrastructure configuration.

" } }, "documentation":"

The infrastructure used when building EC2 AMIs.

" @@ -3271,6 +3318,12 @@ "type":"list", "member":{"shape":"InstanceBlockDeviceMapping"} }, + "InstanceProfileNameType":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[\\w+=,.@-]+$" + }, "InstanceType":{"type":"string"}, "InstanceTypeList":{ "type":"list", @@ -3547,6 +3600,42 @@ } } }, + "ListImagePackagesRequest":{ + "type":"structure", + "required":["imageBuildVersionArn"], + "members":{ + "imageBuildVersionArn":{ + "shape":"ImageBuildVersionArn", + "documentation":"

Filter results for the ListImagePackages request by the image build version ARN.

" + }, + "maxResults":{ + "shape":"RestrictedInteger", + "documentation":"

The maximum number of results to return from the ListImagePackages request.

", + "box":true + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" + } + } + }, + "ListImagePackagesResponse":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"NonEmptyString", + "documentation":"

The request ID that uniquely identifies this request.

" + }, + "imagePackageList":{ + "shape":"ImagePackageList", + "documentation":"

The list of Image Packages returned in the response.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.
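For reference, a rough boto3 sketch of driving this new operation, using the request and response member names defined above (imageBuildVersionArn, imagePackageList, nextToken); the ARN is a placeholder, and the token loop is written by hand rather than assuming a registered paginator.

    import boto3

    imagebuilder = boto3.client("imagebuilder")

    # Placeholder ARN of the image build version to inspect.
    image_build_version_arn = (
        "arn:aws:imagebuilder:us-east-1:123456789012:image/example/1.0.0/1"
    )

    packages = []
    kwargs = {"imageBuildVersionArn": image_build_version_arn}
    while True:
        page = imagebuilder.list_image_packages(**kwargs)
        packages.extend(page.get("imagePackageList", []))
        token = page.get("nextToken")
        if not token:
            break
        kwargs["nextToken"] = token

    for pkg in packages:
        print(pkg["packageName"], pkg["packageVersion"])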

" + } + } + }, "ListImagePipelineImagesRequest":{ "type":"structure", "required":["imagePipelineArn"], @@ -4040,6 +4129,10 @@ "shape":"NonEmptyString", "documentation":"

The cron expression determines how often EC2 Image Builder evaluates your pipelineExecutionStartCondition.

For information on how to format a cron expression in Image Builder, see Use cron expressions in EC2 Image Builder.

" }, + "timezone":{ + "shape":"Timezone", + "documentation":"

The timezone that applies to the scheduling expression. For example, \"Etc/UTC\" or \"America/Los_Angeles\", in the IANA timezone format. If not specified, this defaults to UTC.

" + }, "pipelineExecutionStartCondition":{ "shape":"PipelineExecutionStartCondition", "documentation":"

The condition configures when the pipeline should trigger a new image build. When the pipelineExecutionStartCondition is set to EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE, and you use semantic version filters on the source image or components in your image recipe, EC2 Image Builder will build a new image only when there are new versions of the image or components in your recipe that match the semantic version filter. When it is set to EXPRESSION_MATCH_ONLY, it will build a new image every time the CRON expression matches the current time. For semantic version syntax, see CreateComponent in the EC2 Image Builder API Reference.
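A hedged sketch of how the new timezone member could be passed when creating an image pipeline; the pipeline name, ARNs, and cron expression are placeholders, and the member names follow the Schedule shape documented above.

    import boto3

    imagebuilder = boto3.client("imagebuilder")

    schedule = {
        "scheduleExpression": "cron(0 9 * * *)",  # illustrative; see the Image Builder cron docs
        "timezone": "America/Los_Angeles",        # IANA name; omit to default to UTC
        "pipelineExecutionStartCondition": "EXPRESSION_MATCH_ONLY",
    }

    # Placeholder ARNs; substitute resources that exist in your account.
    imagebuilder.create_image_pipeline(
        name="example-pipeline",
        imageRecipeArn="arn:aws:imagebuilder:us-east-1:123456789012:image-recipe/example/1.0.0",
        infrastructureConfigurationArn="arn:aws:imagebuilder:us-east-1:123456789012:infrastructure-configuration/example",
        schedule=schedule,
    )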

" @@ -4186,6 +4279,12 @@ }, "documentation":"

The container repository where the output container image is stored.

" }, + "Timezone":{ + "type":"string", + "max":100, + "min":3, + "pattern":"[a-zA-Z0-9]{2,}(?:\\/[a-zA-z0-9-_+]+)*" + }, "UntagResourceRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/iotevents/2018-07-27/service-2.json b/botocore/data/iotevents/2018-07-27/service-2.json index b678ef87..595629f5 100644 --- a/botocore/data/iotevents/2018-07-27/service-2.json +++ b/botocore/data/iotevents/2018-07-27/service-2.json @@ -102,6 +102,23 @@ ], "documentation":"

Describes a detector model. If the version parameter is not specified, information about the latest version is returned.

" }, + "DescribeDetectorModelAnalysis":{ + "name":"DescribeDetectorModelAnalysis", + "http":{ + "method":"GET", + "requestUri":"/analysis/detector-models/{analysisId}" + }, + "input":{"shape":"DescribeDetectorModelAnalysisRequest"}, + "output":{"shape":"DescribeDetectorModelAnalysisResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Retrieves execution information about a detector model analysis.

" + }, "DescribeInput":{ "name":"DescribeInput", "http":{ @@ -137,6 +154,23 @@ ], "documentation":"

Retrieves the current settings of the AWS IoT Events logging options.

" }, + "GetDetectorModelAnalysisResults":{ + "name":"GetDetectorModelAnalysisResults", + "http":{ + "method":"GET", + "requestUri":"/analysis/detector-models/{analysisId}/results" + }, + "input":{"shape":"GetDetectorModelAnalysisResultsRequest"}, + "output":{"shape":"GetDetectorModelAnalysisResultsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Retrieves one or more analysis results of the detector model.

" + }, "ListDetectorModelVersions":{ "name":"ListDetectorModelVersions", "http":{ @@ -220,6 +254,23 @@ ], "documentation":"

Sets or updates the AWS IoT Events logging options.

If you update the value of any loggingOptions field, it takes up to one minute for the change to take effect. If you change the policy attached to the role you specified in the roleArn field (for example, to correct an invalid policy), it takes up to five minutes for that change to take effect.

" }, + "StartDetectorModelAnalysis":{ + "name":"StartDetectorModelAnalysis", + "http":{ + "method":"POST", + "requestUri":"/analysis/detector-models/" + }, + "input":{"shape":"StartDetectorModelAnalysisRequest"}, + "output":{"shape":"StartDetectorModelAnalysisResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Performs an analysis of your detector model. For more information, see Running detector model analyses in the AWS IoT Events Developer Guide.
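Taken together, the three new operations form a start/poll/fetch workflow. A rough boto3 sketch follows, with a deliberately minimal placeholder detector model definition; method and member names follow the shapes defined in this file.

    import time

    import boto3

    iotevents = boto3.client("iotevents")

    # Minimal placeholder definition, just to show the call shape.
    detector_model_definition = {
        "states": [{"stateName": "Idle"}],
        "initialStateName": "Idle",
    }

    analysis_id = iotevents.start_detector_model_analysis(
        detectorModelDefinition=detector_model_definition
    )["analysisId"]

    # Poll until the analysis leaves the RUNNING state (COMPLETE or FAILED).
    while True:
        status = iotevents.describe_detector_model_analysis(analysisId=analysis_id)["status"]
        if status != "RUNNING":
            break
        time.sleep(2)

    # Page through the analysis results.
    results, token = [], None
    while True:
        kwargs = {"analysisId": analysis_id}
        if token:
            kwargs["nextToken"] = token
        page = iotevents.get_detector_model_analysis_results(**kwargs)
        results.extend(page["analysisResults"])
        token = page.get("nextToken")
        if not token:
            break

    for result in results:
        print(result.get("level"), result.get("type"), result.get("message"))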

" + }, "TagResource":{ "name":"TagResource", "http":{ @@ -338,11 +389,11 @@ }, "dynamoDB":{ "shape":"DynamoDBAction", - "documentation":"

Writes to the DynamoDB table that you created. The default action payload contains all attribute-value pairs that have the information about the detector model instance and the event that triggered the action. You can also customize the payload. One column of the DynamoDB table receives all attribute-value pairs in the payload that you specify. For more information, see Actions in AWS IoT Events Developer Guide.

" + "documentation":"

Writes to the DynamoDB table that you created. The default action payload contains all attribute-value pairs that have the information about the detector model instance and the event that triggered the action. You can customize the payload. One column of the DynamoDB table receives all attribute-value pairs in the payload that you specify. For more information, see Actions in AWS IoT Events Developer Guide.

" }, "dynamoDBv2":{ "shape":"DynamoDBv2Action", - "documentation":"

Writes to the DynamoDB table that you created. The default action payload contains all attribute-value pairs that have the information about the detector model instance and the event that triggered the action. You can also customize the payload. A separate column of the DynamoDB table receives one attribute-value pair in the payload that you specify. For more information, see Actions in AWS IoT Events Developer Guide.

" + "documentation":"

Writes to the DynamoDB table that you created. The default action payload contains all attribute-value pairs that have the information about the detector model instance and the event that triggered the action. You can customize the payload. A separate column of the DynamoDB table receives one attribute-value pair in the payload that you specify. For more information, see Actions in AWS IoT Events Developer Guide.

" }, "iotSiteWise":{ "shape":"IotSiteWiseAction", @@ -360,6 +411,66 @@ "max":2048, "min":1 }, + "AnalysisId":{"type":"string"}, + "AnalysisMessage":{"type":"string"}, + "AnalysisResult":{ + "type":"structure", + "members":{ + "type":{ + "shape":"AnalysisType", + "documentation":"

The type of the analysis result. Analyses fall into the following types based on the validators used to generate the analysis result:

For more information, see Running detector model analyses in the AWS IoT Events Developer Guide.

" + }, + "level":{ + "shape":"AnalysisResultLevel", + "documentation":"

The severity level of the analysis result. Analysis results fall into three general categories based on the severity level:

" + }, + "message":{ + "shape":"AnalysisMessage", + "documentation":"

Contains additional information about the analysis result.

" + }, + "locations":{ + "shape":"AnalysisResultLocations", + "documentation":"

Contains one or more locations that you can use to locate the fields in your detector model that the analysis result references.

" + } + }, + "documentation":"

Contains the result of the analysis.

" + }, + "AnalysisResultLevel":{ + "type":"string", + "enum":[ + "INFO", + "WARNING", + "ERROR" + ] + }, + "AnalysisResultLocation":{ + "type":"structure", + "members":{ + "path":{ + "shape":"AnalysisResultLocationPath", + "documentation":"

A JsonPath expression that identifies the error field in your detector model.

" + } + }, + "documentation":"

Contains information that you can use to locate the field in your detector model that the analysis result references.

" + }, + "AnalysisResultLocationPath":{"type":"string"}, + "AnalysisResultLocations":{ + "type":"list", + "member":{"shape":"AnalysisResultLocation"} + }, + "AnalysisResults":{ + "type":"list", + "member":{"shape":"AnalysisResult"} + }, + "AnalysisStatus":{ + "type":"string", + "enum":[ + "RUNNING", + "COMPLETE", + "FAILED" + ] + }, + "AnalysisType":{"type":"string"}, "AssetId":{"type":"string"}, "AssetPropertyAlias":{"type":"string"}, "AssetPropertyBooleanValue":{"type":"string"}, @@ -377,14 +488,14 @@ "members":{ "timeInSeconds":{ "shape":"AssetPropertyTimeInSeconds", - "documentation":"

The timestamp, in seconds, in the Unix epoch format. The valid range is between 1-31556889864403199. You can also specify an expression.

" + "documentation":"

The timestamp, in seconds, in the Unix epoch format. The valid range is between 1-31556889864403199.

" }, "offsetInNanos":{ "shape":"AssetPropertyOffsetInNanos", - "documentation":"

The nanosecond offset converted from timeInSeconds. The valid range is between 0-999999999. You can also specify an expression.

" + "documentation":"

The nanosecond offset converted from timeInSeconds. The valid range is between 0-999999999.

" } }, - "documentation":"

A structure that contains timestamp information. For more information, see TimeInNanos in the AWS IoT SiteWise API Reference.

For parameters that are string data type, you can specify the following options:

" + "documentation":"

A structure that contains timestamp information. For more information, see TimeInNanos in the AWS IoT SiteWise API Reference.

You must use expressions for all parameters in AssetPropertyTimestamp. The expressions accept literals, operators, functions, references, and substitution templates.

Examples

For more information, see Expressions in the AWS IoT Events Developer Guide.

" }, "AssetPropertyValue":{ "type":"structure", @@ -400,32 +511,32 @@ }, "quality":{ "shape":"AssetPropertyQuality", - "documentation":"

The quality of the asset property value. The value must be GOOD, BAD, or UNCERTAIN. You can also specify an expression.

" + "documentation":"

The quality of the asset property value. The value must be 'GOOD', 'BAD', or 'UNCERTAIN'.

" } }, - "documentation":"

A structure that contains value information. For more information, see AssetPropertyValue in the AWS IoT SiteWise API Reference.

For parameters that are string data type, you can specify the following options:

" + "documentation":"

A structure that contains value information. For more information, see AssetPropertyValue in the AWS IoT SiteWise API Reference.

You must use expressions for all parameters in AssetPropertyValue. The expressions accept literals, operators, functions, references, and substitution templates.

Examples

For more information, see Expressions in the AWS IoT Events Developer Guide.

" }, "AssetPropertyVariant":{ "type":"structure", "members":{ "stringValue":{ "shape":"AssetPropertyStringValue", - "documentation":"

The asset property value is a string. You can also specify an expression. If you use an expression, the evaluated result should be a string.

" + "documentation":"

The asset property value is a string. You must use an expression, and the evaluated result should be a string.

" }, "integerValue":{ "shape":"AssetPropertyIntegerValue", - "documentation":"

The asset property value is an integer. You can also specify an expression. If you use an expression, the evaluated result should be an integer.

" + "documentation":"

The asset property value is an integer. You must use an expression, and the evaluated result should be an integer.

" }, "doubleValue":{ "shape":"AssetPropertyDoubleValue", - "documentation":"

The asset property value is a double. You can also specify an expression. If you use an expression, the evaluated result should be a double.

" + "documentation":"

The asset property value is a double. You must use an expression, and the evaluated result should be a double.

" }, "booleanValue":{ "shape":"AssetPropertyBooleanValue", - "documentation":"

The asset property value is a Boolean value that must be TRUE or FALSE. You can also specify an expression. If you use an expression, the evaluated result should be a Boolean value.

" + "documentation":"

The asset property value is a Boolean value that must be 'TRUE' or 'FALSE'. You must use an expression, and the evaluated result should be a Boolean value.

" } }, - "documentation":"

A structure that contains an asset property value. For more information, see Variant in the AWS IoT SiteWise API Reference.

You must specify one of the following value types, depending on the dataType of the specified asset property. For more information, see AssetProperty in the AWS IoT SiteWise API Reference.

For parameters that are string data type, you can specify the following options:

" + "documentation":"

A structure that contains an asset property value. For more information, see Variant in the AWS IoT SiteWise API Reference.

You must use expressions for all parameters in AssetPropertyVariant. The expressions accept literals, operators, functions, references, and substitution templates.

Examples

For more information, see Expressions in the AWS IoT Events Developer Guide.

You must specify one of the following value types, depending on the dataType of the specified asset property. For more information, see AssetProperty in the AWS IoT SiteWise API Reference.

" }, "Attribute":{ "type":"structure", @@ -585,6 +696,27 @@ } }, "DeliveryStreamName":{"type":"string"}, + "DescribeDetectorModelAnalysisRequest":{ + "type":"structure", + "required":["analysisId"], + "members":{ + "analysisId":{ + "shape":"AnalysisId", + "documentation":"

The ID of the analysis result that you want to retrieve.

", + "location":"uri", + "locationName":"analysisId" + } + } + }, + "DescribeDetectorModelAnalysisResponse":{ + "type":"structure", + "members":{ + "status":{ + "shape":"AnalysisStatus", + "documentation":"

The status of the analysis activity. The status can be one of the following values:

" + } + } + }, "DescribeDetectorModelRequest":{ "type":"structure", "required":["detectorModelName"], @@ -847,11 +979,11 @@ "members":{ "hashKeyType":{ "shape":"DynamoKeyType", - "documentation":"

The data type for the hash key (also called the partition key). You can specify the following values:

If you don't specify hashKeyType, the default value is STRING.

" + "documentation":"

The data type for the hash key (also called the partition key). You can specify the following values:

If you don't specify hashKeyType, the default value is 'STRING'.

" }, "hashKeyField":{ "shape":"DynamoKeyField", - "documentation":"

The name of the hash key (also called the partition key).

" + "documentation":"

The name of the hash key (also called the partition key). The hashKeyField value must match the partition key of the target DynamoDB table.

" }, "hashKeyValue":{ "shape":"DynamoKeyValue", @@ -859,11 +991,11 @@ }, "rangeKeyType":{ "shape":"DynamoKeyType", - "documentation":"

The data type for the range key (also called the sort key), You can specify the following values:

If you don't specify rangeKeyField, the default value is STRING.

" + "documentation":"

The data type for the range key (also called the sort key). You can specify the following values:

If you don't specify rangeKeyField, the default value is 'STRING'.

" }, "rangeKeyField":{ "shape":"DynamoKeyField", - "documentation":"

The name of the range key (also called the sort key).

" + "documentation":"

The name of the range key (also called the sort key). The rangeKeyField value must match the sort key of the target DynamoDB table.

" }, "rangeKeyValue":{ "shape":"DynamoKeyValue", @@ -871,7 +1003,7 @@ }, "operation":{ "shape":"DynamoOperation", - "documentation":"

The type of operation to perform. You can specify the following values:

If you don't specify this parameter, AWS IoT Events triggers the INSERT operation.

" + "documentation":"

The type of operation to perform. You can specify the following values:

If you don't specify this parameter, AWS IoT Events triggers the 'INSERT' operation.

" }, "payloadField":{ "shape":"DynamoKeyField", @@ -879,11 +1011,11 @@ }, "tableName":{ "shape":"DynamoTableName", - "documentation":"

The name of the DynamoDB table.

" + "documentation":"

The name of the DynamoDB table. The tableName value must match the table name of the target DynamoDB table.

" }, "payload":{"shape":"Payload"} }, - "documentation":"

Defines an action to write to the Amazon DynamoDB table that you created. The standard action payload contains all attribute-value pairs that have the information about the detector model instance and the event that triggered the action. You can also customize the payload. One column of the DynamoDB table receives all attribute-value pairs in the payload that you specify.

The tableName and hashKeyField values must match the table name and the partition key of the DynamoDB table.

If the DynamoDB table also has a sort key, you must specify rangeKeyField. The rangeKeyField value must match the sort key.

The hashKeyValue and rangeKeyValue use substitution templates. These templates provide data at runtime. The syntax is ${sql-expression}.

You can use expressions for parameters that are string data type. For more information, see Expressions in the AWS IoT Events Developer Guide.

If the defined payload type is a string, DynamoDBAction writes non-JSON data to the DynamoDB table as binary data. The DynamoDB console displays the data as Base64-encoded text. The payloadField is <payload-field>_raw.

" + "documentation":"

Defines an action to write to the Amazon DynamoDB table that you created. The standard action payload contains all the information about the detector model instance and the event that triggered the action. You can customize the payload. One column of the DynamoDB table receives all attribute-value pairs in the payload that you specify.

You must use expressions for all parameters in DynamoDBAction. The expressions accept literals, operators, functions, references, and substitution templates.

Examples

For more information, see Expressions in the AWS IoT Events Developer Guide.

If the defined payload type is a string, DynamoDBAction writes non-JSON data to the DynamoDB table as binary data. The DynamoDB console displays the data as Base64-encoded text. The value for the payloadField parameter is <payload-field>_raw.
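To make the "expressions for all parameters" rule concrete, here is a hedged sketch of a dynamoDB action as it might appear inside a detector model event; the table, key, and input names are placeholders, string literals are single-quoted per the IoT Events expression syntax, and references use the $input form.

    # Every value is an IoT Events expression: a quoted literal, a reference such as
    # $input.<inputName>.<field>, or a ${...} substitution template.
    dynamodb_action = {
        "dynamoDB": {
            "tableName": "'DeviceTelemetry'",                   # placeholder table name
            "hashKeyField": "'deviceId'",                       # must match the table's partition key
            "hashKeyValue": "$input.TelemetryInput.deviceId",   # reference into a placeholder input
            "hashKeyType": "'STRING'",
            "payloadField": "'payload'",
        }
    }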

" }, "DynamoDBv2Action":{ "type":"structure", @@ -895,7 +1027,7 @@ }, "payload":{"shape":"Payload"} }, - "documentation":"

Defines an action to write to the Amazon DynamoDB table that you created. The default action payload contains all attribute-value pairs that have the information about the detector model instance and the event that triggered the action. You can also customize the payload. A separate column of the DynamoDB table receives one attribute-value pair in the payload that you specify.

The type value for Payload must be JSON.

You can use expressions for parameters that are strings. For more information, see Expressions in the AWS IoT Events Developer Guide.

" + "documentation":"

Defines an action to write to the Amazon DynamoDB table that you created. The default action payload contains all the information about the detector model instance and the event that triggered the action. You can customize the payload. A separate column of the DynamoDB table receives one attribute-value pair in the payload that you specify.

You must use expressions for all parameters in DynamoDBv2Action. The expressions accept literals, operators, functions, references, and substitution templates.

Examples

For more information, see Expressions in the AWS IoT Events Developer Guide.

The value for the type parameter in Payload must be JSON.

" }, "DynamoKeyField":{"type":"string"}, "DynamoKeyType":{"type":"string"}, @@ -959,6 +1091,43 @@ "type":"string", "pattern":"([\\n\\t])|(\\r\\n)|(,)" }, + "GetDetectorModelAnalysisResultsRequest":{ + "type":"structure", + "required":["analysisId"], + "members":{ + "analysisId":{ + "shape":"AnalysisId", + "documentation":"

The ID of the analysis result that you want to retrieve.

", + "location":"uri", + "locationName":"analysisId" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token that you can use to return the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxAnalysisResults", + "documentation":"

The maximum number of results to be returned per request.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "GetDetectorModelAnalysisResultsResponse":{ + "type":"structure", + "members":{ + "analysisResults":{ + "shape":"AnalysisResults", + "documentation":"

Contains information about one or more analysis results.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token that you can use to return the next set of results, or null if there are no more results.

" + } + } + }, "Input":{ "type":"structure", "members":{ @@ -1121,26 +1290,26 @@ "members":{ "entryId":{ "shape":"AssetPropertyEntryId", - "documentation":"

A unique identifier for this entry. You can use the entry ID to track which data entry causes an error in case of failure. The default is a new unique identifier. You can also specify an expression.

" + "documentation":"

A unique identifier for this entry. You can use the entry ID to track which data entry causes an error in case of failure. The default is a new unique identifier.

" }, "assetId":{ "shape":"AssetId", - "documentation":"

The ID of the asset that has the specified property. You can specify an expression.

" + "documentation":"

The ID of the asset that has the specified property.

" }, "propertyId":{ "shape":"AssetPropertyId", - "documentation":"

The ID of the asset property. You can specify an expression.

" + "documentation":"

The ID of the asset property.

" }, "propertyAlias":{ "shape":"AssetPropertyAlias", - "documentation":"

The alias of the asset property. You can also specify an expression.

" + "documentation":"

The alias of the asset property.

" }, "propertyValue":{ "shape":"AssetPropertyValue", "documentation":"

The value to send to the asset property. This value contains timestamp, quality, and value (TQV) information.

" } }, - "documentation":"

Sends information about the detector model instance and the event that triggered the action to a specified asset property in AWS IoT SiteWise.

You must specify either propertyAlias or both assetId and propertyId to identify the target asset property in AWS IoT SiteWise.

For parameters that are string data type, you can specify the following options:

" + "documentation":"

Sends information about the detector model instance and the event that triggered the action to a specified asset property in AWS IoT SiteWise.

You must use expressions for all parameters in IotSiteWiseAction. The expressions accept literals, operators, functions, references, and substitution templates.

Examples

You must specify either propertyAlias or both assetId and propertyId to identify the target asset property in AWS IoT SiteWise.

For more information, see Expressions in the AWS IoT Events Developer Guide.
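Likewise, a hedged sketch of an iotSiteWise action whose parameters are all expressions; the property alias, input name, and values are placeholders built from the AssetPropertyValue, AssetPropertyTimestamp, and AssetPropertyVariant shapes above.

    iotsitewise_action = {
        "iotSiteWise": {
            "propertyAlias": "'/example/windfarm/3/turbine/7/temperature'",  # placeholder alias
            "propertyValue": {
                "value": {
                    # The evaluated result should be a double for a double-typed property.
                    "doubleValue": "$input.TelemetryInput.temperature",
                },
                "timestamp": {
                    "timeInSeconds": "$input.TelemetryInput.timestampSeconds",
                },
                "quality": "'GOOD'",
            },
        }
    }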

" }, "IotTopicPublishAction":{ "type":"structure", @@ -1202,13 +1371,13 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

The token for the next set of results.

", + "documentation":"

The token that you can use to return the next set of results.

", "location":"querystring", "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of results to return at one time.

", + "documentation":"

The maximum number of results to be returned per request.

", "location":"querystring", "locationName":"maxResults" } @@ -1223,7 +1392,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

A token to retrieve the next set of results, or null if there are no additional results.

" + "documentation":"

The token that you can use to return the next set of results, or null if there are no more results.

" } } }, @@ -1232,13 +1401,13 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

The token for the next set of results.

", + "documentation":"

The token that you can use to return the next set of results.

", "location":"querystring", "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of results to return at one time.

", + "documentation":"

The maximum number of results to be returned per request.

", "location":"querystring", "locationName":"maxResults" } @@ -1253,7 +1422,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

A token to retrieve the next set of results, or null if there are no additional results.

" + "documentation":"

The token that you can use to return the next set of results, or null if there are no more results.

" } } }, @@ -1262,13 +1431,13 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

The token for the next set of results.

", + "documentation":"

The token that you can use to return the next set of results.

", "location":"querystring", "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of results to return at one time.

", + "documentation":"

The maximum number of results to be returned per request.

", "location":"querystring", "locationName":"maxResults" } @@ -1283,7 +1452,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

A token to retrieve the next set of results, or null if there are no additional results.

" + "documentation":"

The token that you can use to return the next set of results, or null if there are no more results.

" } } }, @@ -1349,6 +1518,7 @@ "max":128, "min":1 }, + "MaxAnalysisResults":{"type":"integer"}, "MaxResults":{ "type":"integer", "max":250, @@ -1571,6 +1741,22 @@ }, "documentation":"

Sends information about the detector model instance and the event that triggered the action to an Amazon SQS queue.

" }, + "StartDetectorModelAnalysisRequest":{ + "type":"structure", + "required":["detectorModelDefinition"], + "members":{ + "detectorModelDefinition":{"shape":"DetectorModelDefinition"} + } + }, + "StartDetectorModelAnalysisResponse":{ + "type":"structure", + "members":{ + "analysisId":{ + "shape":"AnalysisId", + "documentation":"

The ID that you can use to retrieve the analysis result.

" + } + } + }, "State":{ "type":"structure", "required":["stateName"], diff --git a/botocore/data/iotsitewise/2019-12-02/service-2.json b/botocore/data/iotsitewise/2019-12-02/service-2.json index 7c67f5fd..8c0492ba 100644 --- a/botocore/data/iotsitewise/2019-12-02/service-2.json +++ b/botocore/data/iotsitewise/2019-12-02/service-2.json @@ -3610,6 +3610,17 @@ }, "documentation":"

Contains information for a group identity in an access policy.

" }, + "IAMRoleIdentity":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"ARN", + "documentation":"

The ARN of the IAM role. For more information, see IAM ARNs in the IAM User Guide.

" + } + }, + "documentation":"

Contains information about an AWS Identity and Access Management (IAM) role. For more information, see IAM roles in the IAM User Guide.

" + }, "IAMUserIdentity":{ "type":"structure", "required":["arn"], @@ -3647,6 +3658,10 @@ "iamUser":{ "shape":"IAMUserIdentity", "documentation":"

An IAM user identity.

" + }, + "iamRole":{ + "shape":"IAMRoleIdentity", + "documentation":"

An IAM role identity.
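A small, hedged sketch of using the new iamRole identity when granting access to a SiteWise Monitor portal; the role ARN and portal ID are placeholders, and the surrounding CreateAccessPolicy parameters are used as commonly documented for this service.

    import boto3

    sitewise = boto3.client("iotsitewise")

    sitewise.create_access_policy(
        accessPolicyIdentity={
            # Placeholder role ARN.
            "iamRole": {"arn": "arn:aws:iam::123456789012:role/ExamplePortalViewer"}
        },
        accessPolicyResource={
            # Placeholder portal ID.
            "portal": {"id": "11111111-2222-3333-4444-555555555555"}
        },
        accessPolicyPermission="VIEWER",
    )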

" } }, "documentation":"

Contains an identity that can access an AWS IoT SiteWise Monitor resource.

Currently, you can't use AWS APIs to retrieve AWS SSO identity IDs. You can find the AWS SSO identity IDs in the URL of user and group pages in the AWS SSO console.

" @@ -4232,7 +4247,7 @@ "type":"structure", "members":{ }, - "documentation":"

Contains an asset measurement property. This structure is empty. For more information, see Measurements in the AWS IoT SiteWise User Guide.

" + "documentation":"

Contains an asset measurement property. For more information, see Measurements in the AWS IoT SiteWise User Guide.

" }, "Metric":{ "type":"structure", diff --git a/botocore/data/iotwireless/2020-11-22/service-2.json b/botocore/data/iotwireless/2020-11-22/service-2.json index 23509c6a..14cca46e 100644 --- a/botocore/data/iotwireless/2020-11-22/service-2.json +++ b/botocore/data/iotwireless/2020-11-22/service-2.json @@ -990,6 +990,10 @@ "shape":"ClientRequestToken", "documentation":"

Each resource must have a unique client request token. If you try to create a new resource with the same token as a resource that already exists, an exception occurs. If you omit this value, AWS SDKs will automatically generate a unique client request.

", "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags attached to the specified resource. Tags are metadata that can be used to manage a resource.

" } } }, @@ -999,6 +1003,10 @@ "Sidewalk":{ "shape":"SidewalkAccountInfo", "documentation":"

The Sidewalk account credentials.

" + }, + "Arn":{ + "shape":"PartnerAccountArn", + "documentation":"

The Amazon Resource Name of the resource.

" } } }, @@ -1351,6 +1359,10 @@ "shape":"ClientRequestToken", "documentation":"

Each resource must have a unique client request token. If you try to create a new resource with the same token as a resource that already exists, an exception occurs. If you omit this value, AWS SDKs will automatically generate a unique client request.

", "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags attached to the specified resource. Tags are metadata that can be used to manage a resource.

" } } }, @@ -1360,6 +1372,10 @@ "Id":{ "shape":"WirelessGatewayTaskDefinitionId", "documentation":"

The ID of the new wireless gateway task definition.

" + }, + "Arn":{ + "shape":"WirelessGatewayTaskDefinitionArn", + "documentation":"

The Amazon Resource Name of the resource.
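A hedged sketch of creating a gateway task definition with the newly supported Tags member and reading back the new Arn field; the name and tag values are placeholders, and the optional Update block is omitted.

    import boto3

    iotwireless = boto3.client("iotwireless")

    resp = iotwireless.create_wireless_gateway_task_definition(
        AutoCreateTasks=True,
        Name="example-gateway-update",                 # placeholder name
        Tags=[{"Key": "Team", "Value": "firmware"}],   # placeholder tag
    )
    print(resp["Id"], resp["Arn"])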

" } } }, @@ -2027,7 +2043,7 @@ }, "LoRaWANNetworkServerCertificateId":{ "shape":"IotCertificateId", - "documentation":"

The ID of the certificate associated with the wireless gateway and used for LoRaWANNetworkServer endpoint.

" + "documentation":"

The ID of the certificate that is associated with the wireless gateway and used for the LoRaWANNetworkServer endpoint.

" } } }, @@ -2157,6 +2173,10 @@ "Update":{ "shape":"UpdateWirelessGatewayTaskCreate", "documentation":"

Information about the gateways to update.

" + }, + "Arn":{ + "shape":"WirelessGatewayTaskDefinitionArn", + "documentation":"

The Amazon Resource Name of the resource.

" } } }, @@ -2946,6 +2966,7 @@ "max":32, "min":1 }, + "PartnerAccountArn":{"type":"string"}, "PartnerAccountId":{ "type":"string", "max":256 @@ -3076,6 +3097,7 @@ }, "Seq":{ "type":"integer", + "max":16383, "min":0 }, "ServiceProfile":{ @@ -3169,6 +3191,10 @@ "Fingerprint":{ "shape":"Fingerprint", "documentation":"

The fingerprint of the Sidewalk application server private key.

" + }, + "Arn":{ + "shape":"PartnerAccountArn", + "documentation":"

The Amazon Resource Name of the resource.

" } }, "documentation":"

Information about a Sidewalk account.

" @@ -3532,6 +3558,10 @@ "LoRaWAN":{ "shape":"LoRaWANUpdateGatewayTaskEntry", "documentation":"

The properties that relate to the LoRaWAN wireless gateway.

" + }, + "Arn":{ + "shape":"WirelessGatewayTaskDefinitionArn", + "documentation":"

The Amazon Resource Name of the resource.

" } }, "documentation":"

UpdateWirelessGatewayTaskEntry object.

" @@ -3669,6 +3699,7 @@ "type":"list", "member":{"shape":"WirelessGatewayStatistics"} }, + "WirelessGatewayTaskDefinitionArn":{"type":"string"}, "WirelessGatewayTaskDefinitionId":{ "type":"string", "max":36, diff --git a/botocore/data/ivs/2020-07-14/service-2.json b/botocore/data/ivs/2020-07-14/service-2.json index e4d60905..552108d3 100644 --- a/botocore/data/ivs/2020-07-14/service-2.json +++ b/botocore/data/ivs/2020-07-14/service-2.json @@ -69,7 +69,8 @@ "name":"DeleteChannel", "http":{ "method":"POST", - "requestUri":"/DeleteChannel" + "requestUri":"/DeleteChannel", + "responseCode":204 }, "input":{"shape":"DeleteChannelRequest"}, "errors":[ @@ -101,7 +102,8 @@ "name":"DeleteStreamKey", "http":{ "method":"POST", - "requestUri":"/DeleteStreamKey" + "requestUri":"/DeleteStreamKey", + "responseCode":204 }, "input":{"shape":"DeleteStreamKeyRequest"}, "errors":[ @@ -200,7 +202,8 @@ "output":{"shape":"ListChannelsResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"} ], "documentation":"

Gets summary information about all channels in your account, in the AWS region where the API request is processed. This list can be filtered to match a specified string.

" }, @@ -1143,7 +1146,10 @@ "member":{"shape":"StreamSummary"} }, "StreamMetadata":{"type":"string"}, - "StreamStartTime":{"type":"timestamp"}, + "StreamStartTime":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, "StreamState":{ "type":"string", "enum":[ diff --git a/botocore/data/kinesis-video-archived-media/2017-09-30/service-2.json b/botocore/data/kinesis-video-archived-media/2017-09-30/service-2.json index 60f8b722..564c813c 100644 --- a/botocore/data/kinesis-video-archived-media/2017-09-30/service-2.json +++ b/botocore/data/kinesis-video-archived-media/2017-09-30/service-2.json @@ -30,7 +30,7 @@ {"shape":"InvalidMediaFrameException"}, {"shape":"NoDataRetentionException"} ], - "documentation":"

Downloads an MP4 file (clip) containing the archived, on-demand media from the specified video stream over the specified time range.

Both the StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation.

As a prerequsite to using GetCLip API, you must obtain an endpoint using GetDataEndpoint, specifying GET_CLIP for the APIName parameter.

An Amazon Kinesis video stream has the following requirements for providing data through MP4:

You can monitor the amount of outgoing data by monitoring the GetClip.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for outgoing AWS data apply.

" + "documentation":"

Downloads an MP4 file (clip) containing the archived, on-demand media from the specified video stream over the specified time range.

Both the StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation.

As a prerequisite to using the GetClip API, you must obtain an endpoint using GetDataEndpoint, specifying GET_CLIP for the APIName parameter.

An Amazon Kinesis video stream has the following requirements for providing data through MP4:

You can monitor the amount of outgoing data by monitoring the GetClip.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for outgoing AWS data apply.
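Since the endpoint-discovery prerequisite above is easy to miss, here is a hedged sketch of the full GetClip flow; the stream name is a placeholder and the five-minute window is arbitrary.

    from datetime import datetime, timedelta

    import boto3

    stream_name = "example-stream"  # placeholder

    # 1. Resolve the stream-specific endpoint for GET_CLIP.
    kvs = boto3.client("kinesisvideo")
    endpoint = kvs.get_data_endpoint(StreamName=stream_name, APIName="GET_CLIP")["DataEndpoint"]

    # 2. Call GetClip against that endpoint.
    archived = boto3.client("kinesis-video-archived-media", endpoint_url=endpoint)
    end = datetime.utcnow()
    start = end - timedelta(minutes=5)
    clip = archived.get_clip(
        StreamName=stream_name,
        ClipFragmentSelector={
            "FragmentSelectorType": "SERVER_TIMESTAMP",
            "TimestampRange": {"StartTimestamp": start, "EndTimestamp": end},
        },
    )
    with open("clip.mp4", "wb") as f:
        f.write(clip["Payload"].read())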

" }, "GetDASHStreamingSessionURL":{ "name":"GetDASHStreamingSessionURL", @@ -50,7 +50,7 @@ {"shape":"MissingCodecPrivateDataException"}, {"shape":"InvalidCodecPrivateDataException"} ], - "documentation":"

Retrieves an MPEG Dynamic Adaptive Streaming over HTTP (DASH) URL for the stream. You can then open the URL in a media player to view the stream contents.

Both the StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation.

An Amazon Kinesis video stream has the following requirements for providing data through MPEG-DASH:

The following procedure shows how to use MPEG-DASH with Kinesis Video Streams:

  1. Get an endpoint using GetDataEndpoint, specifying GET_DASH_STREAMING_SESSION_URL for the APIName parameter.

  2. Retrieve the MPEG-DASH URL using GetDASHStreamingSessionURL. Kinesis Video Streams creates an MPEG-DASH streaming session to be used for accessing content in a stream using the MPEG-DASH protocol. GetDASHStreamingSessionURL returns an authenticated URL (that includes an encrypted session token) for the session's MPEG-DASH manifest (the root resource needed for streaming with MPEG-DASH).

    Don't share or store this token where an unauthorized entity could access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you would use with your AWS credentials.

    The media that is made available through the manifest consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.

  3. Provide the URL (containing the encrypted session token) for the MPEG-DASH manifest to a media player that supports the MPEG-DASH protocol. Kinesis Video Streams makes the initialization fragment and media fragments available through the manifest URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain encoded video frames or encoded audio samples.

  4. The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:

    • GetDASHManifest: Retrieves an MPEG DASH manifest, which contains the metadata for the media that you want to playback.

    • GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \"fytp\" and \"moov\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.

      The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.

    • GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \"moof\" and \"mdat\" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.

      After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video.

      Data retrieved with this action is billable. See Pricing for details.

The following restrictions apply to MPEG-DASH sessions:

  • A streaming session URL should not be shared between players. The service might throttle a session if multiple media players are sharing it. For connection limits, see Kinesis Video Streams Limits.

  • A Kinesis video stream can have a maximum of ten active MPEG-DASH streaming sessions. If a new session is created when the maximum number of sessions is already active, the oldest (earliest created) session is closed. The number of active GetMedia connections on a Kinesis video stream does not count against this limit, and the number of active MPEG-DASH sessions does not count against the active GetMedia connection limit.

    The maximum limits for active HLS and MPEG-DASH streaming sessions are independent of each other.

You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply.

For more information about HLS, see HTTP Live Streaming on the Apple Developer site.

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.

" + "documentation":"

Retrieves an MPEG Dynamic Adaptive Streaming over HTTP (DASH) URL for the stream. You can then open the URL in a media player to view the stream contents.

Both the StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation.

An Amazon Kinesis video stream has the following requirements for providing data through MPEG-DASH:

The following procedure shows how to use MPEG-DASH with Kinesis Video Streams:

  1. Get an endpoint using GetDataEndpoint, specifying GET_DASH_STREAMING_SESSION_URL for the APIName parameter.

  2. Retrieve the MPEG-DASH URL using GetDASHStreamingSessionURL. Kinesis Video Streams creates an MPEG-DASH streaming session to be used for accessing content in a stream using the MPEG-DASH protocol. GetDASHStreamingSessionURL returns an authenticated URL (that includes an encrypted session token) for the session's MPEG-DASH manifest (the root resource needed for streaming with MPEG-DASH).

    Don't share or store this token where an unauthorized entity can access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you use with your AWS credentials.

    The media that is made available through the manifest consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.

  3. Provide the URL (containing the encrypted session token) for the MPEG-DASH manifest to a media player that supports the MPEG-DASH protocol. Kinesis Video Streams makes the initialization fragment and media fragments available through the manifest URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain encoded video frames or encoded audio samples.

  4. The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:

    • GetDASHManifest: Retrieves an MPEG DASH manifest, which contains the metadata for the media that you want to play back.

    • GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \"ftyp\" and \"moov\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.

      The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.

    • GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \"moof\" and \"mdat\" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.

      After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video.

      Data retrieved with this action is billable. See Pricing for details.

For restrictions that apply to MPEG-DASH sessions, see Kinesis Video Streams Limits.

You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply.

For more information about HLS, see HTTP Live Streaming on the Apple Developer site.

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.
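The same two-step pattern applies here: discover the endpoint, then request the streaming-session URL. A hedged sketch with a placeholder stream name; the HLS variant is analogous with GET_HLS_STREAMING_SESSION_URL and GetHLSStreamingSessionURL.

    import boto3

    stream_name = "example-stream"  # placeholder

    kvs = boto3.client("kinesisvideo")
    endpoint = kvs.get_data_endpoint(
        StreamName=stream_name, APIName="GET_DASH_STREAMING_SESSION_URL"
    )["DataEndpoint"]

    archived = boto3.client("kinesis-video-archived-media", endpoint_url=endpoint)
    resp = archived.get_dash_streaming_session_url(
        StreamName=stream_name,
        PlaybackMode="LIVE",  # or ON_DEMAND with a DASHFragmentSelector time range
    )
    print(resp["DASHStreamingSessionURL"])  # hand this URL to an MPEG-DASH capable player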

" }, "GetHLSStreamingSessionURL":{ "name":"GetHLSStreamingSessionURL", @@ -70,7 +70,7 @@ {"shape":"MissingCodecPrivateDataException"}, {"shape":"InvalidCodecPrivateDataException"} ], - "documentation":"

Retrieves an HTTP Live Streaming (HLS) URL for the stream. You can then open the URL in a browser or media player to view the stream contents.

Both the StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation.

An Amazon Kinesis video stream has the following requirements for providing data through HLS:

Kinesis Video Streams HLS sessions contain fragments in the fragmented MPEG-4 form (also called fMP4 or CMAF) or the MPEG-2 form (also called TS chunks, which the HLS specification also supports). For more information about HLS fragment types, see the HLS specification.

The following procedure shows how to use HLS with Kinesis Video Streams:

  1. Get an endpoint using GetDataEndpoint, specifying GET_HLS_STREAMING_SESSION_URL for the APIName parameter.

  2. Retrieve the HLS URL using GetHLSStreamingSessionURL. Kinesis Video Streams creates an HLS streaming session to be used for accessing content in a stream using the HLS protocol. GetHLSStreamingSessionURL returns an authenticated URL (that includes an encrypted session token) for the session's HLS master playlist (the root resource needed for streaming with HLS).

    Don't share or store this token where an unauthorized entity could access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you would use with your AWS credentials.

    The media that is made available through the playlist consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.

  3. Provide the URL (containing the encrypted session token) for the HLS master playlist to a media player that supports the HLS protocol. Kinesis Video Streams makes the HLS media playlist, initialization fragment, and media fragments available through the master playlist URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain H.264-encoded video frames or AAC-encoded audio samples.

  4. The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:

    • GetHLSMasterPlaylist: Retrieves an HLS master playlist, which contains a URL for the GetHLSMediaPlaylist action for each track, and additional metadata for the media player, including estimated bitrate and resolution.

    • GetHLSMediaPlaylist: Retrieves an HLS media playlist, which contains a URL to access the MP4 initialization fragment with the GetMP4InitFragment action, and URLs to access the MP4 media fragments with the GetMP4MediaFragment actions. The HLS media playlist also contains metadata about the stream that the player needs to play it, such as whether the PlaybackMode is LIVE or ON_DEMAND. The HLS media playlist is typically static for sessions with a PlaybackType of ON_DEMAND. The HLS media playlist is continually updated with new fragments for sessions with a PlaybackType of LIVE. There is a distinct HLS media playlist for the video track and the audio track (if applicable) that contains MP4 media URLs for the specific track.

    • GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \"fytp\" and \"moov\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.

      The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.

    • GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \"moof\" and \"mdat\" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.

      After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video.

      Data retrieved with this action is billable. See Pricing for details.

    • GetTSFragment: Retrieves MPEG TS fragments containing both initialization and media data for all tracks in the stream.

      If the ContainerFormat is MPEG_TS, this API is used instead of GetMP4InitFragment and GetMP4MediaFragment to retrieve stream media.

      Data retrieved with this action is billable. For more information, see Kinesis Video Streams pricing.

The following restrictions apply to HLS sessions:

  • A streaming session URL should not be shared between players. The service might throttle a session if multiple media players are sharing it. For connection limits, see Kinesis Video Streams Limits.

  • A Kinesis video stream can have a maximum of ten active HLS streaming sessions. If a new session is created when the maximum number of sessions is already active, the oldest (earliest created) session is closed. The number of active GetMedia connections on a Kinesis video stream does not count against this limit, and the number of active HLS sessions does not count against the active GetMedia connection limit.

    The maximum limits for active HLS and MPEG-DASH streaming sessions are independent of each other.

You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply.

For more information about HLS, see HTTP Live Streaming on the Apple Developer site.

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.

" + "documentation":"

Retrieves an HTTP Live Streaming (HLS) URL for the stream. You can then open the URL in a browser or media player to view the stream contents.

Both the StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation.

An Amazon Kinesis video stream has the following requirements for providing data through HLS:

Kinesis Video Streams HLS sessions contain fragments in the fragmented MPEG-4 form (also called fMP4 or CMAF) or the MPEG-2 form (also called TS chunks, which the HLS specification also supports). For more information about HLS fragment types, see the HLS specification.

The following procedure shows how to use HLS with Kinesis Video Streams:

  1. Get an endpoint using GetDataEndpoint, specifying GET_HLS_STREAMING_SESSION_URL for the APIName parameter.

  2. Retrieve the HLS URL using GetHLSStreamingSessionURL. Kinesis Video Streams creates an HLS streaming session to be used for accessing content in a stream using the HLS protocol. GetHLSStreamingSessionURL returns an authenticated URL (that includes an encrypted session token) for the session's HLS master playlist (the root resource needed for streaming with HLS).

    Don't share or store this token where an unauthorized entity could access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you would use with your AWS credentials.

    The media that is made available through the playlist consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.

  3. Provide the URL (containing the encrypted session token) for the HLS master playlist to a media player that supports the HLS protocol. Kinesis Video Streams makes the HLS media playlist, initialization fragment, and media fragments available through the master playlist URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain H.264-encoded video frames or AAC-encoded audio samples.

  4. The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:

    • GetHLSMasterPlaylist: Retrieves an HLS master playlist, which contains a URL for the GetHLSMediaPlaylist action for each track, and additional metadata for the media player, including estimated bitrate and resolution.

    • GetHLSMediaPlaylist: Retrieves an HLS media playlist, which contains a URL to access the MP4 initialization fragment with the GetMP4InitFragment action, and URLs to access the MP4 media fragments with the GetMP4MediaFragment actions. The HLS media playlist also contains metadata about the stream that the player needs to play it, such as whether the PlaybackMode is LIVE or ON_DEMAND. The HLS media playlist is typically static for sessions with a PlaybackType of ON_DEMAND. The HLS media playlist is continually updated with new fragments for sessions with a PlaybackType of LIVE. There is a distinct HLS media playlist for the video track and the audio track (if applicable) that contains MP4 media URLs for the specific track.

    • GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \"ftyp\" and \"moov\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.

      The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.

    • GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \"moof\" and \"mdat\" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.

      After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video.

      Data retrieved with this action is billable. See Pricing for details.

    • GetTSFragment: Retrieves MPEG TS fragments containing both initialization and media data for all tracks in the stream.

      If the ContainerFormat is MPEG_TS, this API is used instead of GetMP4InitFragment and GetMP4MediaFragment to retrieve stream media.

      Data retrieved with this action is billable. For more information, see Kinesis Video Streams pricing.

A streaming session URL must not be shared between players. The service might throttle a session if multiple media players are sharing it. For connection limits, see Kinesis Video Streams Limits.

You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply.

For more information about HLS, see HTTP Live Streaming on the Apple Developer site.

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.
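As an illustration of steps 1 and 2 above, a minimal boto3 sketch might look like the following; the stream name is hypothetical, and boto3 is assumed to be built against a model that includes these operations.

```python
import boto3

# Step 1: get the endpoint that serves HLS streaming-session URLs for this stream.
kvs = boto3.client("kinesisvideo")
endpoint = kvs.get_data_endpoint(
    StreamName="my-stream",                      # hypothetical stream name
    APIName="GET_HLS_STREAMING_SESSION_URL",
)["DataEndpoint"]

# Step 2: request the session URL from that endpoint.
kvam = boto3.client("kinesis-video-archived-media", endpoint_url=endpoint)
url = kvam.get_hls_streaming_session_url(
    StreamName="my-stream",
    PlaybackMode="LIVE",
)["HLSStreamingSessionURL"]

# Hand the URL (which embeds the encrypted session token) to an HLS-capable player.
print(url)
```

As noted above, the returned URL embeds a session token and should be safeguarded like a credential.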

" }, "GetMediaForFragmentList":{ "name":"GetMediaForFragmentList", @@ -86,7 +86,7 @@ {"shape":"ClientLimitExceededException"}, {"shape":"NotAuthorizedException"} ], - "documentation":"

Gets media for a list of fragments (specified by fragment number) from the archived data in an Amazon Kinesis video stream.

You must first call the GetDataEndpoint API to get an endpoint. Then send the GetMediaForFragmentList requests to this endpoint using the --endpoint-url parameter.

The following limits apply when using the GetMediaForFragmentList API:

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.

" + "documentation":"

Gets media for a list of fragments (specified by fragment number) from the archived data in an Amazon Kinesis video stream.

You must first call the GetDataEndpoint API to get an endpoint. Then send the GetMediaForFragmentList requests to this endpoint using the --endpoint-url parameter.

For limits, see Kinesis Video Streams Limits.

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.
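A sketch of that two-step flow with boto3; the stream name and fragment number are hypothetical, and fragment numbers would normally come from a prior ListFragments call.

```python
import boto3

kvs = boto3.client("kinesisvideo")
endpoint = kvs.get_data_endpoint(
    StreamName="my-stream",                       # or pass StreamARN instead
    APIName="GET_MEDIA_FOR_FRAGMENT_LIST",
)["DataEndpoint"]

kvam = boto3.client("kinesis-video-archived-media", endpoint_url=endpoint)
resp = kvam.get_media_for_fragment_list(
    StreamName="my-stream",
    Fragments=["91343852333181432392682062622998214364374808818"],  # hypothetical
)

# The payload is a streaming blob containing the requested fragments.
with open("fragments.mkv", "wb") as f:
    f.write(resp["Payload"].read())
```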

" }, "ListFragments":{ "name":"ListFragments", @@ -111,7 +111,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

Kinesis Video Streams has throttled the request because you have exceeded the limit of allowed client calls. Try making the call later.

", + "documentation":"

Kinesis Video Streams has throttled the request because you have exceeded a limit. Try making the call later. For information about limits, see Kinesis Video Streams Limits.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -149,14 +149,14 @@ "members":{ "StartTimestamp":{ "shape":"Timestamp", - "documentation":"

The starting timestamp in the range of timestamps for which to return fragments.

This value is inclusive. Fragments that start before the StartTimestamp and continue past it are included in the session. If FragmentSelectorType is SERVER_TIMESTAMP, the StartTimestamp must be later than the stream head.

" + "documentation":"

The starting timestamp in the range of timestamps for which to return fragments.

Only fragments that start exactly at or after StartTimestamp are included in the session. Fragments that start before StartTimestamp and continue past it aren't included in the session. If FragmentSelectorType is SERVER_TIMESTAMP, the StartTimestamp must be later than the stream head.

" }, "EndTimestamp":{ "shape":"Timestamp", - "documentation":"

The end of the timestamp range for the requested media.

This value must be within 3 hours of the specified StartTimestamp, and it must be later than the StartTimestamp value. If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must be in the past.

This value is inclusive. The EndTimestamp is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp value and continue past it are included in the session.

" + "documentation":"

The end of the timestamp range for the requested media.

This value must be within 24 hours of the specified StartTimestamp, and it must be later than the StartTimestamp value. If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must be in the past.

This value is inclusive. The EndTimestamp is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp value and continue past it are included in the session.
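To make the inclusive range semantics concrete, here is a hedged boto3 sketch of passing a ClipTimestampRange to GetClip; the stream name and timestamps are illustrative only.

```python
import boto3
from datetime import datetime, timedelta, timezone

kvs = boto3.client("kinesisvideo")
endpoint = kvs.get_data_endpoint(
    StreamName="my-stream", APIName="GET_CLIP"
)["DataEndpoint"]

kvam = boto3.client("kinesis-video-archived-media", endpoint_url=endpoint)
end = datetime.now(timezone.utc) - timedelta(minutes=5)   # must be in the past for SERVER_TIMESTAMP
start = end - timedelta(minutes=10)                       # well within the 24-hour window
clip = kvam.get_clip(
    StreamName="my-stream",
    ClipFragmentSelector={
        "FragmentSelectorType": "SERVER_TIMESTAMP",
        "TimestampRange": {"StartTimestamp": start, "EndTimestamp": end},
    },
)
with open("clip.mp4", "wb") as f:
    f.write(clip["Payload"].read())
```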

" } }, - "documentation":"

The range of timestamps for which to return fragments.

The values in the ClipTimestampRange are inclusive. Fragments that begin before the start time but continue past it, or fragments that begin before the end time but continue past it, are included in the session.

" + "documentation":"

The range of timestamps for which to return fragments.

" }, "ContainerFormat":{ "type":"string", @@ -206,6 +206,11 @@ "SERVER_TIMESTAMP" ] }, + "DASHMaxResults":{ + "type":"long", + "max":5000, + "min":1 + }, "DASHPlaybackMode":{ "type":"string", "enum":[ @@ -220,14 +225,14 @@ "members":{ "StartTimestamp":{ "shape":"Timestamp", - "documentation":"

The start of the timestamp range for the requested media.

If the DASHTimestampRange value is specified, the StartTimestamp value is required.

This value is inclusive. Fragments that start before the StartTimestamp and continue past it are included in the session. If FragmentSelectorType is SERVER_TIMESTAMP, the StartTimestamp must be later than the stream head.

" + "documentation":"

The start of the timestamp range for the requested media.

If the DASHTimestampRange value is specified, the StartTimestamp value is required.

Only fragments that start exactly at or after StartTimestamp are included in the session. Fragments that start before StartTimestamp and continue past it aren't included in the session. If FragmentSelectorType is SERVER_TIMESTAMP, the StartTimestamp must be later than the stream head.

" }, "EndTimestamp":{ "shape":"Timestamp", - "documentation":"

The end of the timestamp range for the requested media. This value must be within 3 hours of the specified StartTimestamp, and it must be later than the StartTimestamp value.

If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must be in the past.

The EndTimestamp value is required for ON_DEMAND mode, but optional for LIVE_REPLAY mode. If the EndTimestamp is not set for LIVE_REPLAY mode then the session will continue to include newly ingested fragments until the session expires.

This value is inclusive. The EndTimestamp is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp value and continue past it are included in the session.

" + "documentation":"

The end of the timestamp range for the requested media. This value must be within 24 hours of the specified StartTimestamp, and it must be later than the StartTimestamp value.

If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must be in the past.

The EndTimestamp value is required for ON_DEMAND mode, but optional for LIVE_REPLAY mode. If the EndTimestamp is not set for LIVE_REPLAY mode then the session will continue to include newly ingested fragments until the session expires.

This value is inclusive. The EndTimestamp is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp value and continue past it are included in the session.

" } }, - "documentation":"

The start and end of the timestamp range for the requested media.

This value should not be present if PlaybackType is LIVE.

The values in the DASHTimestampRange are inclusive. Fragments that begin before the start time but continue past it, or fragments that begin before the end time but continue past it, are included in the session.

" + "documentation":"

The start and end of the timestamp range for the requested media.

This value should not be present if PlaybackType is LIVE.

The values in DASHTimestampRange are inclusive. Fragments that start exactly at or after the start time are included in the session. Fragments that start before the start time and continue past it are not included in the session.

" }, "ErrorMessage":{"type":"string"}, "Expires":{ @@ -331,7 +336,7 @@ }, "Payload":{ "shape":"Payload", - "documentation":"

Traditional MP4 file that contains the media clip from the specified video stream. The output will contain the first 100 MB or the first 200 fragments from the specified start timestamp. For more information, see Kinesis Video Streams Limits.

" + "documentation":"

Traditional MP4 file that contains the media clip from the specified video stream. The output will contain the first 100 MB or the first 200 fragments from the specified start timestamp. For more information, see Kinesis Video Streams Limits.

" } }, "payload":"Payload" @@ -349,7 +354,7 @@ }, "PlaybackMode":{ "shape":"DASHPlaybackMode", - "documentation":"

Whether to retrieve live, live replay, or archived, on-demand data.

Features of the three types of sessions include the following:

In all playback modes, if FragmentSelectorType is PRODUCER_TIMESTAMP, and if there are multiple fragments with the same start timestamp, the fragment that has the larger fragment number (that is, the newer fragment) is included in the MPEG-DASH manifest. The other fragments are not included. Fragments that have different timestamps but have overlapping durations are still included in the MPEG-DASH manifest. This can lead to unexpected behavior in the media player.

The default is LIVE.

" + "documentation":"

Whether to retrieve live, live replay, or archived, on-demand data.

Features of the three types of sessions include the following:

In all playback modes, if FragmentSelectorType is PRODUCER_TIMESTAMP, and if there are multiple fragments with the same start timestamp, the fragment that has the larger fragment number (that is, the newer fragment) is included in the MPEG-DASH manifest. The other fragments are not included. Fragments that have different timestamps but have overlapping durations are still included in the MPEG-DASH manifest. This can lead to unexpected behavior in the media player.

The default is LIVE.

" }, "DisplayFragmentTimestamp":{ "shape":"DASHDisplayFragmentTimestamp", @@ -368,7 +373,7 @@ "documentation":"

The time in seconds until the requested session expires. This value can be between 300 (5 minutes) and 43200 (12 hours).

When a session expires, no new calls to GetDashManifest, GetMP4InitFragment, or GetMP4MediaFragment can be made for that session.

The default is 300 (5 minutes).

" }, "MaxManifestFragmentResults":{ - "shape":"PageLimit", + "shape":"DASHMaxResults", "documentation":"

The maximum number of fragments that are returned in the MPEG-DASH manifest.

When the PlaybackMode is LIVE, the most recent fragments are returned up to this value. When the PlaybackMode is ON_DEMAND, the oldest fragments are returned, up to this maximum number.

When there are a higher number of fragments available in a live MPEG-DASH manifest, video players often buffer content before starting playback. Increasing the buffer size increases the playback latency, but it decreases the likelihood that rebuffering will occur during playback. We recommend that a live MPEG-DASH manifest have a minimum of 3 fragments and a maximum of 10 fragments.

The default is 5 fragments if PlaybackMode is LIVE or LIVE_REPLAY, and 1,000 if PlaybackMode is ON_DEMAND.

The maximum value of 1,000 fragments corresponds to more than 16 minutes of video on streams with 1-second fragments, and more than 2 1/2 hours of video on streams with 10-second fragments.
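A hedged boto3 sketch of requesting an ON_DEMAND MPEG-DASH session that sets this parameter explicitly; the stream name, timestamps, and values are illustrative.

```python
import boto3
from datetime import datetime, timezone

kvs = boto3.client("kinesisvideo")
endpoint = kvs.get_data_endpoint(
    StreamName="my-stream", APIName="GET_DASH_STREAMING_SESSION_URL"
)["DataEndpoint"]

kvam = boto3.client("kinesis-video-archived-media", endpoint_url=endpoint)
resp = kvam.get_dash_streaming_session_url(
    StreamName="my-stream",
    PlaybackMode="ON_DEMAND",
    DASHFragmentSelector={
        "FragmentSelectorType": "SERVER_TIMESTAMP",
        "TimestampRange": {
            "StartTimestamp": datetime(2021, 3, 1, 12, 0, tzinfo=timezone.utc),
            "EndTimestamp": datetime(2021, 3, 1, 13, 0, tzinfo=timezone.utc),
        },
    },
    Expires=3600,                     # session lifetime in seconds (300-43200)
    MaxManifestFragmentResults=1000,  # oldest fragments first in ON_DEMAND mode
)
print(resp["DASHStreamingSessionURL"])
```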

" } } @@ -395,7 +400,7 @@ }, "PlaybackMode":{ "shape":"HLSPlaybackMode", - "documentation":"

Whether to retrieve live, live replay, or archived, on-demand data.

Features of the three types of sessions include the following:

In all playback modes, if FragmentSelectorType is PRODUCER_TIMESTAMP, and if there are multiple fragments with the same start timestamp, the fragment that has the larger fragment number (that is, the newer fragment) is included in the HLS media playlist. The other fragments are not included. Fragments that have different timestamps but have overlapping durations are still included in the HLS media playlist. This can lead to unexpected behavior in the media player.

The default is LIVE.

" + "documentation":"

Whether to retrieve live, live replay, or archived, on-demand data.

Features of the three types of sessions include the following:

In all playback modes, if FragmentSelectorType is PRODUCER_TIMESTAMP, and if there are multiple fragments with the same start timestamp, the fragment that has the largest fragment number (that is, the newest fragment) is included in the HLS media playlist. The other fragments are not included. Fragments that have different timestamps but have overlapping durations are still included in the HLS media playlist. This can lead to unexpected behavior in the media player.

The default is LIVE.

" }, "HLSFragmentSelector":{ "shape":"HLSFragmentSelector", @@ -418,8 +423,8 @@ "documentation":"

The time in seconds until the requested session expires. This value can be between 300 (5 minutes) and 43200 (12 hours).

When a session expires, no new calls to GetHLSMasterPlaylist, GetHLSMediaPlaylist, GetMP4InitFragment, GetMP4MediaFragment, or GetTSFragment can be made for that session.

The default is 300 (5 minutes).

" }, "MaxMediaPlaylistFragmentResults":{ - "shape":"PageLimit", - "documentation":"

The maximum number of fragments that are returned in the HLS media playlists.

When the PlaybackMode is LIVE, the most recent fragments are returned up to this value. When the PlaybackMode is ON_DEMAND, the oldest fragments are returned, up to this maximum number.

When there are a higher number of fragments available in a live HLS media playlist, video players often buffer content before starting playback. Increasing the buffer size increases the playback latency, but it decreases the likelihood that rebuffering will occur during playback. We recommend that a live HLS media playlist have a minimum of 3 fragments and a maximum of 10 fragments.

The default is 5 fragments if PlaybackMode is LIVE or LIVE_REPLAY, and 1,000 if PlaybackMode is ON_DEMAND.

The maximum value of 1,000 fragments corresponds to more than 16 minutes of video on streams with 1-second fragments, and more than 2 1/2 hours of video on streams with 10-second fragments.

" + "shape":"HLSMaxResults", + "documentation":"

The maximum number of fragments that are returned in the HLS media playlists.

When the PlaybackMode is LIVE, the most recent fragments are returned up to this value. When the PlaybackMode is ON_DEMAND, the oldest fragments are returned, up to this maximum number.

When there are a higher number of fragments available in a live HLS media playlist, video players often buffer content before starting playback. Increasing the buffer size increases the playback latency, but it decreases the likelihood that rebuffering will occur during playback. We recommend that a live HLS media playlist have a minimum of 3 fragments and a maximum of 10 fragments.

The default is 5 fragments if PlaybackMode is LIVE or LIVE_REPLAY, and 1,000 if PlaybackMode is ON_DEMAND.

The maximum value of 5,000 fragments corresponds to more than 80 minutes of video on streams with 1-second fragments, and more than 13 hours of video on streams with 10-second fragments.

" } } }, @@ -434,14 +439,15 @@ }, "GetMediaForFragmentListInput":{ "type":"structure", - "required":[ - "StreamName", - "Fragments" - ], + "required":["Fragments"], "members":{ "StreamName":{ "shape":"StreamName", - "documentation":"

The name of the stream from which to retrieve fragment media.

" + "documentation":"

The name of the stream from which to retrieve fragment media. Specify either this parameter or the StreamARN parameter.

" + }, + "StreamARN":{ + "shape":"ResourceARN", + "documentation":"

The Amazon Resource Name (ARN) of the stream from which to retrieve fragment media. Specify either this parameter or the StreamName parameter.

" }, "Fragments":{ "shape":"FragmentNumberList", @@ -501,6 +507,11 @@ "SERVER_TIMESTAMP" ] }, + "HLSMaxResults":{ + "type":"long", + "max":5000, + "min":1 + }, "HLSPlaybackMode":{ "type":"string", "enum":[ @@ -515,14 +526,14 @@ "members":{ "StartTimestamp":{ "shape":"Timestamp", - "documentation":"

The start of the timestamp range for the requested media.

If the HLSTimestampRange value is specified, the StartTimestamp value is required.

This value is inclusive. Fragments that start before the StartTimestamp and continue past it are included in the session. If FragmentSelectorType is SERVER_TIMESTAMP, the StartTimestamp must be later than the stream head.

" + "documentation":"

The start of the timestamp range for the requested media.

If the HLSTimestampRange value is specified, the StartTimestamp value is required.

Only fragments that start exactly at or after StartTimestamp are included in the session. Fragments that start before StartTimestamp and continue past it aren't included in the session. If FragmentSelectorType is SERVER_TIMESTAMP, the StartTimestamp must be later than the stream head.

" }, "EndTimestamp":{ "shape":"Timestamp", - "documentation":"

The end of the timestamp range for the requested media. This value must be within 3 hours of the specified StartTimestamp, and it must be later than the StartTimestamp value.

If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must be in the past.

The EndTimestamp value is required for ON_DEMAND mode, but optional for LIVE_REPLAY mode. If the EndTimestamp is not set for LIVE_REPLAY mode then the session will continue to include newly ingested fragments until the session expires.

This value is inclusive. The EndTimestamp is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp value and continue past it are included in the session.

" + "documentation":"

The end of the timestamp range for the requested media. This value must be within 24 hours of the specified StartTimestamp, and it must be later than the StartTimestamp value.

If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must be in the past.

The EndTimestamp value is required for ON_DEMAND mode, but optional for LIVE_REPLAY mode. If the EndTimestamp is not set for LIVE_REPLAY mode then the session will continue to include newly ingested fragments until the session expires.

This value is inclusive. The EndTimestamp is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp value and continue past it are included in the session.

" } }, - "documentation":"

The start and end of the timestamp range for the requested media.

This value should not be present if PlaybackType is LIVE.

The values in the HLSTimestampRange are inclusive. Fragments that begin before the start time but continue past it, or fragments that begin before the end time but continue past it, are included in the session.

" + "documentation":"

The start and end of the timestamp range for the requested media.

This value should not be present if PlaybackType is LIVE.

" }, "InvalidArgumentException":{ "type":"structure", @@ -553,14 +564,17 @@ }, "ListFragmentsInput":{ "type":"structure", - "required":["StreamName"], "members":{ "StreamName":{ "shape":"StreamName", - "documentation":"

The name of the stream from which to retrieve a fragment list.

" + "documentation":"

The name of the stream from which to retrieve a fragment list. Specify either this parameter or the StreamARN parameter.

" + }, + "StreamARN":{ + "shape":"ResourceARN", + "documentation":"

The Amazon Resource Name (ARN) of the stream from which to retrieve a fragment list. Specify either this parameter or the StreamName parameter.

" }, "MaxResults":{ - "shape":"PageLimit", + "shape":"ListFragmentsMaxResults", "documentation":"

The total number of fragments to return. If the total number of fragments available is more than the value specified in max-results, then a ListFragmentsOutput$NextToken is provided in the output that you can use to resume pagination.
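For example, pagination with NextToken can be driven in a simple loop via boto3; the stream name below is hypothetical.

```python
import boto3

kvs = boto3.client("kinesisvideo")
endpoint = kvs.get_data_endpoint(
    StreamName="my-stream", APIName="LIST_FRAGMENTS"
)["DataEndpoint"]
kvam = boto3.client("kinesis-video-archived-media", endpoint_url=endpoint)

fragments, token = [], None
while True:
    kwargs = {"StreamName": "my-stream", "MaxResults": 1000}
    if token:
        kwargs["NextToken"] = token       # resume where the previous page stopped
    page = kvam.list_fragments(**kwargs)
    fragments.extend(page["Fragments"])
    token = page.get("NextToken")
    if not token:
        break
```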

" }, "NextToken":{ @@ -573,6 +587,11 @@ } } }, + "ListFragmentsMaxResults":{ + "type":"long", + "max":1000, + "min":1 + }, "ListFragmentsOutput":{ "type":"structure", "members":{ @@ -620,11 +639,6 @@ "error":{"httpStatusCode":401}, "exception":true }, - "PageLimit":{ - "type":"long", - "max":1000, - "min":1 - }, "Payload":{ "type":"blob", "streaming":true @@ -633,7 +647,7 @@ "type":"string", "max":1024, "min":1, - "pattern":"arn:aws:kinesisvideo:[a-z0-9-]+:[0-9]+:[a-z]+/[a-zA-Z0-9_.-]+/[0-9]+" + "pattern":"arn:[a-z\\d-]+:kinesisvideo:[a-z0-9-]+:[0-9]+:[a-z]+/[a-zA-Z0-9_.-]+/[0-9]+" }, "ResourceNotFoundException":{ "type":"structure", diff --git a/botocore/data/lambda/2015-03-31/service-2.json b/botocore/data/lambda/2015-03-31/service-2.json index 4eedb22b..9333eb28 100644 --- a/botocore/data/lambda/2015-03-31/service-2.json +++ b/botocore/data/lambda/2015-03-31/service-2.json @@ -646,7 +646,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"

Returns a list of Lambda functions, with the version-specific configuration of each. Lambda returns up to 50 functions per call.

Set FunctionVersion to ALL to include all published versions of each function in addition to the unpublished version. To get more information about a function or version, use GetFunction.

" + "documentation":"

Returns a list of Lambda functions, with the version-specific configuration of each. Lambda returns up to 50 functions per call.

Set FunctionVersion to ALL to include all published versions of each function in addition to the unpublished version.

The ListFunctions action returns a subset of the FunctionConfiguration fields. To get the additional fields (State, StateReasonCode, StateReason, LastUpdateStatus, LastUpdateStatusReason, LastUpdateStatusReasonCode) for a function or version, use GetFunction.
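For example, combining the built-in boto3 paginator with a per-function GetFunction call retrieves the extra state fields; the functions listed are whatever exists in the account.

```python
import boto3

lam = boto3.client("lambda")
paginator = lam.get_paginator("list_functions")

for page in paginator.paginate(FunctionVersion="ALL"):
    for fn in page["Functions"]:
        # State/LastUpdateStatus fields are not returned by ListFunctions;
        # fetch them per function with GetFunction.
        details = lam.get_function(FunctionName=fn["FunctionArn"])
        print(fn["FunctionName"], details["Configuration"].get("State"))
```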

" }, "ListFunctionsByCodeSigningConfig":{ "name":"ListFunctionsByCodeSigningConfig", @@ -1525,7 +1525,7 @@ }, "TumblingWindowInSeconds":{ "shape":"TumblingWindowInSeconds", - "documentation":"

(Streams) The duration of a processing window in seconds. The range is between 1 second up to 15 minutes.

" + "documentation":"

(Streams) The duration, in seconds, of a processing window. The range is between 1 second and 900 seconds.
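For illustration, a tumbling window can be set when creating the event source mapping; a boto3 sketch with hypothetical function and stream ARNs follows.

```python
import boto3

lam = boto3.client("lambda")
resp = lam.create_event_source_mapping(
    FunctionName="my-function",                                           # hypothetical
    EventSourceArn="arn:aws:kinesis:us-east-1:123456789012:stream/my-stream",  # hypothetical
    StartingPosition="LATEST",
    TumblingWindowInSeconds=120,   # 1-900 seconds
)
print(resp["UUID"], resp["State"])
```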

" }, "Topics":{ "shape":"Topics", @@ -1631,7 +1631,7 @@ }, "ImageConfig":{ "shape":"ImageConfig", - "documentation":"

Configuration values that override the container image Dockerfile.

" + "documentation":"

Container image configuration values that override the values in the container image Dockerfile.

" }, "CodeSigningConfigArn":{ "shape":"CodeSigningConfigArn", @@ -2070,7 +2070,7 @@ }, "TumblingWindowInSeconds":{ "shape":"TumblingWindowInSeconds", - "documentation":"

(Streams) The duration of a processing window in seconds. The range is between 1 second up to 15 minutes.

" + "documentation":"

(Streams) The duration, in seconds, of a processing window. The range is between 1 second and 900 seconds.

" }, "FunctionResponseTypes":{ "shape":"FunctionResponseTypeList", @@ -2358,7 +2358,7 @@ "type":"list", "member":{"shape":"FunctionResponseType"}, "max":1, - "min":1 + "min":0 }, "FunctionVersion":{ "type":"string", @@ -2769,7 +2769,7 @@ "documentation":"

Specifies the working directory.

" } }, - "documentation":"

Configuration values that override the container image Dockerfile settings. See Container settings.

" + "documentation":"

Configuration values that override the container image Dockerfile settings. See Container settings.

" }, "ImageConfigError":{ "type":"structure", @@ -3442,7 +3442,7 @@ }, "MaxItems":{ "shape":"MaxListItems", - "documentation":"

The maximum number of functions to return.

", + "documentation":"

The maximum number of functions to return in the response. Note that ListFunctions returns a maximum of 50 items in each response, even if you set the number higher.

", "location":"querystring", "locationName":"MaxItems" } @@ -4249,6 +4249,7 @@ "nodejs8.10", "nodejs10.x", "nodejs12.x", + "nodejs14.x", "java8", "java8.al2", "java11", @@ -4325,7 +4326,7 @@ "members":{ "Type":{ "shape":"SourceAccessType", - "documentation":"

The type of authentication protocol or the VPC components for your event source. For example: \"Type\":\"SASL_SCRAM_512_AUTH\".

" + "documentation":"

The type of authentication protocol or the VPC components for your event source. For example: \"Type\":\"SASL_SCRAM_512_AUTH\".

" }, "URI":{ "shape":"URI", @@ -4338,7 +4339,7 @@ "type":"list", "member":{"shape":"SourceAccessConfiguration"}, "max":22, - "min":1 + "min":0 }, "SourceAccessType":{ "type":"string", @@ -4680,7 +4681,7 @@ }, "TumblingWindowInSeconds":{ "shape":"TumblingWindowInSeconds", - "documentation":"

(Streams) The duration of a processing window in seconds. The range is between 1 second up to 15 minutes.

" + "documentation":"

(Streams) The duration, in seconds, of a processing window. The range is between 1 second and 900 seconds.

" }, "FunctionResponseTypes":{ "shape":"FunctionResponseTypeList", @@ -4800,7 +4801,7 @@ }, "ImageConfig":{ "shape":"ImageConfig", - "documentation":"

Configuration values that override the container image Dockerfile.

" + "documentation":"

Container image configuration values that override the values in the container image Dockerfile.

" } } }, diff --git a/botocore/data/license-manager/2018-08-01/service-2.json b/botocore/data/license-manager/2018-08-01/service-2.json index 10a548ff..a065aa9d 100644 --- a/botocore/data/license-manager/2018-08-01/service-2.json +++ b/botocore/data/license-manager/2018-08-01/service-2.json @@ -3052,7 +3052,6 @@ "type":"structure", "required":[ "ProductInformationFilterName", - "ProductInformationFilterValue", "ProductInformationFilterComparator" ], "members":{ diff --git a/botocore/data/lightsail/2016-11-28/service-2.json b/botocore/data/lightsail/2016-11-28/service-2.json index 507b296c..61cd3d18 100644 --- a/botocore/data/lightsail/2016-11-28/service-2.json +++ b/botocore/data/lightsail/2016-11-28/service-2.json @@ -178,7 +178,7 @@ {"shape":"AccessDeniedException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Creates an SSL/TLS certificate for a Amazon Lightsail content delivery network (CDN) distribution.

After the certificate is created, use the AttachCertificateToDistribution action to attach the certificate to your distribution.

Only certificates created in the us-east-1 AWS Region can be attached to Lightsail distributions. Lightsail distributions are global resources that can reference an origin in any AWS Region, and distribute its content globally. However, all distributions are located in the us-east-1 Region.

" + "documentation":"

Creates an SSL/TLS certificate for an Amazon Lightsail content delivery network (CDN) distribution and a container service.

After the certificate is valid, use the AttachCertificateToDistribution action to use the certificate and its domains with your distribution. Or use the UpdateContainerService action to use the certificate and its domains with your container service.

Only certificates created in the us-east-1 AWS Region can be attached to Lightsail distributions. Lightsail distributions are global resources that can reference an origin in any AWS Region, and distribute its content globally. However, all distributions are located in the us-east-1 Region.
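A minimal boto3 sketch of creating such a certificate and later attaching it; the certificate, distribution, and domain names are hypothetical.

```python
import boto3

ls = boto3.client("lightsail")
ls.create_certificate(
    certificateName="my-cdn-cert",               # hypothetical name
    domainName="example.com",
    subjectAlternativeNames=["www.example.com"],
)

# Once the certificate is issued and valid, attach it to a distribution:
# ls.attach_certificate_to_distribution(
#     distributionName="my-distribution", certificateName="my-cdn-cert")
```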

" }, "CreateCloudFormationStack":{ "name":"CreateCloudFormationStack", @@ -493,7 +493,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Creates a Lightsail load balancer TLS certificate.

TLS is just an updated, more secure version of Secure Socket Layer (SSL).

The CreateLoadBalancerTlsCertificate operation supports tag-based access control via resource tags applied to the resource identified by load balancer name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Creates an SSL/TLS certificate for an Amazon Lightsail load balancer.

TLS is just an updated, more secure version of Secure Socket Layer (SSL).

The CreateLoadBalancerTlsCertificate operation supports tag-based access control via resource tags applied to the resource identified by load balancer name. For more information, see the Lightsail Dev Guide.

" }, "CreateRelationalDatabase":{ "name":"CreateRelationalDatabase", @@ -2295,7 +2295,7 @@ {"shape":"AccessDeniedException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Registers a container image to your Amazon Lightsail container service.

This action is not required if you install and use the Lightsail Control (lightsailctl) plugin to push container images to your Lightsail container service. For more information, see Pushing and managing container images on your Amazon Lightsail container services in the Lightsail Dev Guide.

" + "documentation":"

Registers a container image to your Amazon Lightsail container service.

This action is not required if you install and use the Lightsail Control (lightsailctl) plugin to push container images to your Lightsail container service. For more information, see Pushing and managing container images on your Amazon Lightsail container services in the Lightsail Dev Guide.

" }, "ReleaseStaticIp":{ "name":"ReleaseStaticIp", @@ -2369,7 +2369,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Sets the IP address type for a Amazon Lightsail resource.

Use this action to enable dual-stack for a resource, which enables IPv4 and IPv6 for the specified resource. Alternately, you can use this action to disable dual-stack, and enable IPv4 only.

" + "documentation":"

Sets the IP address type for an Amazon Lightsail resource.

Use this action to enable dual-stack for a resource, which enables IPv4 and IPv6 for the specified resource. Alternately, you can use this action to disable dual-stack, and enable IPv4 only.

" }, "StartInstance":{ "name":"StartInstance", @@ -3783,23 +3783,23 @@ "members":{ "healthyThreshold":{ "shape":"integer", - "documentation":"

The number of consecutive health check successes required before moving the container to the Healthy state.

" + "documentation":"

The number of consecutive health check successes required before moving the container to the Healthy state. The default value is 2.

" }, "unhealthyThreshold":{ "shape":"integer", - "documentation":"

The number of consecutive health check failures required before moving the container to the Unhealthy state.

" + "documentation":"

The number of consecutive health check failures required before moving the container to the Unhealthy state. The default value is 2.

" }, "timeoutSeconds":{ "shape":"integer", - "documentation":"

The amount of time, in seconds, during which no response means a failed health check. You may specify between 2 and 60 seconds.

" + "documentation":"

The amount of time, in seconds, during which no response means a failed health check. You can specify between 2 and 60 seconds. The default value is 2.

" }, "intervalSeconds":{ "shape":"integer", - "documentation":"

The approximate interval, in seconds, between health checks of an individual container. You may specify between 5 and 300 seconds.

" + "documentation":"

The approximate interval, in seconds, between health checks of an individual container. You can specify between 5 and 300 seconds. The default value is 5.

" }, "path":{ "shape":"string", - "documentation":"

The path on the container on which to perform the health check.

" + "documentation":"

The path on the container on which to perform the health check. The default value is /.
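These health-check members map onto the healthCheck object of a container service deployment's public endpoint; a hedged boto3 sketch with a hypothetical service, container, and image follows.

```python
import boto3

ls = boto3.client("lightsail")
ls.create_container_service_deployment(
    serviceName="my-container-service",          # hypothetical service
    containers={
        "web": {
            "image": ":my-container-service.web.1",  # hypothetical registered image
            "ports": {"80": "HTTP"},
        }
    },
    publicEndpoint={
        "containerName": "web",
        "containerPort": 80,
        "healthCheck": {
            "healthyThreshold": 2,    # defaults shown in the member docs above
            "unhealthyThreshold": 2,
            "timeoutSeconds": 2,      # allowed range 2-60
            "intervalSeconds": 5,     # allowed range 5-300
            "path": "/",
            "successCodes": "200-499",
        },
    },
)
```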

" }, "successCodes":{ "shape":"string", @@ -4764,7 +4764,7 @@ "members":{ "relationalDatabaseName":{ "shape":"ResourceName", - "documentation":"

The name to use for your new database.

Constraints:

" + "documentation":"

The name to use for your new Lightsail database resource.

Constraints:

" }, "availabilityZone":{ "shape":"string", @@ -5535,7 +5535,7 @@ }, "progress":{ "shape":"string", - "documentation":"

The progress of the disk snapshot operation.

" + "documentation":"

The progress of the snapshot.

" }, "fromDiskName":{ "shape":"ResourceName", @@ -7921,7 +7921,7 @@ }, "progress":{ "shape":"string", - "documentation":"

The progress of the snapshot.

" + "documentation":"

The progress of the snapshot.

This is populated only for disk snapshots, and is null for instance snapshots.

" }, "fromAttachedDisks":{ "shape":"DiskList", @@ -8351,7 +8351,7 @@ }, "failureReason":{ "shape":"LoadBalancerTlsCertificateFailureReason", - "documentation":"

The validation failure reason, if any, of the certificate.

The following failure reasons are possible:

" + "documentation":"

The validation failure reason, if any, of the certificate.

The following failure reasons are possible:

" }, "issuedAt":{ "shape":"IsoDate", diff --git a/botocore/data/lookoutvision/2020-11-20/service-2.json b/botocore/data/lookoutvision/2020-11-20/service-2.json index ffe7e7c0..ad02799f 100644 --- a/botocore/data/lookoutvision/2020-11-20/service-2.json +++ b/botocore/data/lookoutvision/2020-11-20/service-2.json @@ -88,7 +88,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Deletes an existing Amazon Lookout for Vision dataset.

If your project has a single dataset, you must create a new dataset before you can create a model.

If your project has a training dataset and a test dataset, consider the following.

It might take a while to delete the dataset. To check the current status, check the Status field in the response from a call to DescribeDataset.

This operation requires permissions to perform the lookoutvision:DeleteDataset operation.

" + "documentation":"

Deletes an existing Amazon Lookout for Vision dataset.

If your project has a single dataset, you must create a new dataset before you can create a model.

If your project has a training dataset and a test dataset, consider the following.

This operation requires permissions to perform the lookoutvision:DeleteDataset operation.

" }, "DeleteModel":{ "name":"DeleteModel", @@ -107,7 +107,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Deletes an Amazon Lookout for Vision model. You can't delete a running model. To stop a running model, use the StopModel operation.

This operation requires permissions to perform the lookoutvision:DeleteModel operation.

" + "documentation":"

Deletes an Amazon Lookout for Vision model. You can't delete a running model. To stop a running model, use the StopModel operation.

It might take a few seconds to delete a model. To determine if a model has been deleted, call ListProjects and check if the version of the model (ModelVersion) is in the Models array.

This operation requires permissions to perform the lookoutvision:DeleteModel operation.

" }, "DeleteProject":{ "name":"DeleteProject", @@ -289,7 +289,7 @@ {"shape":"ThrottlingException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Starts the running of the version of an Amazon Lookout for Vision model. Starting a model takes a while to complete. To check the current state of the model, use DescribeModel.

Once the model is running, you can detect custom labels in new images by calling DetectAnomalies.

You are charged for the amount of time that the model is running. To stop a running model, call StopModel.

This operation requires permissions to perform the lookoutvision:StartModel operation.

" + "documentation":"

Starts the running of the version of an Amazon Lookout for Vision model. Starting a model takes a while to complete. To check the current state of the model, use DescribeModel.

A model is ready to use when its status is HOSTED.

Once the model is running, you can detect custom labels in new images by calling DetectAnomalies.

You are charged for the amount of time that the model is running. To stop a running model, call StopModel.

This operation requires permissions to perform the lookoutvision:StartModel operation.
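A boto3 sketch of starting a model and polling DescribeModel until it reaches HOSTED; the project name and model version are hypothetical.

```python
import time
import boto3

lfv = boto3.client("lookoutvision")
lfv.start_model(
    ProjectName="my-project",      # hypothetical project and version
    ModelVersion="1",
    MinInferenceUnits=1,
)

# Poll until the model is ready to serve DetectAnomalies requests.
while True:
    status = lfv.describe_model(
        ProjectName="my-project", ModelVersion="1"
    )["ModelDescription"]["Status"]
    if status == "HOSTED":
        break
    time.sleep(30)
```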

" }, "StopModel":{ "name":"StopModel", @@ -308,7 +308,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Stops a running model. The operation might take a while to complete. To check the current status, call DescribeModel.

This operation requires permissions to perform the lookoutvision:StopModel operation.

" + "documentation":"

Stops the hosting of a running model. The operation might take a while to complete. To check the current status, call DescribeModel.

After the model hosting stops, the Status of the model is TRAINED.

This operation requires permissions to perform the lookoutvision:StopModel operation.

" }, "TagResource":{ "name":"TagResource", @@ -488,7 +488,7 @@ }, "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The identifier of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for encrypting the model. If this parameter is not specified, the model is encrypted by a key that AWS owns and manages.

" + "documentation":"

The identifier for your AWS Key Management Service (AWS KMS) customer master key (CMK). The key is used to encrypt training and test images copied into the service for model training. Your source images are unaffected. If this parameter is not specified, the copied images are encrypted by a key that AWS owns and manages.

" }, "Tags":{ "shape":"TagList", @@ -879,7 +879,7 @@ "shape":"ContentType", "documentation":"

The type of the image passed in Body. Valid values are image/png (PNG format images) and image/jpeg (JPG format images).
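For example, a JPEG image might be submitted to DetectAnomalies like this; the project name, model version, and file name are hypothetical.

```python
import boto3

lfv = boto3.client("lookoutvision")
with open("part.jpg", "rb") as image:
    result = lfv.detect_anomalies(
        ProjectName="my-project",   # hypothetical
        ModelVersion="1",
        Body=image,
        ContentType="image/jpeg",
    )

anomaly = result["DetectAnomalyResult"]
print(anomaly["IsAnomalous"], anomaly["Confidence"])
```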

", "location":"header", - "locationName":"content-type" + "locationName":"Content-Type" } }, "payload":"Body" @@ -1206,10 +1206,11 @@ "ModelHostingStatus":{ "type":"string", "enum":[ - "RUNNING", - "STARTING", - "STOPPED", - "FAILED" + "STARTING_HOSTING", + "HOSTED", + "HOSTING_FAILED", + "STOPPING_HOSTING", + "SYSTEM_UPDATING" ] }, "ModelMetadata":{ @@ -1434,14 +1435,14 @@ "members":{ "Bucket":{ "shape":"S3BucketName", - "documentation":"

The S3 bucket that contains the manifest file.

" + "documentation":"

The S3 bucket that contains the training output.

" }, "Prefix":{ "shape":"S3KeyPrefix", - "documentation":"

The path and name of the manifest file within the S3 bucket.

" + "documentation":"

The path of the folder, within the S3 bucket, that contains the training output.

" } }, - "documentation":"

Information about the location of a manifest file.

" + "documentation":"

Information about the location of the training output.

" }, "S3ObjectKey":{ "type":"string", diff --git a/botocore/data/macie/2017-12-19/service-2.json b/botocore/data/macie/2017-12-19/service-2.json index 194313b7..5d040fd1 100644 --- a/botocore/data/macie/2017-12-19/service-2.json +++ b/botocore/data/macie/2017-12-19/service-2.json @@ -40,7 +40,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalException"} ], - "documentation":"

Associates specified S3 resources with Amazon Macie Classic for monitoring and data classification. If memberAccountId isn't specified, the action associates specified S3 resources with Macie Classic for the current master account. If memberAccountId is specified, the action associates specified S3 resources with Macie Classic for the specified member account.

" + "documentation":"

Associates specified S3 resources with Amazon Macie Classic for monitoring and data classification. If memberAccountId isn't specified, the action associates specified S3 resources with Macie Classic for the current Macie Classic administrator account. If memberAccountId is specified, the action associates specified S3 resources with Macie Classic for the specified member account.

" }, "DisassociateMemberAccount":{ "name":"DisassociateMemberAccount", @@ -68,7 +68,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalException"} ], - "documentation":"

Removes specified S3 resources from being monitored by Amazon Macie Classic. If memberAccountId isn't specified, the action removes specified S3 resources from Macie Classic for the current master account. If memberAccountId is specified, the action removes specified S3 resources from Macie Classic for the specified member account.

" + "documentation":"

Removes specified S3 resources from being monitored by Amazon Macie Classic. If memberAccountId isn't specified, the action removes specified S3 resources from Macie Classic for the current Macie Classic administrator account. If memberAccountId is specified, the action removes specified S3 resources from Macie Classic for the specified member account.

" }, "ListMemberAccounts":{ "name":"ListMemberAccounts", @@ -82,7 +82,7 @@ {"shape":"InternalException"}, {"shape":"InvalidInputException"} ], - "documentation":"

Lists all Amazon Macie Classic member accounts for the current Amazon Macie Classic master account.

" + "documentation":"

Lists all Amazon Macie Classic member accounts for the current Macie Classic administrator account.

" }, "ListS3Resources":{ "name":"ListS3Resources", @@ -97,7 +97,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalException"} ], - "documentation":"

Lists all the S3 resources associated with Amazon Macie Classic. If memberAccountId isn't specified, the action lists the S3 resources associated with Amazon Macie Classic for the current master account. If memberAccountId is specified, the action lists the S3 resources associated with Amazon Macie Classic for the specified member account.

" + "documentation":"

Lists all the S3 resources associated with Amazon Macie Classic. If memberAccountId isn't specified, the action lists the S3 resources associated with Macie Classic for the current Macie Classic administrator account. If memberAccountId is specified, the action lists the S3 resources associated with Macie Classic for the specified member account.

" }, "UpdateS3Resources":{ "name":"UpdateS3Resources", @@ -112,7 +112,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalException"} ], - "documentation":"

Updates the classification types for the specified S3 resources. If memberAccountId isn't specified, the action updates the classification types of the S3 resources associated with Amazon Macie Classic for the current master account. If memberAccountId is specified, the action updates the classification types of the S3 resources associated with Amazon Macie Classic for the specified member account.

" + "documentation":"

Updates the classification types for the specified S3 resources. If memberAccountId isn't specified, the action updates the classification types of the S3 resources associated with Amazon Macie Classic for the current Macie Classic administrator account. If memberAccountId is specified, the action updates the classification types of the S3 resources associated with Macie Classic for the specified member account.

" } }, "shapes":{ @@ -179,7 +179,7 @@ }, "continuous":{ "shape":"S3ContinuousClassificationType", - "documentation":"

A continuous classification of the objects that are added to a specified S3 bucket. Amazon Macie Classic begins performing continuous classification after a bucket is successfully associated with Amazon Macie Classic.

" + "documentation":"

A continuous classification of the objects that are added to a specified S3 bucket. Amazon Macie Classic begins performing continuous classification after a bucket is successfully associated with Macie Classic.

" } }, "documentation":"

The classification type that Amazon Macie Classic applies to the associated S3 resources.

" @@ -193,7 +193,7 @@ }, "continuous":{ "shape":"S3ContinuousClassificationType", - "documentation":"

A continuous classification of the objects that are added to a specified S3 bucket. Amazon Macie Classic begins performing continuous classification after a bucket is successfully associated with Amazon Macie Classic.

" + "documentation":"

A continuous classification of the objects that are added to a specified S3 bucket. Amazon Macie Classic begins performing continuous classification after a bucket is successfully associated with Macie Classic.

" } }, "documentation":"

The classification type that Amazon Macie Classic applies to the associated S3 resources. At least one of the classification types (oneTime or continuous) must be specified.

" @@ -214,7 +214,7 @@ "members":{ "memberAccountId":{ "shape":"AWSAccountId", - "documentation":"

The ID of the Amazon Macie Classic member account whose resources you want to remove from being monitored by Amazon Macie Classic.

" + "documentation":"

The ID of the Amazon Macie Classic member account whose resources you want to remove from being monitored by Macie Classic.

" }, "associatedS3Resources":{ "shape":"S3Resources", @@ -315,7 +315,7 @@ "members":{ "memberAccounts":{ "shape":"MemberAccounts", - "documentation":"

A list of the Amazon Macie Classic member accounts returned by the action. The current master account is also included in this list.

" + "documentation":"

A list of the Amazon Macie Classic member accounts returned by the action. The current Macie Classic administrator account is also included in this list.

" }, "nextToken":{ "shape":"NextToken", @@ -491,5 +491,5 @@ } } }, - "documentation":"Amazon Macie Classic

Amazon Macie Classic is a security service that uses machine learning to automatically discover, classify, and protect sensitive data in AWS. Macie Classic recognizes sensitive data such as personally identifiable information (PII) or intellectual property, and provides you with dashboards and alerts that give visibility into how this data is being accessed or moved. For more information, see the Amazon Macie Classic User Guide.

A new Amazon Macie is now available with significant design improvements and additional features, at a lower price and in most AWS Regions. We encourage you to explore and use the new and improved features, and benefit from the reduced cost. To learn about features and pricing for the new Amazon Macie, see Amazon Macie.

" + "documentation":"Amazon Macie Classic

Amazon Macie Classic is a security service that uses machine learning to automatically discover, classify, and protect sensitive data in AWS. Macie Classic recognizes sensitive data such as personally identifiable information (PII) or intellectual property, and provides you with dashboards and alerts that give visibility into how this data is being accessed or moved. For more information, see the Amazon Macie Classic User Guide.

" } diff --git a/botocore/data/macie2/2020-01-01/paginators-1.json b/botocore/data/macie2/2020-01-01/paginators-1.json index 0ab35987..46597839 100644 --- a/botocore/data/macie2/2020-01-01/paginators-1.json +++ b/botocore/data/macie2/2020-01-01/paginators-1.json @@ -10,7 +10,10 @@ "input_token": "nextToken", "output_token": "nextToken", "limit_key": "maxResults", - "result_key": "records" + "result_key": "records", + "non_aggregate_keys": [ + "timeRange" + ] }, "ListClassificationJobs": { "input_token": "nextToken", diff --git a/botocore/data/macie2/2020-01-01/service-2.json b/botocore/data/macie2/2020-01-01/service-2.json index a1b163ad..8a2742d7 100644 --- a/botocore/data/macie2/2020-01-01/service-2.json +++ b/botocore/data/macie2/2020-01-01/service-2.json @@ -284,7 +284,7 @@ "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" } ], - "documentation": "

Sends an Amazon Macie membership invitation to one or more accounts.

" + "documentation": "

Sends an Amazon Macie membership invitation to one or more accounts.

" }, "CreateMember": { "name": "CreateMember", @@ -330,7 +330,7 @@ "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" } ], - "documentation": "

Associates an account with an Amazon Macie master account.

" + "documentation": "

Associates an account with an Amazon Macie administrator account.

" }, "CreateSampleFindings": { "name": "CreateSampleFindings", @@ -606,7 +606,7 @@ "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" } ], - "documentation": "

Deletes the association between an Amazon Macie master account and an account.

" + "documentation": "

Deletes the association between an Amazon Macie administrator account and an account.

" }, "DescribeBuckets": { "name": "DescribeBuckets", @@ -838,6 +838,51 @@ ], "documentation": "

Disables an account as the delegated Amazon Macie administrator account for an AWS organization.

" }, + "DisassociateFromAdministratorAccount": { + "name": "DisassociateFromAdministratorAccount", + "http": { + "method": "POST", + "requestUri": "/administrator/disassociate", + "responseCode": 200 + }, + "input": { + "shape": "DisassociateFromAdministratorAccountRequest" + }, + "output": { + "shape": "DisassociateFromAdministratorAccountResponse" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Disassociates a member account from its Amazon Macie administrator account.

" + }, "DisassociateFromMasterAccount": { "name": "DisassociateFromMasterAccount", "http": { @@ -882,7 +927,7 @@ "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" } ], - "documentation": "

Disassociates a member account from its Amazon Macie master account.

" + "documentation": "

(Deprecated) Disassociates a member account from its Amazon Macie administrator account. This operation has been replaced by the DisassociateFromAdministratorAccount operation.

" }, "DisassociateMember": { "name": "DisassociateMember", @@ -928,7 +973,7 @@ "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" } ], - "documentation": "

Disassociates an Amazon Macie master account from a member account.

" + "documentation": "

Disassociates an Amazon Macie administrator account from a member account.

" }, "EnableMacie": { "name": "EnableMacie", @@ -1022,6 +1067,51 @@ ], "documentation": "

Designates an account as the delegated Amazon Macie administrator account for an AWS organization.

" }, + "GetAdministratorAccount": { + "name": "GetAdministratorAccount", + "http": { + "method": "GET", + "requestUri": "/administrator", + "responseCode": 200 + }, + "input": { + "shape": "GetAdministratorAccountRequest" + }, + "output": { + "shape": "GetAdministratorAccountResponse" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Retrieves information about the Amazon Macie administrator account for an account.
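
As a rough illustration of how the new administrator-account operations surface through a botocore client (the region and the printed fields are assumptions, not part of the model change):

```python
# Minimal sketch: look up, and then remove, the Macie administrator
# relationship for the calling member account. These calls replace the
# deprecated GetMasterAccount and DisassociateFromMasterAccount operations.
import botocore.session

session = botocore.session.get_session()
macie = session.create_client("macie2", region_name="us-east-1")

response = macie.get_administrator_account()
administrator = response.get("administrator", {})
print("Administrator account:", administrator.get("accountId"))

# Takes no parameters; the acting account is the member being disassociated.
macie.disassociate_from_administrator_account()
```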

" + }, "GetBucketStatistics": { "name": "GetBucketStatistics", "http": { @@ -1298,6 +1388,52 @@ ], "documentation": "

Retrieves the criteria and other settings for a findings filter.

" }, + "GetFindingsPublicationConfiguration": { + "name": "GetFindingsPublicationConfiguration", + "http": { + "method": "GET", + "requestUri": "/findings-publication-configuration", + "responseCode": 200 + }, + "input": { + "shape": "GetFindingsPublicationConfigurationRequest" + }, + "output": { + "shape": "GetFindingsPublicationConfigurationResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Retrieves the configuration settings for publishing findings to AWS Security Hub.

" + }, "GetInvitationsCount": { "name": "GetInvitationsCount", "http": { @@ -1434,7 +1570,7 @@ "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" } ], - "documentation": "

Retrieves information about the Amazon Macie master account for an account.

" + "documentation": "

(Deprecated) Retrieves information about the Amazon Macie administrator account for an account. This operation has been replaced by the GetAdministratorAccount operation.

" }, "GetMember": { "name": "GetMember", @@ -1480,7 +1616,7 @@ "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" } ], - "documentation": "

Retrieves information about a member account that's associated with an Amazon Macie master account.

" + "documentation": "

Retrieves information about an account that's associated with an Amazon Macie administrator account.

" }, "GetUsageStatistics": { "name": "GetUsageStatistics", @@ -1848,7 +1984,7 @@ "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" } ], - "documentation": "

Retrieves information about the accounts that are associated with an Amazon Macie master account.

" + "documentation": "

Retrieves information about the accounts that are associated with an Amazon Macie administrator account.

" }, "ListOrganizationAdminAccounts": { "name": "ListOrganizationAdminAccounts", @@ -1959,6 +2095,52 @@ ], "documentation": "

Creates or updates the configuration settings for storing data classification results.

" }, + "PutFindingsPublicationConfiguration": { + "name": "PutFindingsPublicationConfiguration", + "http": { + "method": "PUT", + "requestUri": "/findings-publication-configuration", + "responseCode": 200 + }, + "input": { + "shape": "PutFindingsPublicationConfigurationRequest" + }, + "output": { + "shape": "PutFindingsPublicationConfigurationResponse", + "documentation": "

The request succeeded and there isn't any content to include in the body of the response (No Content).

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Updates the configuration settings for publishing findings to AWS Security Hub.
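
A minimal sketch of driving the new findings-publication configuration from botocore; clientToken is modeled as an idempotency token, so it can be omitted and generated automatically:

```python
# Sketch: publish both sensitive data findings and policy findings to
# AWS Security Hub, then read the stored configuration back.
import botocore.session

macie = botocore.session.get_session().create_client("macie2")

macie.put_findings_publication_configuration(
    securityHubConfiguration={
        "publishClassificationFindings": True,  # model default is false
        "publishPolicyFindings": True,          # model default is true
    }
)

current = macie.get_findings_publication_configuration()
print(current["securityHubConfiguration"])
```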

" + }, "TagResource": { "name": "TagResource", "http": { @@ -2221,7 +2403,7 @@ "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" } ], - "documentation": "

Enables an Amazon Macie master account to suspend or re-enable a member account.

" + "documentation": "

Enables an Amazon Macie administrator to suspend or re-enable a member account.

" }, "UpdateOrganizationConfiguration": { "name": "UpdateOrganizationConfiguration", @@ -2274,6 +2456,11 @@ "AcceptInvitationRequest": { "type": "structure", "members": { + "administratorAccountId": { + "shape": "__string", + "locationName": "administratorAccountId", + "documentation": "

The AWS account ID for the account that sent the invitation.

" + }, "invitationId": { "shape": "__string", "locationName": "invitationId", @@ -2282,11 +2469,10 @@ "masterAccount": { "shape": "__string", "locationName": "masterAccount", - "documentation": "

The AWS account ID for the account that sent the invitation.

" + "documentation": "

(Deprecated) The AWS account ID for the account that sent the invitation. This property has been replaced by the administratorAccountId property and is retained only for backward compatibility.
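
Because masterAccount is no longer required, an invitation can now be accepted with only the new administratorAccountId field; a minimal sketch with placeholder IDs:

```python
# Sketch: accept a Macie membership invitation using administratorAccountId
# instead of the deprecated masterAccount field. Both values are placeholders.
import botocore.session

macie = botocore.session.get_session().create_client("macie2")

macie.accept_invitation(
    administratorAccountId="111122223333",
    invitationId="example-invitation-id",
)
```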

" } }, "required": [ - "masterAccount", "invitationId" ] }, @@ -2339,7 +2525,7 @@ "documentation": "

The email address for the account.

" } }, - "documentation": "

Specifies details for an account to associate with an Amazon Macie master account.

", + "documentation": "

Specifies details for an account to associate with an Amazon Macie administrator account.

", "required": [ "email", "accountId" @@ -2351,10 +2537,10 @@ "blockPublicAccess": { "shape": "BlockPublicAccess", "locationName": "blockPublicAccess", - "documentation": "

The block public access settings for the bucket.

" + "documentation": "

The block public access settings for the AWS account that owns the bucket.

" } }, - "documentation": "

Provides information about account-level permissions settings that apply to an S3 bucket.

" + "documentation": "

Provides information about the account-level permissions settings that apply to an S3 bucket.

" }, "AdminAccount": { "type": "structure", @@ -2367,14 +2553,14 @@ "status": { "shape": "AdminStatus", "locationName": "status", - "documentation": "

The current status of the account as a delegated administrator of Amazon Macie for the organization.

" + "documentation": "

The current status of the account as the delegated administrator of Amazon Macie for the organization.

" } }, "documentation": "

Provides information about the delegated Amazon Macie administrator account for an AWS organization.

" }, "AdminStatus": { "type": "string", - "documentation": "

The current status of an account as the delegated Amazon Macie administrator account for an AWS organization.

", + "documentation": "

The current status of an account as the delegated Amazon Macie administrator account for an AWS organization. Possible values are:

", "enum": [ "ENABLED", "DISABLING_IN_PROGRESS" @@ -2583,20 +2769,20 @@ "kmsManaged": { "shape": "__long", "locationName": "kmsManaged", - "documentation": "

The total number of buckets that use an AWS Key Management Service (AWS KMS) customer master key (CMK) by default to encrypt objects. These buckets use AWS managed AWS KMS (AWS-KMS) encryption or customer managed AWS KMS (SSE-KMS) encryption.

" + "documentation": "

The total number of buckets that use an AWS Key Management Service (AWS KMS) customer master key (CMK) to encrypt new objects by default. These buckets use AWS managed AWS KMS encryption (AWS-KMS) or customer managed AWS KMS encryption (SSE-KMS).

" }, "s3Managed": { "shape": "__long", "locationName": "s3Managed", - "documentation": "

The total number of buckets that use an Amazon S3 managed key by default to encrypt objects. These buckets use Amazon S3 managed (SSE-S3) encryption.

" + "documentation": "

The total number of buckets that use an Amazon S3 managed key to encrypt new objects by default. These buckets use Amazon S3 managed encryption (SSE-S3).

" }, "unencrypted": { "shape": "__long", "locationName": "unencrypted", - "documentation": "

The total number of buckets that don't encrypt objects by default. Default encryption is disabled for these buckets.

" + "documentation": "

The total number of buckets that don't encrypt new objects by default. Default encryption is disabled for these buckets.

" } }, - "documentation": "

Provides information about the number of S3 buckets that use certain types of server-side encryption or don't encrypt objects by default.

" + "documentation": "

Provides information about the number of S3 buckets that use certain types of server-side encryption by default or don't encrypt new objects by default.

" }, "BucketCountBySharedAccessType": { "type": "structure", @@ -2727,7 +2913,7 @@ "classifiableSizeInBytes": { "shape": "__long", "locationName": "classifiableSizeInBytes", - "documentation": "

The total storage size, in bytes, of the objects that Amazon Macie can analyze in the bucket. These objects use a supported storage class and have a file name extension for a supported file or storage format.

" + "documentation": "

The total storage size, in bytes, of the objects that Amazon Macie can analyze in the bucket. These objects use a supported storage class and have a file name extension for a supported file or storage format.

If versioning is enabled for the bucket, Macie calculates this value based on the size of the latest version of each applicable object in the bucket. This value doesn't reflect the storage size of all versions of each applicable object in the bucket.

" }, "jobDetails": { "shape": "JobDetails", @@ -2737,7 +2923,7 @@ "lastUpdated": { "shape": "__timestampIso8601", "locationName": "lastUpdated", - "documentation": "

The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently retrieved data about the bucket from Amazon S3.

" + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently retrieved both bucket and object metadata from Amazon S3 for the bucket.

" }, "objectCount": { "shape": "__long", @@ -2752,7 +2938,7 @@ "publicAccess": { "shape": "BucketPublicAccess", "locationName": "publicAccess", - "documentation": "

Specifies whether the bucket is publicly accessible. If this value is true, an access control list (ACL), bucket policy, or block public access settings allow the bucket to be accessed by the general public.

" + "documentation": "

Specifies whether the bucket is publicly accessible due to the combination of permissions settings that apply to the bucket, and provides information about those settings.

" }, "region": { "shape": "__string", @@ -2764,6 +2950,11 @@ "locationName": "replicationDetails", "documentation": "

Specifies whether the bucket is configured to replicate one or more objects to buckets for other AWS accounts and, if so, which accounts.

" }, + "serverSideEncryption": { + "shape": "BucketServerSideEncryption", + "locationName": "serverSideEncryption", + "documentation": "

Specifies whether the bucket encrypts new objects by default and, if so, the type of server-side encryption that's used.

" + }, "sharedAccess": { "shape": "SharedAccess", "locationName": "sharedAccess", @@ -2772,12 +2963,12 @@ "sizeInBytes": { "shape": "__long", "locationName": "sizeInBytes", - "documentation": "

The total storage size, in bytes, of the bucket.

" + "documentation": "

The total storage size, in bytes, of the bucket.

If versioning is enabled for the bucket, Amazon Macie calculates this value based on the size of the latest version of each object in the bucket. This value doesn't reflect the storage size of all versions of each object in the bucket.

" }, "sizeInBytesCompressed": { "shape": "__long", "locationName": "sizeInBytesCompressed", - "documentation": "

The total compressed storage size, in bytes, of the bucket.

" + "documentation": "

The total compressed storage size, in bytes, of the bucket.

If versioning is enabled for the bucket, Macie calculates this value based on the size of the latest version of each object in the bucket. This value doesn't reflect the storage size of all versions of each object in the bucket.

" }, "tags": { "shape": "__listOfKeyValuePair", @@ -2850,18 +3041,34 @@ }, "documentation": "

Provides information about the permissions settings that determine whether an S3 bucket is publicly accessible.

" }, + "BucketServerSideEncryption": { + "type": "structure", + "members": { + "kmsMasterKeyId": { + "shape": "__string", + "locationName": "kmsMasterKeyId", + "documentation": "

The Amazon Resource Name (ARN) or unique identifier (key ID) for the AWS Key Management Service (AWS KMS) customer master key (CMK) that's used by default to encrypt objects that are added to the bucket. This value is null if the bucket uses an Amazon S3 managed key to encrypt new objects or the bucket doesn't encrypt new objects by default.

" + }, + "type": { + "shape": "Type", + "locationName": "type", + "documentation": "

The type of server-side encryption that's used by default when storing new objects in the bucket. Possible values are:

" + } + }, + "documentation": "

Provides information about the default server-side encryption settings for an S3 bucket. For detailed information about these settings, see Setting default server-side encryption behavior for Amazon S3 buckets in the Amazon Simple Storage Service User Guide.

" + }, "BucketSortCriteria": { "type": "structure", "members": { "attributeName": { "shape": "__string", "locationName": "attributeName", - "documentation": "

The name of the property to sort the results by. This value can be the name of any property that Amazon Macie defines as bucket metadata, such as bucketName or accountId.

" + "documentation": "

The name of the bucket property to sort the results by. This value can be one of the following properties that Amazon Macie defines as bucket metadata: accountId, bucketName, classifiableObjectCount, classifiableSizeInBytes, objectCount, or sizeInBytes.

" }, "orderBy": { "shape": "OrderBy", "locationName": "orderBy", - "documentation": "

The sort order to apply to the results, based on the value for the property specified by the attributeName property. Valid values are: ASC, sort the results in ascending order; and, DESC, sort the results in descending order.

" + "documentation": "

The sort order to apply to the results, based on the value specified by the attributeName property. Valid values are: ASC, sort the results in ascending order; and, DESC, sort the results in descending order.

" } }, "documentation": "

Specifies criteria for sorting the results of a query for information about S3 buckets.
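
For illustration, a sketch that combines the documented sort attributes with the new serverSideEncryption details in the DescribeBuckets response; maxResults and the response field names other than serverSideEncryption are assumptions based on the rest of the model:

```python
# Sketch: list bucket metadata, largest buckets first, and report each
# bucket's default server-side encryption settings.
import botocore.session

macie = botocore.session.get_session().create_client("macie2")

response = macie.describe_buckets(
    sortCriteria={"attributeName": "sizeInBytes", "orderBy": "DESC"},
    maxResults=50,
)
for bucket in response["buckets"]:
    sse = bucket.get("serverSideEncryption") or {}
    print(bucket["bucketName"], sse.get("type"), sse.get("kmsMasterKeyId"))
```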

" @@ -2882,7 +3089,7 @@ "columnName": { "shape": "__string", "locationName": "columnName", - "documentation": "

The name of the column that contains the data, if available.

" + "documentation": "

The name of the column that contains the data, if available. This value is also null if Amazon Macie detects sensitive data in the name of any column in the file.

" }, "row": { "shape": "__long", @@ -3236,7 +3443,7 @@ "account": { "shape": "AccountDetail", "locationName": "account", - "documentation": "

The details for the account to associate with the master account.

" + "documentation": "

The details for the account to associate with the administrator account.

" }, "tags": { "shape": "TagMap", @@ -3254,7 +3461,7 @@ "arn": { "shape": "__string", "locationName": "arn", - "documentation": "

The Amazon Resource Name (ARN) of the account that was associated with the master account.

" + "documentation": "

The Amazon Resource Name (ARN) of the account that was associated with the administrator account.

" } } }, @@ -3325,7 +3532,7 @@ }, "Currency": { "type": "string", - "documentation": "

The type of currency that data for a usage metric is reported in. Possible values are:

", + "documentation": "

The type of currency that the data for an Amazon Macie usage metric is reported in. Possible values are:

", "enum": [ "USD" ] @@ -3739,7 +3946,7 @@ "shape": "__string", "location": "querystring", "locationName": "adminAccountId", - "documentation": "

The AWS account ID of the delegated administrator account.

" + "documentation": "

The AWS account ID of the delegated Amazon Macie administrator account.

" } }, "required": [ @@ -3750,6 +3957,14 @@ "type": "structure", "members": {} }, + "DisassociateFromAdministratorAccountRequest": { + "type": "structure", + "members": {} + }, + "DisassociateFromAdministratorAccountResponse": { + "type": "structure", + "members": {} + }, "DisassociateFromMasterAccountRequest": { "type": "structure", "members": {} @@ -3817,7 +4032,7 @@ "status": { "shape": "MacieStatus", "locationName": "status", - "documentation": "

Specifies the status for the account. To enable Amazon Macie and start all Amazon Macie activities for the account, set this value to ENABLED.

" + "documentation": "

Specifies the new status for the account. To enable Amazon Macie and start all Macie activities for the account, set this value to ENABLED.

" } } }, @@ -3860,7 +4075,7 @@ }, "ErrorCode": { "type": "string", - "documentation": "

The source of an error, issue, or delay. Possible values are:

", + "documentation": "

The source of an issue or delay. Possible values are:

", "enum": [ "ClientError", "InternalError" @@ -4058,7 +4273,7 @@ }, "FindingPublishingFrequency": { "type": "string", - "documentation": "

The frequency with which Amazon Macie publishes updates to policy findings for an account. This includes publishing updates to AWS Security Hub and Amazon EventBridge (formerly called Amazon CloudWatch Events). Valid values are:

", + "documentation": "

The frequency with which Amazon Macie publishes updates to policy findings for an account. This includes publishing updates to AWS Security Hub and Amazon EventBridge (formerly called Amazon CloudWatch Events). For more information, see Monitoring and processing findings in the Amazon Macie User Guide. Valid values are:

", "enum": [ "FIFTEEN_MINUTES", "ONE_HOUR", @@ -4144,6 +4359,20 @@ }, "documentation": "

Provides information about a findings filter.

" }, + "GetAdministratorAccountRequest": { + "type": "structure", + "members": {} + }, + "GetAdministratorAccountResponse": { + "type": "structure", + "members": { + "administrator": { + "shape": "Invitation", + "locationName": "administrator", + "documentation": "

The AWS account ID for the administrator account. If the accounts are associated by a Macie membership invitation, this object also provides details about the invitation that was sent to establish the relationship between the accounts.

" + } + } + }, "GetBucketStatisticsRequest": { "type": "structure", "members": { @@ -4170,7 +4399,7 @@ "bucketCountByEncryptionType": { "shape": "BucketCountByEncryptionType", "locationName": "bucketCountByEncryptionType", - "documentation": "

The total number of buckets, grouped by server-side encryption type. This object also reports the total number of buckets that don't encrypt objects by default.

" + "documentation": "

The total number of buckets, grouped by default server-side encryption type. This object also reports the total number of buckets that don't encrypt new objects by default.

" }, "bucketCountBySharedAccessType": { "shape": "BucketCountBySharedAccessType", @@ -4185,12 +4414,12 @@ "classifiableSizeInBytes": { "shape": "__long", "locationName": "classifiableSizeInBytes", - "documentation": "

The total storage size, in bytes, of all the objects that Amazon Macie can analyze in the buckets. These objects use a supported storage class and have a file name extension for a supported file or storage format.

" + "documentation": "

The total storage size, in bytes, of all the objects that Amazon Macie can analyze in the buckets. These objects use a supported storage class and have a file name extension for a supported file or storage format.

If versioning is enabled for any of the buckets, Macie calculates this value based on the size of the latest version of each applicable object in those buckets. This value doesn't reflect the storage size of all versions of all applicable objects in the buckets.

" }, "lastUpdated": { "shape": "__timestampIso8601", "locationName": "lastUpdated", - "documentation": "

The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently retrieved data about the buckets from Amazon S3.

" + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently retrieved both bucket and object metadata from Amazon S3 for the buckets.

" }, "objectCount": { "shape": "__long", @@ -4200,12 +4429,12 @@ "sizeInBytes": { "shape": "__long", "locationName": "sizeInBytes", - "documentation": "

The total storage size, in bytes, of the buckets.

" + "documentation": "

The total storage size, in bytes, of the buckets.

If versioning is enabled for any of the buckets, Macie calculates this value based on the size of the latest version of each object in those buckets. This value doesn't reflect the storage size of all versions of the objects in the buckets.

" }, "sizeInBytesCompressed": { "shape": "__long", "locationName": "sizeInBytesCompressed", - "documentation": "

The total compressed storage size, in bytes, of the buckets.

" + "documentation": "

The total compressed storage size, in bytes, of the buckets.

If versioning is enabled for any of the buckets, Macie calculates this value based on the size of the latest version of each object in those buckets. This value doesn't reflect the storage size of all versions of the objects in the buckets.

" }, "unclassifiableObjectCount": { "shape": "ObjectLevelStatistics", @@ -4215,7 +4444,7 @@ "unclassifiableObjectSizeInBytes": { "shape": "ObjectLevelStatistics", "locationName": "unclassifiableObjectSizeInBytes", - "documentation": "

The total storage size, in bytes, of all the objects that Amazon Macie can't analyze in the buckets. These objects don't use a supported storage class or don't have a file name extension for a supported file or storage format.

" + "documentation": "

The total storage size, in bytes, of the objects that Amazon Macie can't analyze in the buckets. These objects don't use a supported storage class or don't have a file name extension for a supported file or storage format.

" } } }, @@ -4404,6 +4633,20 @@ } } }, + "GetFindingsPublicationConfigurationRequest": { + "type": "structure", + "members": {} + }, + "GetFindingsPublicationConfigurationResponse": { + "type": "structure", + "members": { + "securityHubConfiguration": { + "shape": "SecurityHubConfiguration", + "locationName": "securityHubConfiguration", + "documentation": "

The configuration settings that determine which findings are published to AWS Security Hub.

" + } + } + }, "GetFindingsRequest": { "type": "structure", "members": { @@ -4461,22 +4704,22 @@ "findingPublishingFrequency": { "shape": "FindingPublishingFrequency", "locationName": "findingPublishingFrequency", - "documentation": "

The frequency with which Amazon Macie publishes updates to policy findings for the account. This includes publishing updates to AWS Security Hub and Amazon EventBridge (formerly called Amazon CloudWatch Events).

" + "documentation": "

The frequency with which Macie publishes updates to policy findings for the account. This includes publishing updates to AWS Security Hub and Amazon EventBridge (formerly called Amazon CloudWatch Events).

" }, "serviceRole": { "shape": "__string", "locationName": "serviceRole", - "documentation": "

The Amazon Resource Name (ARN) of the service-linked role that allows Amazon Macie to monitor and analyze data in AWS resources for the account.

" + "documentation": "

The Amazon Resource Name (ARN) of the service-linked role that allows Macie to monitor and analyze data in AWS resources for the account.

" }, "status": { "shape": "MacieStatus", "locationName": "status", - "documentation": "

The current status of the Amazon Macie account. Possible values are: PAUSED, the account is enabled but all Amazon Macie activities are suspended (paused) for the account; and, ENABLED, the account is enabled and all Amazon Macie activities are enabled for the account.

" + "documentation": "

The current status of the Macie account. Possible values are: PAUSED, the account is enabled but all Macie activities are suspended (paused) for the account; and, ENABLED, the account is enabled and all Macie activities are enabled for the account.

" }, "updatedAt": { "shape": "__timestampIso8601", "locationName": "updatedAt", - "documentation": "

The date and time, in UTC and extended ISO 8601 format, of the most recent change to the status of the Amazon Macie account.

" + "documentation": "

The date and time, in UTC and extended ISO 8601 format, of the most recent change to the status of the Macie account.

" } } }, @@ -4490,7 +4733,7 @@ "master": { "shape": "Invitation", "locationName": "master", - "documentation": "

The AWS account ID for the master account. If the accounts are associated by a Macie membership invitation, this object also provides details about the invitation that was sent and accepted to establish the relationship between the accounts.

" + "documentation": "

(Deprecated) The AWS account ID for the administrator account. If the accounts are associated by a Macie membership invitation, this object also provides details about the invitation that was sent to establish the relationship between the accounts.

" } } }, @@ -4516,6 +4759,11 @@ "locationName": "accountId", "documentation": "

The AWS account ID for the account.

" }, + "administratorAccountId": { + "shape": "__string", + "locationName": "administratorAccountId", + "documentation": "

The AWS account ID for the administrator account.

" + }, "arn": { "shape": "__string", "locationName": "arn", @@ -4534,12 +4782,12 @@ "masterAccountId": { "shape": "__string", "locationName": "masterAccountId", - "documentation": "

The AWS account ID for the master account.

" + "documentation": "

(Deprecated) The AWS account ID for the administrator account. This property has been replaced by the administratorAccountId property and is retained only for backward compatibility.

" }, "relationshipStatus": { "shape": "RelationshipStatus", "locationName": "relationshipStatus", - "documentation": "

The current status of the relationship between the account and the master account.

" + "documentation": "

The current status of the relationship between the account and the administrator account.

" }, "tags": { "shape": "TagMap", @@ -4549,7 +4797,7 @@ "updatedAt": { "shape": "__timestampIso8601", "locationName": "updatedAt", - "documentation": "

The date and time, in UTC and extended ISO 8601 format, of the most recent change to the status of the relationship between the account and the master account.

" + "documentation": "

The date and time, in UTC and extended ISO 8601 format, of the most recent change to the status of the relationship between the account and the administrator account.

" } } }, @@ -4559,7 +4807,7 @@ "filterBy": { "shape": "__listOfUsageStatisticsFilter", "locationName": "filterBy", - "documentation": "

An array of objects, one for each condition to use to filter the query results. If the array contains more than one object, Amazon Macie uses an AND operator to join the conditions specified by the objects.

" + "documentation": "

An array of objects, one for each condition to use to filter the query results. If you specify more than one condition, Amazon Macie uses an AND operator to join the conditions.

" }, "maxResults": { "shape": "__integer", @@ -4575,6 +4823,11 @@ "shape": "UsageStatisticsSortBy", "locationName": "sortBy", "documentation": "

The criteria to use to sort the query results.

" + }, + "timeRange": { + "shape": "TimeRange", + "locationName": "timeRange", + "documentation": "

The inclusive time period to query usage data for. Valid values are: MONTH_TO_DATE, for the current calendar month to date; and, PAST_30_DAYS, for the preceding 30 days. If you don't specify a value, Amazon Macie provides usage data for the preceding 30 days.

" } } }, @@ -4590,16 +4843,33 @@ "shape": "__listOfUsageRecord", "locationName": "records", "documentation": "

An array of objects that contains the results of the query. Each object contains the data for an account that meets the filter criteria specified in the request.

" + }, + "timeRange": { + "shape": "TimeRange", + "locationName": "timeRange", + "documentation": "

The inclusive time period that the usage data applies to. Possible values are: MONTH_TO_DATE, for the current calendar month to date; and, PAST_30_DAYS, for the preceding 30 days.

" } } }, "GetUsageTotalsRequest": { "type": "structure", - "members": {} + "members": { + "timeRange": { + "shape": "__string", + "location": "querystring", + "locationName": "timeRange", + "documentation": "

The inclusive time period to retrieve the data for. Valid values are: MONTH_TO_DATE, for the current calendar month to date; and, PAST_30_DAYS, for the preceding 30 days. If you don't specify a value for this parameter, Amazon Macie provides aggregated usage data for the preceding 30 days.
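
A sketch of the new timeRange parameter on both usage operations; field names outside the timeRange additions follow the existing model:

```python
# Sketch: month-to-date totals for the account, then per-account statistics
# for the preceding 30 days.
import botocore.session

macie = botocore.session.get_session().create_client("macie2")

totals = macie.get_usage_totals(timeRange="MONTH_TO_DATE")
print(totals["timeRange"], totals["usageTotals"])

stats = macie.get_usage_statistics(timeRange="PAST_30_DAYS")
for record in stats["records"]:
    print(record["accountId"], record.get("usage"))
```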

" + } + } }, "GetUsageTotalsResponse": { "type": "structure", "members": { + "timeRange": { + "shape": "TimeRange", + "locationName": "timeRange", + "documentation": "

The inclusive time period that the usage data applies to. Possible values are: MONTH_TO_DATE, for the current calendar month to date; and, PAST_30_DAYS, for the preceding 30 days.

" + }, "usageTotals": { "shape": "__listOfUsageTotal", "locationName": "usageTotals", @@ -5277,7 +5547,7 @@ "shape": "__string", "location": "querystring", "locationName": "onlyAssociated", - "documentation": "

Specifies which accounts to include in the response, based on the status of an account's relationship with the master account. By default, the response includes only current member accounts. To include all accounts, set the value for this parameter to false.

" + "documentation": "

Specifies which accounts to include in the response, based on the status of an account's relationship with the administrator account. By default, the response includes only current member accounts. To include all accounts, set this value to false.
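
A small sketch of the onlyAssociated behavior described above; the parameter is modeled as a query-string string, so the value is passed as "false" rather than a boolean:

```python
# Sketch: include every account that has a relationship with the
# administrator account, not just current member accounts.
import botocore.session

macie = botocore.session.get_session().create_client("macie2")

response = macie.list_members(onlyAssociated="false")
for member in response["members"]:
    print(member["accountId"],
          member["relationshipStatus"],
          member.get("administratorAccountId"))
```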

" } } }, @@ -5287,7 +5557,7 @@ "members": { "shape": "__listOfMember", "locationName": "members", - "documentation": "

An array of objects, one for each account that's associated with the master account and meets the criteria specified by the onlyAssociated request parameter.

" + "documentation": "

An array of objects, one for each account that's associated with the administrator account and meets the criteria specified by the onlyAssociated request parameter.

" }, "nextToken": { "shape": "__string", @@ -5373,6 +5643,11 @@ "locationName": "accountId", "documentation": "

The AWS account ID for the account.

" }, + "administratorAccountId": { + "shape": "__string", + "locationName": "administratorAccountId", + "documentation": "

The AWS account ID for the administrator account.

" + }, "arn": { "shape": "__string", "locationName": "arn", @@ -5391,12 +5666,12 @@ "masterAccountId": { "shape": "__string", "locationName": "masterAccountId", - "documentation": "

The AWS account ID for the master account.

" + "documentation": "

(Deprecated) The AWS account ID for the administrator account. This property has been replaced by the administratorAccountId property and is retained only for backward compatibility.

" }, "relationshipStatus": { "shape": "RelationshipStatus", "locationName": "relationshipStatus", - "documentation": "

The current status of the relationship between the account and the master account.

" + "documentation": "

The current status of the relationship between the account and the administrator account.

" }, "tags": { "shape": "TagMap", @@ -5406,10 +5681,10 @@ "updatedAt": { "shape": "__timestampIso8601", "locationName": "updatedAt", - "documentation": "

The date and time, in UTC and extended ISO 8601 format, of the most recent change to the status of the relationship between the account and the master account.

" + "documentation": "

The date and time, in UTC and extended ISO 8601 format, of the most recent change to the status of the relationship between the account and the administrator account.

" } }, - "documentation": "

Provides information about an account that's associated with an Amazon Macie master account.

" + "documentation": "

Provides information about an account that's associated with an Amazon Macie administrator account.

" }, "MonthlySchedule": { "type": "structure", @@ -5417,7 +5692,7 @@ "dayOfMonth": { "shape": "__integer", "locationName": "dayOfMonth", - "documentation": "

The numeric day of the month when Amazon Macie runs the job. This value can be an integer from 1 through 31.

If this value exceeds the number of days in a certain month, Macie runs the job on the last day of that month. For example, if this value is 31 and a month has only 30 days, Macie runs the job on day 30 of that month.

" + "documentation": "

The numeric day of the month when Amazon Macie runs the job. This value can be an integer from 1 through 31.

If this value exceeds the number of days in a certain month, Macie doesn't run the job that month. Macie runs the job only during months that have the specified day. For example, if this value is 31 and a month has only 30 days, Macie doesn't run the job that month. To run the job every month, specify a value that's less than 29.
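
Under the revised behavior, a job scheduled for day 29, 30, or 31 is skipped in shorter months, so a sketch of a schedule that runs every month; parameter names outside monthlySchedule are assumptions drawn from the rest of the macie2 model, and the account and bucket values are placeholders:

```python
# Sketch: a recurring classification job pinned to day 28 so that it runs in
# every calendar month under the updated dayOfMonth behavior.
import botocore.session

macie = botocore.session.get_session().create_client("macie2")

macie.create_classification_job(
    jobType="SCHEDULED",
    name="monthly-sensitive-data-scan",        # hypothetical job name
    s3JobDefinition={
        "bucketDefinitions": [
            {"accountId": "111122223333", "buckets": ["example-bucket"]}
        ]
    },
    scheduleFrequency={"monthlySchedule": {"dayOfMonth": 28}},
)
```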

" } }, "documentation": "

Specifies a monthly recurrence pattern for running a classification job.

" @@ -5428,17 +5703,17 @@ "customerManaged": { "shape": "__long", "locationName": "customerManaged", - "documentation": "

The total number of objects that are encrypted using a customer-managed key. The objects use customer-provided server-side (SSE-C) encryption.

" + "documentation": "

The total number of objects that are encrypted using a customer-managed key. The objects use customer-provided server-side encryption (SSE-C).

" }, "kmsManaged": { "shape": "__long", "locationName": "kmsManaged", - "documentation": "

The total number of objects that are encrypted using an AWS Key Management Service (AWS KMS) customer master key (CMK). The objects use AWS managed AWS KMS (AWS-KMS) encryption or customer managed AWS KMS (SSE-KMS) encryption.

" + "documentation": "

The total number of objects that are encrypted using an AWS Key Management Service (AWS KMS) customer master key (CMK). The objects use AWS managed AWS KMS encryption (AWS-KMS) or customer managed AWS KMS encryption (SSE-KMS).

" }, "s3Managed": { "shape": "__long", "locationName": "s3Managed", - "documentation": "

The total number of objects that are encrypted using an Amazon S3 managed key. The objects use Amazon S3 managed (SSE-S3) encryption.

" + "documentation": "

The total number of objects that are encrypted using an Amazon S3 managed key. The objects use Amazon S3 managed encryption (SSE-S3).

" }, "unencrypted": { "shape": "__long", @@ -5467,7 +5742,7 @@ "documentation": "

The total storage size (in bytes) or number of objects that Amazon Macie can't analyze because the objects use an unsupported storage class or don't have a file name extension for a supported file or storage format.

" } }, - "documentation": "

Provides information about the total storage size (in bytes) or number of objects that Amazon Macie can't analyze in one or more S3 buckets. In a BucketMetadata object, this data is for a specific bucket. In a GetBucketStatisticsResponse object, this data is aggregated for all the buckets in the query results.

" + "documentation": "

Provides information about the total storage size (in bytes) or number of objects that Amazon Macie can't analyze in one or more S3 buckets. In a BucketMetadata object, this data is for a specific bucket. In a GetBucketStatisticsResponse object, this data is aggregated for all the buckets in the query results. If versioning is enabled for a bucket, total storage size values are based on the size of the latest version of each applicable object in the bucket.

" }, "Occurrences": { "type": "structure", @@ -5480,7 +5755,7 @@ "lineRanges": { "shape": "Ranges", "locationName": "lineRanges", - "documentation": "

An array of objects, one for each occurrence of sensitive data in a Microsoft Word document or non-binary text file, such as an HTML, JSON, TXT, or XML file. Each object specifies the line that contains the data, and the position of the data on that line.

This value is often null for file types that are supported by Cell, Page, or Record objects. Exceptions are the locations of: data in unstructured sections of an otherwise structured file, such as a comment in a file; and, data in a malformed file that Amazon Macie analyzes as plain text.

" + "documentation": "

An array of objects, one for each occurrence of sensitive data in a Microsoft Word document or non-binary text file, such as an HTML, JSON, TXT, or XML file. Each object specifies the line that contains the data, and the position of the data on that line.

This value is often null for file types that are supported by Cell, Page, or Record objects. Exceptions are the locations of data in: unstructured sections of an otherwise structured file, such as a comment in a file; a malformed file that Amazon Macie analyzes as plain text; and, a CSV or TSV file that has any column names that contain sensitive data.

" }, "offsetRanges": { "shape": "Ranges", @@ -5574,6 +5849,26 @@ } } }, + "PutFindingsPublicationConfigurationRequest": { + "type": "structure", + "members": { + "clientToken": { + "shape": "__string", + "locationName": "clientToken", + "documentation": "

A unique, case-sensitive token that you provide to ensure the idempotency of the request.

", + "idempotencyToken": true + }, + "securityHubConfiguration": { + "shape": "SecurityHubConfiguration", + "locationName": "securityHubConfiguration", + "documentation": "

The configuration settings that determine which findings to publish to AWS Security Hub.

" + } + } + }, + "PutFindingsPublicationConfigurationResponse": { + "type": "structure", + "members": {} + }, "Range": { "type": "structure", "members": { @@ -5608,7 +5903,7 @@ "jsonPath": { "shape": "__string", "locationName": "jsonPath", - "documentation": "

The path, as a JSONPath expression, to the field in the record that contains the data.

If the name of an element exceeds 20 characters, Amazon Macie truncates the name by removing characters from the beginning of the name. If the resulting full path exceeds 250 characters, Macie also truncates the path, starting with the first element in the path, until the path contains 250 or fewer characters.

" + "documentation": "

The path, as a JSONPath expression, to the field in the record that contains the data. If Amazon Macie detects sensitive data in the name of any element in the path, Macie omits this field.

If the name of an element exceeds 20 characters, Macie truncates the name by removing characters from the beginning of the name. If the resulting full path exceeds 250 characters, Macie also truncates the path, starting with the first element in the path, until the path contains 250 or fewer characters.

" }, "recordIndex": { "shape": "__long", @@ -5627,7 +5922,7 @@ }, "RelationshipStatus": { "type": "string", - "documentation": "

The current status of the relationship between an account and an associated Amazon Macie master account (inviter account). Possible values are:

", + "documentation": "

The current status of the relationship between an account and an associated Amazon Macie administrator account (inviter account). Possible values are:

", "enum": [ "Enabled", "Paused", @@ -5852,7 +6147,7 @@ "serverSideEncryption": { "shape": "ServerSideEncryption", "locationName": "serverSideEncryption", - "documentation": "

The type of server-side encryption that's used for the object.

" + "documentation": "

The type of server-side encryption that's used to encrypt the object.

" }, "size": { "shape": "__long", @@ -5905,6 +6200,26 @@ }, "documentation": "

Specifies one or more property- and tag-based conditions that refine the scope of a classification job. These conditions define criteria that determine which objects a job analyzes. Exclude conditions take precedence over include conditions.

" }, + "SecurityHubConfiguration": { + "type": "structure", + "members": { + "publishClassificationFindings": { + "shape": "__boolean", + "locationName": "publishClassificationFindings", + "documentation": "

Specifies whether to publish sensitive data findings to AWS Security Hub. If you set this value to true, Amazon Macie automatically publishes all sensitive data findings that weren't suppressed by a findings filter. The default value is false.

" + }, + "publishPolicyFindings": { + "shape": "__boolean", + "locationName": "publishPolicyFindings", + "documentation": "

Specifies whether to publish policy findings to AWS Security Hub. If you set this value to true, Amazon Macie automatically publishes all new and updated policy findings that weren't suppressed by a findings filter. The default value is true.

" + } + }, + "documentation": "

Specifies configuration settings that determine which findings are published to AWS Security Hub automatically. For information about how Macie publishes findings to Security Hub, see Amazon Macie integration with Security Hub in the Amazon Macie User Guide.

", + "required": [ + "publishPolicyFindings", + "publishClassificationFindings" + ] + }, "SensitiveData": { "type": "list", "documentation": "

Provides information about the category and number of occurrences of sensitive data that produced a finding.

", @@ -5949,12 +6264,12 @@ "encryptionType": { "shape": "EncryptionType", "locationName": "encryptionType", - "documentation": "

The server-side encryption algorithm that's used when storing data in the bucket or object. If encryption is disabled for the bucket or object, this value is NONE.

" + "documentation": "

The server-side encryption algorithm that's used when storing data in the bucket or object. If default encryption is disabled for the bucket or the object isn't encrypted using server-side encryption, this value is NONE.

" }, "kmsMasterKeyId": { "shape": "__string", "locationName": "kmsMasterKeyId", - "documentation": "

The unique identifier for the AWS Key Management Service (AWS KMS) master key that's used to encrypt the bucket or object. This value is null if AWS KMS isn't used to encrypt the bucket or object.

" + "documentation": "

The Amazon Resource Name (ARN) or unique identifier (key ID) for the AWS Key Management Service (AWS KMS) customer master key (CMK) that's used to encrypt data in the bucket or the object. If an AWS KMS CMK isn't used, this value is null.

" } }, "documentation": "

Provides information about the server-side encryption settings for an S3 bucket or S3 object.

" @@ -5978,7 +6293,7 @@ "documentation": "

The value for the metric specified by the UsageByAccount.type field in the response.

" } }, - "documentation": "

Specifies a current quota for an account.

" + "documentation": "

Specifies a current quota for an Amazon Macie account.

" }, "ServiceQuotaExceededException": { "type": "structure", @@ -6300,6 +6615,22 @@ "httpStatusCode": 429 } }, + "TimeRange": { + "type": "string", + "documentation": "

An inclusive time period that Amazon Macie usage data applies to. Possible values are:

", + "enum": [ + "MONTH_TO_DATE", + "PAST_30_DAYS" + ] + }, + "Type": { + "type": "string", + "enum": [ + "NONE", + "AES256", + "aws:kms" + ] + }, "Unit": { "type": "string", "enum": [ @@ -6364,7 +6695,7 @@ "jobStatus": { "shape": "JobStatus", "locationName": "jobStatus", - "documentation": "

The new status for the job. Valid values are:

" + "documentation": "

The new status for the job. Valid values are:

" } }, "required": [ @@ -6441,7 +6772,7 @@ "status": { "shape": "MacieStatus", "locationName": "status", - "documentation": "

Specifies whether to change the status of the account. Valid values are: ENABLED, resume all Amazon Macie activities for the account; and, PAUSED, suspend all Macie activities for the account.

" + "documentation": "

Specifies a new status for the account. Valid values are: ENABLED, resume all Amazon Macie activities for the account; and, PAUSED, suspend all Macie activities for the account.

" } } }, @@ -6479,7 +6810,7 @@ "autoEnable": { "shape": "__boolean", "locationName": "autoEnable", - "documentation": "

Specifies whether Amazon Macie is enabled automatically for each account, when the account is added to the AWS organization.

" + "documentation": "

Specifies whether to enable Amazon Macie automatically for each account, when the account is added to the AWS organization.

" } }, "required": [ @@ -6511,10 +6842,10 @@ "type": { "shape": "UsageType", "locationName": "type", - "documentation": "

The name of the metric. Possible values are: DATA_INVENTORY_EVALUATION, for monitoring S3 buckets; and, SENSITIVE_DATA_DISCOVERY, for analyzing sensitive data.

" + "documentation": "

The name of the metric. Possible values are: DATA_INVENTORY_EVALUATION, for monitoring S3 buckets; and, SENSITIVE_DATA_DISCOVERY, for analyzing S3 objects to detect sensitive data.

" } }, - "documentation": "

Provides data for a specific usage metric and the corresponding quota for an account. The value for the metric is an aggregated value that reports usage during the past 30 days.

" + "documentation": "

Provides data for a specific usage metric and the corresponding quota for an Amazon Macie account.

" }, "UsageRecord": { "type": "structure", @@ -6535,7 +6866,7 @@ "documentation": "

An array of objects that contains usage data and quotas for the account. Each object contains the data for a specific usage metric and the corresponding quota.

" } }, - "documentation": "

Provides quota and aggregated usage data for an account.

" + "documentation": "

Provides quota and aggregated usage data for an Amazon Macie account.

" }, "UsageStatisticsFilter": { "type": "structure", @@ -6553,14 +6884,14 @@ "values": { "shape": "__listOf__string", "locationName": "values", - "documentation": "

An array that lists values to use in the condition, based on the value for the field specified by the key property. If the value for the key property is accountId, this array can specify multiple values. Otherwise, this array can specify only one value.

Valid values for each supported field are:

" + "documentation": "

An array that lists values to use in the condition, based on the value for the field specified by the key property. If the value for the key property is accountId, this array can specify multiple values. Otherwise, this array can specify only one value.

Valid values for each supported field are:

" } }, - "documentation": "

Specifies a condition for filtering the results of a query for account quotas and usage data.

" + "documentation": "

Specifies a condition for filtering the results of a query for quota and usage data for one or more Amazon Macie accounts.

" }, "UsageStatisticsFilterComparator": { "type": "string", - "documentation": "

The operator to use in a condition that filters the results of a query for account quotas and usage data. Valid values are:

", + "documentation": "

The operator to use in a condition that filters the results of a query for Amazon Macie account quotas and usage data. Valid values are:

", "enum": [ "GT", "GTE", @@ -6573,7 +6904,7 @@ }, "UsageStatisticsFilterKey": { "type": "string", - "documentation": "

The field to use in a condition that filters the results of a query for account quotas and usage data. Valid values are:

", + "documentation": "

The field to use in a condition that filters the results of a query for Amazon Macie account quotas and usage data. Valid values are:

", "enum": [ "accountId", "serviceLimit", @@ -6595,11 +6926,11 @@ "documentation": "

The sort order to apply to the results, based on the value for the field specified by the key property. Valid values are: ASC, sort the results in ascending order; and, DESC, sort the results in descending order.

" } }, - "documentation": "

Specifies criteria for sorting the results of a query for account quotas and usage data.

" + "documentation": "

Specifies criteria for sorting the results of a query for Amazon Macie account quotas and usage data.

" }, "UsageStatisticsSortKey": { "type": "string", - "documentation": "

The field to use to sort the results of a query for account quotas and usage data. Valid values are:

", + "documentation": "

The field to use to sort the results of a query for Amazon Macie account quotas and usage data. Valid values are:

", "enum": [ "accountId", "total", @@ -6623,14 +6954,14 @@ "type": { "shape": "UsageType", "locationName": "type", - "documentation": "

The name of the metric. Possible values are: DATA_INVENTORY_EVALUATION, for monitoring S3 buckets; and, SENSITIVE_DATA_DISCOVERY, for analyzing sensitive data.

" + "documentation": "

The name of the metric. Possible values are: DATA_INVENTORY_EVALUATION, for monitoring S3 buckets; and, SENSITIVE_DATA_DISCOVERY, for analyzing S3 objects to detect sensitive data.

" } }, - "documentation": "

Provides aggregated data for a usage metric. The value for the metric reports usage data for an account during the past 30 days.

" + "documentation": "

Provides aggregated data for an Amazon Macie usage metric. The value for the metric reports estimated usage data for an account for the preceding 30 days or the current calendar month to date, depending on the time period (timeRange) specified in the request.

" }, "UsageType": { "type": "string", - "documentation": "

The name of a usage metric for an account. Possible values are:

", + "documentation": "

The name of an Amazon Macie usage metric for an account. Possible values are:

", "enum": [ "DATA_INVENTORY_EVALUATION", "SENSITIVE_DATA_DISCOVERY" diff --git a/botocore/data/managedblockchain/2018-09-24/service-2.json b/botocore/data/managedblockchain/2018-09-24/service-2.json index 010bfbe5..e107e79e 100644 --- a/botocore/data/managedblockchain/2018-09-24/service-2.json +++ b/botocore/data/managedblockchain/2018-09-24/service-2.json @@ -72,7 +72,7 @@ {"shape":"InternalServiceErrorException"}, {"shape":"TooManyTagsException"} ], - "documentation":"

Creates a node on the specified blockchain network.

Applies to Hyperledger Fabric and Ethereum.

Ethereum on Managed Blockchain is in preview release and is subject to change.

" + "documentation":"

Creates a node on the specified blockchain network.

Applies to Hyperledger Fabric and Ethereum.

" }, "CreateProposal":{ "name":"CreateProposal", @@ -541,7 +541,7 @@ }, "Tags":{ "shape":"InputTagMap", - "documentation":"

Tags to assign to the network. Each tag consists of a key and optional value.

When specifying tags during creation, you can specify multiple key-value pairs in a single request, with an overall maximum of 50 added to each resource.

For more information about tags, see Tagging Resources in the Amazon Managed Blockchain Ethereum Developer Guide, or Tagging Resources in the Amazon Managed Blockchain Hyperledger Fabric Developer Guide.

" + "documentation":"

Tags to assign to the network. Each tag consists of a key and optional value.

When specifying tags during creation, you can specify multiple key-value pairs in a single request, with an overall maximum of 50 tags added to each resource.

For more information about tags, see Tagging Resources in the Amazon Managed Blockchain Ethereum Developer Guide, or Tagging Resources in the Amazon Managed Blockchain Hyperledger Fabric Developer Guide.

" } } }, @@ -587,7 +587,7 @@ }, "Tags":{ "shape":"InputTagMap", - "documentation":"

Tags to assign to the node. Each tag consists of a key and optional value.

When specifying tags during creation, you can specify multiple key-value pairs in a single request, with an overall maximum of 50 added to each resource.

For more information about tags, see Tagging Resources in the Amazon Managed Blockchain Ethereum Developer Guide, or Tagging Resources in the Amazon Managed Blockchain Hyperledger Fabric Developer Guide.

" + "documentation":"

Tags to assign to the node. Each tag consists of a key and optional value.

When specifying tags during creation, you can specify multiple key-value pairs in a single request, with an overall maximum of 50 tags added to each resource.

For more information about tags, see Tagging Resources in the Amazon Managed Blockchain Ethereum Developer Guide, or Tagging Resources in the Amazon Managed Blockchain Hyperledger Fabric Developer Guide.

" } } }, @@ -634,7 +634,7 @@ }, "Tags":{ "shape":"InputTagMap", - "documentation":"

Tags to assign to the proposal. Each tag consists of a key and optional value.

When specifying tags during creation, you can specify multiple key-value pairs in a single request, with an overall maximum of 50 added to each resource. If the proposal is for a network invitation, the invitation inherits the tags added to the proposal.

For more information about tags, see Tagging Resources in the Amazon Managed Blockchain Ethereum Developer Guide, or Tagging Resources in the Amazon Managed Blockchain Hyperledger Fabric Developer Guide.

" + "documentation":"

Tags to assign to the proposal. Each tag consists of a key and optional value.

When specifying tags during creation, you can specify multiple key-value pairs in a single request, with an overall maximum of 50 tags added to each resource. If the proposal is for a network invitation, the invitation inherits the tags added to the proposal.

For more information about tags, see Tagging Resources in the Amazon Managed Blockchain Ethereum Developer Guide, or Tagging Resources in the Amazon Managed Blockchain Hyperledger Fabric Developer Guide.

" } } }, @@ -1321,7 +1321,7 @@ }, "Tags":{ "shape":"InputTagMap", - "documentation":"

Tags assigned to the member. Tags consist of a key and optional value. For more information about tags, see Tagging Resources in the Amazon Managed Blockchain Hyperledger Fabric Developer Guide.

When specifying tags during creation, you can specify multiple key-value pairs in a single request, with an overall maximum of 50 added to each resource.

" + "documentation":"

Tags assigned to the member. Tags consist of a key and optional value. For more information about tags, see Tagging Resources in the Amazon Managed Blockchain Hyperledger Fabric Developer Guide.

When specifying tags during creation, you can specify multiple key-value pairs in a single request, with an overall maximum of 50 tags added to each resource.

" } }, "documentation":"

Configuration properties of the member.

Applies only to Hyperledger Fabric.

" @@ -1521,7 +1521,7 @@ "documentation":"

The Ethereum CHAIN_ID associated with the Ethereum network. Chain IDs are as follows:

" } }, - "documentation":"

Attributes of Ethereum for a network. Ethereum on Managed Blockchain is in preview release and is subject to change.

" + "documentation":"

Attributes of Ethereum for a network.

" }, "NetworkFabricAttributes":{ "type":"structure", @@ -1557,7 +1557,7 @@ }, "Ethereum":{ "shape":"NetworkEthereumAttributes", - "documentation":"

Attributes of an Ethereum network for Managed Blockchain resources participating in an Ethereum network. Ethereum on Managed Blockchain is in preview release and is subject to change.

" + "documentation":"

Attributes of an Ethereum network for Managed Blockchain resources participating in an Ethereum network.

" } }, "documentation":"

Attributes relevant to the network for the blockchain framework that the network uses.

" @@ -1657,7 +1657,7 @@ }, "AvailabilityZone":{ "shape":"AvailabilityZoneString", - "documentation":"

The Availability Zone in which the node exists. Required for Ethereum nodes. Ethereum on Managed Blockchain is in preview release and is subject to change.

" + "documentation":"

The Availability Zone in which the node exists. Required for Ethereum nodes.

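For context, a hedged boto3 sketch of creating an Ethereum node with the Availability Zone that the description above says is required; the network ID and instance type are placeholders, and the NodeConfiguration member names are assumptions drawn from this model.

```python
# Hedged sketch: create an Ethereum node on Managed Blockchain. The network ID
# and instance type below are placeholders, not values taken from this change.
import boto3

mbc = boto3.client("managedblockchain")

response = mbc.create_node(
    NetworkId="n-ethereum-mainnet",        # placeholder public-network ID
    NodeConfiguration={
        "InstanceType": "bc.t3.large",     # placeholder instance type
        "AvailabilityZone": "us-east-1a",  # required for Ethereum nodes
    },
)
print(response["NodeId"])
```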
" }, "FrameworkAttributes":{ "shape":"NodeFrameworkAttributes", @@ -1700,7 +1700,7 @@ }, "AvailabilityZone":{ "shape":"AvailabilityZoneString", - "documentation":"

The Availability Zone in which the node exists. Required for Ethereum nodes. Ethereum on Managed Blockchain is in preview release and is subject to change.

" + "documentation":"

The Availability Zone in which the node exists. Required for Ethereum nodes.

" }, "LogPublishingConfiguration":{ "shape":"NodeLogPublishingConfiguration", @@ -1725,7 +1725,7 @@ "documentation":"

The endpoint on which the Ethereum node listens to run Ethereum JSON-RPC methods over WebSockets connections from a client. Use this endpoint in client code for smart contracts when using a WebSockets connection. Connections to this endpoint are authenticated using Signature Version 4.

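A hedged sketch of reading those endpoints back from an existing node; the HttpEndpoint and WebSocketEndpoint attribute names are assumptions based on the NodeEthereumAttributes shape, and the IDs are placeholders. Requests sent to these endpoints are signed with Signature Version 4, as noted above.

```python
# Hedged sketch: look up the JSON-RPC endpoints of an Ethereum node.
# Attribute names are assumed; a Hyperledger Fabric node would expose
# Fabric attributes instead of the Ethereum block shown here.
import boto3

mbc = boto3.client("managedblockchain")

node = mbc.get_node(NetworkId="n-ethereum-mainnet", NodeId="nd-EXAMPLE")["Node"]
eth = node.get("FrameworkAttributes", {}).get("Ethereum", {})
print("HTTP JSON-RPC:", eth.get("HttpEndpoint"))
print("WebSocket JSON-RPC:", eth.get("WebSocketEndpoint"))
```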
" } }, - "documentation":"

Attributes of an Ethereum node. Ethereum on Managed Blockchain is in preview release and is subject to change.

" + "documentation":"

Attributes of an Ethereum node.

" }, "NodeFabricAttributes":{ "type":"structure", @@ -1764,7 +1764,7 @@ }, "Ethereum":{ "shape":"NodeEthereumAttributes", - "documentation":"

Attributes of Ethereum for a node on a Managed Blockchain network that uses Ethereum. Ethereum on Managed Blockchain is in preview release and is subject to change.

" + "documentation":"

Attributes of Ethereum for a node on a Managed Blockchain network that uses Ethereum.

" } }, "documentation":"

Attributes relevant to a node on a Managed Blockchain network for the blockchain framework that the network uses.

" @@ -2106,7 +2106,7 @@ }, "Tags":{ "shape":"InputTagMap", - "documentation":"

The tags to assign to the specified resource. Tag values can be empty, for example, \"MyTagKey\" : \"\". You can specify multiple key-value pairs in a single request, with an overall maximum of 50 added to each resource.

" + "documentation":"

The tags to assign to the specified resource. Tag values can be empty, for example, \"MyTagKey\" : \"\". You can specify multiple key-value pairs in a single request, with an overall maximum of 50 tags added to each resource.

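For context, a minimal boto3 sketch of the tagging call that this 50-tag limit applies to; the resource ARN and tag values are placeholders.

```python
# Minimal sketch: add tags to a Managed Blockchain resource. Multiple
# key-value pairs can go in one request, up to the 50-tag-per-resource limit.
import boto3

mbc = boto3.client("managedblockchain")

mbc.tag_resource(
    ResourceArn="arn:aws:managedblockchain:us-east-1:111122223333:networks/n-EXAMPLE",
    Tags={"CostCenter": "blockchain-dev", "Environment": "test"},
)
```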
" } } }, @@ -2330,5 +2330,5 @@ "documentation":"

The voting rules for the network to decide if a proposal is accepted

Applies only to Hyperledger Fabric.

" } }, - "documentation":"

Amazon Managed Blockchain is a fully managed service for creating and managing blockchain networks using open-source frameworks. Blockchain allows you to build applications where multiple parties can securely and transparently run transactions and share data without the need for a trusted, central authority.

Managed Blockchain supports the Hyperledger Fabric and Ethereum open-source frameworks. Ethereum on Managed Blockchain is in preview release and is subject to change. Because of fundamental differences between the frameworks, some API actions or data types may only apply in the context of one framework and not the other. For example, actions related to Hyperledger Fabric network members such as CreateMember and DeleteMember do not apply to Ethereum.

The description for each action indicates the framework or frameworks to which it applies. Data types and properties that apply only in the context of a particular framework are similarly indicated.

" + "documentation":"

Amazon Managed Blockchain is a fully managed service for creating and managing blockchain networks using open-source frameworks. Blockchain allows you to build applications where multiple parties can securely and transparently run transactions and share data without the need for a trusted, central authority.

Managed Blockchain supports the Hyperledger Fabric and Ethereum open-source frameworks. Because of fundamental differences between the frameworks, some API actions or data types may only apply in the context of one framework and not the other. For example, actions related to Hyperledger Fabric network members such as CreateMember and DeleteMember do not apply to Ethereum.

The description for each action indicates the framework or frameworks to which it applies. Data types and properties that apply only in the context of a particular framework are similarly indicated.

" } diff --git a/botocore/data/mediaconnect/2018-11-14/service-2.json b/botocore/data/mediaconnect/2018-11-14/service-2.json index d77651a1..b3107e3c 100644 --- a/botocore/data/mediaconnect/2018-11-14/service-2.json +++ b/botocore/data/mediaconnect/2018-11-14/service-2.json @@ -1225,6 +1225,11 @@ "locationName": "maxLatency", "documentation": "The maximum latency in milliseconds for Zixi-based streams." }, + "MinLatency": { + "shape": "__integer", + "locationName": "minLatency", + "documentation": "The minimum latency in milliseconds for SRT-based streams. In streams that use the SRT protocol, this value that you set on your MediaConnect source or output represents the minimal potential latency of that connection. The latency of the stream is set to the highest number between the sender\u2019s minimum latency and the receiver\u2019s minimum latency." + }, "Name": { "shape": "__string", "locationName": "name", @@ -1524,7 +1529,6 @@ }, "documentation": "Information about the encryption of the flow.", "required": [ - "Algorithm", "RoleArn" ] }, @@ -1801,7 +1805,8 @@ "type": "string", "enum": [ "speke", - "static-key" + "static-key", + "srt-password" ] }, "ListEntitlementsRequest": { @@ -2196,7 +2201,8 @@ "rtp-fec", "rtp", "zixi-pull", - "rist" + "rist", + "srt-listener" ] }, "PurchaseOfferingRequest": { @@ -2569,6 +2575,11 @@ "locationName": "maxLatency", "documentation": "The maximum latency in milliseconds. This parameter applies only to RIST-based and Zixi-based streams." }, + "MinLatency": { + "shape": "__integer", + "locationName": "minLatency", + "documentation": "The minimum latency in milliseconds for SRT-based streams. In streams that use the SRT protocol, this value that you set on your MediaConnect source or output represents the minimal potential latency of that connection. The latency of the stream is set to the highest number between the sender\u2019s minimum latency and the receiver\u2019s minimum latency." + }, "Name": { "shape": "__string", "locationName": "name", @@ -2803,6 +2814,11 @@ "locationName": "maxLatency", "documentation": "The maximum latency in milliseconds. This parameter applies only to RIST-based and Zixi-based streams." }, + "MinLatency": { + "shape": "__integer", + "locationName": "minLatency", + "documentation": "The minimum latency in milliseconds for SRT-based streams. In streams that use the SRT protocol, this value that you set on your MediaConnect source or output represents the minimal potential latency of that connection. The latency of the stream is set to the highest number between the sender\u2019s minimum latency and the receiver\u2019s minimum latency." + }, "Protocol": { "shape": "Protocol", "locationName": "protocol", @@ -3007,6 +3023,11 @@ "locationName": "maxLatency", "documentation": "The maximum latency in milliseconds for Zixi-based streams." }, + "MinLatency": { + "shape": "__integer", + "locationName": "minLatency", + "documentation": "The minimum latency in milliseconds for SRT-based streams. In streams that use the SRT protocol, this value that you set on your MediaConnect source or output represents the minimal potential latency of that connection. The latency of the stream is set to the highest number between the sender\u2019s minimum latency and the receiver\u2019s minimum latency." + }, "OutputArn": { "shape": "__string", "location": "uri", @@ -3132,6 +3153,11 @@ "locationName": "maxLatency", "documentation": "The maximum latency in milliseconds. This parameter applies only to RIST-based and Zixi-based streams." 
}, + "MinLatency": { + "shape": "__integer", + "locationName": "minLatency", + "documentation": "The minimum latency in milliseconds for SRT-based streams. In streams that use the SRT protocol, this value that you set on your MediaConnect source or output represents the minimal potential latency of that connection. The latency of the stream is set to the highest number between the sender\u2019s minimum latency and the receiver\u2019s minimum latency." + }, "Protocol": { "shape": "Protocol", "locationName": "protocol", diff --git a/botocore/data/mediaconvert/2017-08-29/service-2.json b/botocore/data/mediaconvert/2017-08-29/service-2.json index 1d18566d..c16542eb 100644 --- a/botocore/data/mediaconvert/2017-08-29/service-2.json +++ b/botocore/data/mediaconvert/2017-08-29/service-2.json @@ -1651,7 +1651,7 @@ "documentation": "Enable this setting on one audio selector to set it as the default for the job. The service uses this default for outputs where it can't find the specified input audio. If you don't set a default, those outputs have no audio." }, "ExternalAudioFileInput": { - "shape": "__stringPatternS3MM2PPWWEEBBMMMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEEHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE", + "shape": "__stringPatternS3MM2PPWWEEBBMMMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVaAAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEEHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVaAAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE", "locationName": "externalAudioFileInput", "documentation": "Specifies audio data from an external file source." }, @@ -2431,6 +2431,7 @@ "STL", "SRT", "SMI", + "SMPTE_TT", "TELETEXT", "NULL_SOURCE", "IMSC" @@ -6024,7 +6025,7 @@ "documentation": "Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video inputs." 
}, "FileInput": { - "shape": "__stringPatternS3MM2PPMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLOOGGGGaAHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLOOGGGGaA", + "shape": "__stringPatternS3MM2PPMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVaAAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLOOGGGGaAHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVaAAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLOOGGGGaA", "locationName": "fileInput", "documentation": "Specify the source file for your transcoding job. You can use multiple inputs in a single job. The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. If your input format is IMF, specify your input by providing the path to your CPL. For example, \"s3://bucket/vf/cpl.xml\". If the CPL is in an incomplete IMP, make sure to use *Supplemental IMPs* (SupplementalImps) to specify any supplemental IMPs that contain assets referenced by the CPL." }, @@ -7584,7 +7585,7 @@ "documentation": "If your motion graphic asset is a .mov file, keep this setting unspecified. If your motion graphic asset is a series of .png files, specify the frame rate of the overlay in frames per second, as a fraction. For example, specify 24 fps as 24/1. Make sure that the number of images in your series matches the frame rate and your intended overlay duration. For example, if you want a 30-second overlay at 30 fps, you should have 900 .png images. This overlay frame rate doesn't need to match the frame rate of the underlying video." }, "Input": { - "shape": "__stringMin14Max1285PatternS3Mov09PngHttpsMov09Png", + "shape": "__stringMin14PatternS3Mov09PngHttpsMov09Png", "locationName": "input", "documentation": "Specify the .mov file or series of .png files that you want to overlay on your video. For .png files, provide the file name of the first file in the series. Make sure that the names of the .png files end with sequential numbers that specify the order that they are played in. For example, overlay_000.png, overlay_001.png, overlay_002.png, and so on. The sequence must start at zero, and each image file name must have the same number of digits. Pad your initial file names with enough zeros to complete the sequence. For example, if the first image is overlay_0.png, there can be only 10 images in the sequence, with the last image being overlay_9.png. But if the first image is overlay_00.png, there can be 100 images in the sequence." }, @@ -8704,7 +8705,7 @@ "InputChannelsFineTune": { "shape": "__listOf__doubleMinNegative60Max6", "locationName": "inputChannelsFineTune", - "documentation": "Use this setting to specify your remix values when they have a decimal component, such as -10.312, 0.08, or 4.9. 
MediaConvert rounds your remixing values to the nearest thousandth." + "documentation": "Use this setting to specify your remix values when they have a decimal component, such as -10.312, 0.08, or 4.9. MediaConvert rounds your remixing values to the nearest thousandth." } }, "documentation": "OutputChannel mapping settings." @@ -11307,12 +11308,6 @@ "max": 11, "pattern": "^((([0-1]\\d)|(2[0-3]))(:[0-5]\\d){2}([:;][0-5]\\d))$" }, - "__stringMin14Max1285PatternS3Mov09PngHttpsMov09Png": { - "type": "string", - "min": 14, - "max": 1285, - "pattern": "^((s3://(.*)(\\.mov|[0-9]+\\.png))|(https?://(.*)(\\.mov|[0-9]+\\.png)(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" - }, "__stringMin14PatternS3BmpBMPPngPNGHttpsBmpBMPPngPNG": { "type": "string", "min": 14, @@ -11323,6 +11318,11 @@ "min": 14, "pattern": "^((s3://(.*?)\\.(bmp|BMP|png|PNG|tga|TGA))|(https?://(.*?)\\.(bmp|BMP|png|PNG|tga|TGA)(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" }, + "__stringMin14PatternS3Mov09PngHttpsMov09Png": { + "type": "string", + "min": 14, + "pattern": "^((s3://(.*)(\\.mov|[0-9]+\\.png))|(https?://(.*)(\\.mov|[0-9]+\\.png)(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" + }, "__stringMin14PatternS3SccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTXmlXMLSmiSMIHttpsSccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTXmlXMLSmiSMI": { "type": "string", "min": 14, @@ -11454,13 +11454,13 @@ "type": "string", "pattern": "^s3:\\/\\/.*\\/(ASSETMAP.xml)?$" }, - "__stringPatternS3MM2PPMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLOOGGGGaAHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLOOGGGGaA": { + "__stringPatternS3MM2PPMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVaAAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLOOGGGGaAHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVaAAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLOOGGGGaA": { "type": "string", - "pattern": 
"^((s3://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[pP]|[mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][kK][aA]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL]|[oO][gG][gGaA]))))|(https?://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][kK][aA]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL]|[oO][gG][gGaA])))(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" + "pattern": "^((s3://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[pP]|[mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][kK][aA]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vVaA]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL]|[oO][gG][gGaA]))))|(https?://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][kK][aA]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vVaA]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL]|[oO][gG][gGaA])))(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" }, - "__stringPatternS3MM2PPWWEEBBMMMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEEHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE": { + 
"__stringPatternS3MM2PPWWEEBBMMMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVaAAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEEHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVaAAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE": { "type": "string", - "pattern": "^((s3://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[pP]|[wW][eE][bB][mM]|[mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][kK][aA]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[aA][aA][cC]|[aA][iI][fF][fF]|[mM][pP]2|[aA][cC]3|[eE][cC]3|[dD][tT][sS][eE]))))|(https?://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][kK][aA]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[aA][aA][cC]|[aA][iI][fF][fF]|[mM][pP]2|[aA][cC]3|[eE][cC]3|[dD][tT][sS][eE])))(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" + "pattern": "^((s3://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[pP]|[wW][eE][bB][mM]|[mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][kK][aA]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vVaA]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[aA][aA][cC]|[aA][iI][fF][fF]|[mM][pP]2|[aA][cC]3|[eE][cC]3|[dD][tT][sS][eE]))))|(https?://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][kK][aA]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vVaA]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[aA][aA][cC]|[aA][iI][fF][fF]|[mM][pP]2|[aA][cC]3|[eE][cC]3|[dD][tT][sS][eE])))(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" }, "__stringPatternSNManifestConfirmConditionNotificationNS": { "type": "string", diff 
--git a/botocore/data/medialive/2017-10-14/service-2.json b/botocore/data/medialive/2017-10-14/service-2.json index 162e498b..6b84c28d 100644 --- a/botocore/data/medialive/2017-10-14/service-2.json +++ b/botocore/data/medialive/2017-10-14/service-2.json @@ -554,6 +554,48 @@ ], "documentation": "Create a new program in the multiplex." }, + "CreatePartnerInput": { + "name": "CreatePartnerInput", + "http": { + "method": "POST", + "requestUri": "/prod/inputs/{inputId}/partners", + "responseCode": 201 + }, + "input": { + "shape": "CreatePartnerInputRequest" + }, + "output": { + "shape": "CreatePartnerInputResponse", + "documentation": "Successfully created the input." + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "This request was invalid" + }, + { + "shape": "InternalServerErrorException", + "documentation": "Internal Service Error" + }, + { + "shape": "ForbiddenException", + "documentation": "Access was denied" + }, + { + "shape": "BadGatewayException", + "documentation": "Bad Gateway Error" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "Gateway Timeout Error" + }, + { + "shape": "TooManyRequestsException", + "documentation": "Limit exceeded" + } + ], + "documentation": "Create a partner input" + }, "CreateTags": { "name": "CreateTags", "http": { @@ -2841,6 +2883,16 @@ }, "documentation": "Ancillary Source Settings" }, + "ArchiveCdnSettings": { + "type": "structure", + "members": { + "ArchiveS3Settings": { + "shape": "ArchiveS3Settings", + "locationName": "archiveS3Settings" + } + }, + "documentation": "Archive Cdn Settings" + }, "ArchiveContainerSettings": { "type": "structure", "members": { @@ -2858,6 +2910,11 @@ "ArchiveGroupSettings": { "type": "structure", "members": { + "ArchiveCdnSettings": { + "shape": "ArchiveCdnSettings", + "locationName": "archiveCdnSettings", + "documentation": "Parameters that control interactions with the CDN." + }, "Destination": { "shape": "OutputLocationRef", "locationName": "destination", @@ -2898,6 +2955,25 @@ "ContainerSettings" ] }, + "ArchiveS3LogUploads": { + "type": "string", + "documentation": "Archive S3 Log Uploads", + "enum": [ + "DISABLED", + "ENABLED" + ] + }, + "ArchiveS3Settings": { + "type": "structure", + "members": { + "CannedAcl": { + "shape": "S3CannedAcl", + "locationName": "cannedAcl", + "documentation": "Specify the canned ACL to apply to each S3 request. Defaults to none." + } + }, + "documentation": "Archive S3 Settings" + }, "AribDestinationSettings": { "type": "structure", "members": { @@ -4074,6 +4150,38 @@ "CaptionChannel" ] }, + "CaptionRectangle": { + "type": "structure", + "members": { + "Height": { + "shape": "__doubleMin0Max100", + "locationName": "height", + "documentation": "See the description in leftOffset.\nFor height, specify the entire height of the rectangle as a percentage of the underlying frame height. For example, \\\"80\\\" means the rectangle height is 80% of the underlying frame height. The topOffset and rectangleHeight must add up to 100% or less.\nThis field corresponds to tts:extent - Y in the TTML standard." + }, + "LeftOffset": { + "shape": "__doubleMin0Max100", + "locationName": "leftOffset", + "documentation": "Applies only if you plan to convert these source captions to EBU-TT-D or TTML in an output. (Make sure to leave the default if you don't have either of these formats in the output.) You can define a display rectangle for the captions that is smaller than the underlying video frame. 
You define the rectangle by specifying the position of the left edge, top edge, bottom edge, and right edge of the rectangle, all within the underlying video frame. The units for the measurements are percentages.\nIf you specify a value for one of these fields, you must specify a value for all of them.\nFor leftOffset, specify the position of the left edge of the rectangle, as a percentage of the underlying frame width, and relative to the left edge of the frame. For example, \\\"10\\\" means the measurement is 10% of the underlying frame width. The rectangle left edge starts at that position from the left edge of the frame.\nThis field corresponds to tts:origin - X in the TTML standard." + }, + "TopOffset": { + "shape": "__doubleMin0Max100", + "locationName": "topOffset", + "documentation": "See the description in leftOffset.\nFor topOffset, specify the position of the top edge of the rectangle, as a percentage of the underlying frame height, and relative to the top edge of the frame. For example, \\\"10\\\" means the measurement is 10% of the underlying frame height. The rectangle top edge starts at that position from the top edge of the frame.\nThis field corresponds to tts:origin - Y in the TTML standard." + }, + "Width": { + "shape": "__doubleMin0Max100", + "locationName": "width", + "documentation": "See the description in leftOffset.\nFor width, specify the entire width of the rectangle as a percentage of the underlying frame width. For example, \\\"80\\\" means the rectangle width is 80% of the underlying frame width. The leftOffset and rectangleWidth must add up to 100% or less.\nThis field corresponds to tts:extent - X in the TTML standard." + } + }, + "documentation": "Caption Rectangle", + "required": [ + "TopOffset", + "Height", + "Width", + "LeftOffset" + ] + }, "CaptionSelector": { "type": "structure", "members": { @@ -4912,6 +5020,69 @@ }, "documentation": "Placeholder documentation for CreateMultiplexResultModel" }, + "CreatePartnerInput": { + "type": "structure", + "members": { + "RequestId": { + "shape": "__string", + "locationName": "requestId", + "documentation": "Unique identifier of the request to ensure the request is handled\nexactly once in case of retries.\n", + "idempotencyToken": true + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "A collection of key-value pairs." + } + }, + "documentation": "Placeholder documentation for CreatePartnerInput" + }, + "CreatePartnerInputRequest": { + "type": "structure", + "members": { + "InputId": { + "shape": "__string", + "location": "uri", + "locationName": "inputId", + "documentation": "Unique ID of the input." + }, + "RequestId": { + "shape": "__string", + "locationName": "requestId", + "documentation": "Unique identifier of the request to ensure the request is handled\nexactly once in case of retries.\n", + "idempotencyToken": true + }, + "Tags": { + "shape": "Tags", + "locationName": "tags", + "documentation": "A collection of key-value pairs." 
+ } + }, + "documentation": "A request to create a partner input", + "required": [ + "InputId" + ] + }, + "CreatePartnerInputResponse": { + "type": "structure", + "members": { + "Input": { + "shape": "Input", + "locationName": "input" + } + }, + "documentation": "Placeholder documentation for CreatePartnerInputResponse" + }, + "CreatePartnerInputResultModel": { + "type": "structure", + "members": { + "Input": { + "shape": "Input", + "locationName": "input" + } + }, + "documentation": "Placeholder documentation for CreatePartnerInputResultModel" + }, "CreateTagsRequest": { "type": "structure", "members": { @@ -5644,6 +5815,11 @@ "locationName": "inputDevices", "documentation": "Settings for the input devices." }, + "InputPartnerIds": { + "shape": "__listOf__string", + "locationName": "inputPartnerIds", + "documentation": "A list of IDs for all Inputs which are partners of this one." + }, "InputSourceType": { "shape": "InputSourceType", "locationName": "inputSourceType", @@ -6579,6 +6755,11 @@ "EbuTtDDestinationSettings": { "type": "structure", "members": { + "CopyrightHolder": { + "shape": "__stringMax1000", + "locationName": "copyrightHolder", + "documentation": "Applies only if you plan to convert these source captions to EBU-TT-D or TTML in an output. Complete this field if you want to include the name of the copyright holder in the copyright metadata tag in the TTML" + }, "FillLineGap": { "shape": "EbuTtDFillLineGapControl", "locationName": "fillLineGap", @@ -6927,6 +7108,16 @@ }, "documentation": "Placeholder documentation for ForbiddenException" }, + "FrameCaptureCdnSettings": { + "type": "structure", + "members": { + "FrameCaptureS3Settings": { + "shape": "FrameCaptureS3Settings", + "locationName": "frameCaptureS3Settings" + } + }, + "documentation": "Frame Capture Cdn Settings" + }, "FrameCaptureGroupSettings": { "type": "structure", "members": { @@ -6934,6 +7125,11 @@ "shape": "OutputLocationRef", "locationName": "destination", "documentation": "The destination for the frame capture files. Either the URI for an Amazon S3 bucket and object, plus a file name prefix (for example, s3ssl://sportsDelivery/highlights/20180820/curling-) or the URI for a MediaStore container, plus a file name prefix (for example, mediastoressl://sportsDelivery/20180820/curling-). The final file names consist of the prefix from the destination field (for example, \"curling-\") + name modifier + the counter (5 digits, starting from 00001) + extension (which is always .jpg). For example, curling-low.00001.jpg" + }, + "FrameCaptureCdnSettings": { + "shape": "FrameCaptureCdnSettings", + "locationName": "frameCaptureCdnSettings", + "documentation": "Parameters that control interactions with the CDN." } }, "documentation": "Frame Capture Group Settings", @@ -6966,6 +7162,25 @@ }, "documentation": "Frame Capture Output Settings" }, + "FrameCaptureS3LogUploads": { + "type": "string", + "documentation": "Frame Capture S3 Log Uploads", + "enum": [ + "DISABLED", + "ENABLED" + ] + }, + "FrameCaptureS3Settings": { + "type": "structure", + "members": { + "CannedAcl": { + "shape": "S3CannedAcl", + "locationName": "cannedAcl", + "documentation": "Specify the canned ACL to apply to each S3 request. Defaults to none." + } + }, + "documentation": "Frame Capture S3 Settings" + }, "FrameCaptureSettings": { "type": "structure", "members": { @@ -7427,7 +7642,7 @@ "Softness": { "shape": "__integerMin0Max128", "locationName": "softness", - "documentation": "Softness. 
Selects quantizer matrix, larger values reduce high-frequency content in the encoded image." + "documentation": "Softness. Selects quantizer matrix, larger values reduce high-frequency content in the encoded image. If not set to zero, must be greater than 15." }, "SpatialAq": { "shape": "H264SpatialAq", @@ -7930,6 +8145,10 @@ "shape": "HlsMediaStoreSettings", "locationName": "hlsMediaStoreSettings" }, + "HlsS3Settings": { + "shape": "HlsS3Settings", + "locationName": "hlsS3Settings" + }, "HlsWebdavSettings": { "shape": "HlsWebdavSettings", "locationName": "hlsWebdavSettings" @@ -8392,6 +8611,25 @@ "ENABLED" ] }, + "HlsS3LogUploads": { + "type": "string", + "documentation": "Hls S3 Log Uploads", + "enum": [ + "DISABLED", + "ENABLED" + ] + }, + "HlsS3Settings": { + "type": "structure", + "members": { + "CannedAcl": { + "shape": "S3CannedAcl", + "locationName": "cannedAcl", + "documentation": "Specify the canned ACL to apply to each S3 request. Defaults to none." + } + }, + "documentation": "Hls S3 Settings" + }, "HlsSegmentationMode": { "type": "string", "documentation": "Hls Segmentation Mode", @@ -8547,6 +8785,11 @@ "locationName": "inputDevices", "documentation": "Settings for the input devices." }, + "InputPartnerIds": { + "shape": "__listOf__string", + "locationName": "inputPartnerIds", + "documentation": "A list of IDs for all Inputs which are partners of this one." + }, "InputSourceType": { "shape": "InputSourceType", "locationName": "inputSourceType", @@ -12481,6 +12724,16 @@ "Destination" ] }, + "S3CannedAcl": { + "type": "string", + "documentation": "S3 Canned Acl", + "enum": [ + "AUTHENTICATED_READ", + "BUCKET_OWNER_FULL_CONTROL", + "BUCKET_OWNER_READ", + "PUBLIC_READ" + ] + }, "ScheduleAction": { "type": "structure", "members": { @@ -13542,6 +13795,11 @@ "TeletextSourceSettings": { "type": "structure", "members": { + "OutputRectangle": { + "shape": "CaptionRectangle", + "locationName": "outputRectangle", + "documentation": "Optionally defines a region where TTML style captions will be displayed" + }, "PageNumber": { "shape": "__string", "locationName": "pageNumber", @@ -13659,6 +13917,11 @@ "locationName": "targetCustomerId", "documentation": "The AWS account ID (12 digits) for the recipient of the device transfer." }, + "TargetRegion": { + "shape": "__string", + "locationName": "targetRegion", + "documentation": "The target AWS region to transfer the device." + }, "TransferMessage": { "shape": "__string", "locationName": "transferMessage", @@ -13681,6 +13944,11 @@ "locationName": "targetCustomerId", "documentation": "The AWS account ID (12 digits) for the recipient of the device transfer." }, + "TargetRegion": { + "shape": "__string", + "locationName": "targetRegion", + "documentation": "The target AWS region to transfer the device." 
+ }, "TransferMessage": { "shape": "__string", "locationName": "transferMessage", @@ -14719,6 +14987,10 @@ "type": "double", "documentation": "Placeholder documentation for __doubleMin0Max1" }, + "__doubleMin0Max100": { + "type": "double", + "documentation": "Placeholder documentation for __doubleMin0Max100" + }, "__doubleMin1": { "type": "double", "documentation": "Placeholder documentation for __doubleMin1" @@ -15394,6 +15666,11 @@ "type": "string", "documentation": "Placeholder documentation for __string" }, + "__stringMax1000": { + "type": "string", + "max": 1000, + "documentation": "Placeholder documentation for __stringMax1000" + }, "__stringMax256": { "type": "string", "max": 256, diff --git a/botocore/data/mediapackage-vod/2018-11-07/service-2.json b/botocore/data/mediapackage-vod/2018-11-07/service-2.json index 70c31d3c..e2f5492c 100644 --- a/botocore/data/mediapackage-vod/2018-11-07/service-2.json +++ b/botocore/data/mediapackage-vod/2018-11-07/service-2.json @@ -13,6 +13,42 @@ "uid": "mediapackage-vod-2018-11-07" }, "operations": { + "ConfigureLogs": { + "documentation": "Changes the packaging group's properities to configure log subscription", + "errors": [ + { + "shape": "UnprocessableEntityException" + }, + { + "shape": "InternalServerErrorException" + }, + { + "shape": "ForbiddenException" + }, + { + "shape": "NotFoundException" + }, + { + "shape": "ServiceUnavailableException" + }, + { + "shape": "TooManyRequestsException" + } + ], + "http": { + "method": "PUT", + "requestUri": "/packaging_groups/{id}/configure_logs", + "responseCode": 200 + }, + "input": { + "shape": "ConfigureLogsRequest" + }, + "name": "ConfigureLogs", + "output": { + "documentation": "The updated MediaPackage VOD PackagingGroup resource.", + "shape": "ConfigureLogsResponse" + } + }, "CreateAsset": { "documentation": "Creates a new MediaPackage VOD Asset resource.", "errors": [ @@ -742,6 +778,67 @@ ], "type": "structure" }, + "ConfigureLogsParameters": { + "documentation": "The configuration parameters for egress access logging.", + "members": { + "EgressAccessLogs": { + "locationName": "egressAccessLogs", + "shape": "EgressAccessLogs" + } + }, + "type": "structure" + }, + "ConfigureLogsRequest": { + "documentation": "The option to configure log subscription.", + "members": { + "EgressAccessLogs": { + "locationName": "egressAccessLogs", + "shape": "EgressAccessLogs" + }, + "Id": { + "documentation": "The ID of a MediaPackage VOD PackagingGroup resource.", + "location": "uri", + "locationName": "id", + "shape": "__string" + } + }, + "required": [ + "Id" + ], + "type": "structure" + }, + "ConfigureLogsResponse": { + "members": { + "Arn": { + "documentation": "The ARN of the PackagingGroup.", + "locationName": "arn", + "shape": "__string" + }, + "Authorization": { + "locationName": "authorization", + "shape": "Authorization" + }, + "DomainName": { + "documentation": "The fully qualified domain name for Assets in the PackagingGroup.", + "locationName": "domainName", + "shape": "__string" + }, + "EgressAccessLogs": { + "locationName": "egressAccessLogs", + "shape": "EgressAccessLogs" + }, + "Id": { + "documentation": "The ID of the PackagingGroup.", + "locationName": "id", + "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" + } + }, + "type": "structure" + }, "CreateAssetRequest": { "documentation": "A new MediaPackage VOD Asset configuration.", "members": { @@ -919,6 +1016,10 @@ "locationName": "authorization", "shape": "Authorization" }, + "EgressAccessLogs": { + 
"locationName": "egressAccessLogs", + "shape": "EgressAccessLogs" + }, "Id": { "documentation": "The ID of the PackagingGroup.", "locationName": "id", @@ -950,6 +1051,10 @@ "locationName": "domainName", "shape": "__string" }, + "EgressAccessLogs": { + "locationName": "egressAccessLogs", + "shape": "EgressAccessLogs" + }, "Id": { "documentation": "The ID of the PackagingGroup.", "locationName": "id", @@ -1239,6 +1344,10 @@ "locationName": "domainName", "shape": "__string" }, + "EgressAccessLogs": { + "locationName": "egressAccessLogs", + "shape": "EgressAccessLogs" + }, "Id": { "documentation": "The ID of the PackagingGroup.", "locationName": "id", @@ -1251,6 +1360,17 @@ }, "type": "structure" }, + "EgressAccessLogs": { + "documentation": "Configure egress access logging.", + "members": { + "LogGroupName": { + "documentation": "Customize the log group name.", + "locationName": "logGroupName", + "shape": "__string" + } + }, + "type": "structure" + }, "EgressEndpoint": { "documentation": "The endpoint URL used to access an Asset using one PackagingConfiguration.", "members": { @@ -1711,6 +1831,10 @@ "locationName": "domainName", "shape": "__string" }, + "EgressAccessLogs": { + "locationName": "egressAccessLogs", + "shape": "EgressAccessLogs" + }, "Id": { "documentation": "The ID of the PackagingGroup.", "locationName": "id", @@ -1730,6 +1854,10 @@ "locationName": "authorization", "shape": "Authorization" }, + "EgressAccessLogs": { + "locationName": "egressAccessLogs", + "shape": "EgressAccessLogs" + }, "Id": { "documentation": "The ID of the PackagingGroup.", "locationName": "id", @@ -1982,6 +2110,10 @@ "locationName": "domainName", "shape": "__string" }, + "EgressAccessLogs": { + "locationName": "egressAccessLogs", + "shape": "EgressAccessLogs" + }, "Id": { "documentation": "The ID of the PackagingGroup.", "locationName": "id", diff --git a/botocore/data/mediatailor/2018-04-23/paginators-1.json b/botocore/data/mediatailor/2018-04-23/paginators-1.json index 993b5eaf..11622e84 100644 --- a/botocore/data/mediatailor/2018-04-23/paginators-1.json +++ b/botocore/data/mediatailor/2018-04-23/paginators-1.json @@ -5,6 +5,30 @@ "limit_key": "MaxResults", "output_token": "NextToken", "result_key": "Items" + }, + "GetChannelSchedule": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Items" + }, + "ListChannels": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Items" + }, + "ListSourceLocations": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Items" + }, + "ListVodSources": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Items" } } } diff --git a/botocore/data/mediatailor/2018-04-23/service-2.json b/botocore/data/mediatailor/2018-04-23/service-2.json index 53164a8d..ec1edea7 100644 --- a/botocore/data/mediatailor/2018-04-23/service-2.json +++ b/botocore/data/mediatailor/2018-04-23/service-2.json @@ -1,764 +1,2691 @@ { - "documentation": "

Use the AWS Elemental MediaTailor SDK to configure scalable ad insertion for your live and VOD content. With AWS Elemental MediaTailor, you can serve targeted ads to viewers while maintaining broadcast quality in over-the-top (OTT) video applications. For information about using the service, including detailed information about the settings covered in this guide, see the AWS Elemental MediaTailor User Guide.

Through the SDK, you manage AWS Elemental MediaTailor configurations the same as you do through the console. For example, you specify ad insertion behavior and mapping information for the origin server and the ad decision server (ADS).

", - "metadata": { - "apiVersion": "2018-04-23", - "endpointPrefix": "api.mediatailor", - "jsonVersion": "1.1", - "protocol": "rest-json", - "serviceAbbreviation": "MediaTailor", - "serviceFullName": "AWS MediaTailor", - "serviceId": "MediaTailor", - "signatureVersion": "v4", - "signingName": "mediatailor", - "uid": "mediatailor-2018-04-23" - }, - "operations": { - "DeletePlaybackConfiguration": { - "documentation": "

Deletes the playback configuration for the specified name.

", - "errors": [], - "http": { - "method": "DELETE", - "requestUri": "/playbackConfiguration/{Name}", - "responseCode": 204 - }, - "input": { - "shape": "DeletePlaybackConfigurationRequest" - }, - "name": "DeletePlaybackConfiguration", - "output": { - "documentation": "

The request was successful and there is no content in the response.

", - "shape": "DeletePlaybackConfigurationResponse" - } - }, - "GetPlaybackConfiguration": { - "documentation": "

Returns the playback configuration for the specified name.

", - "errors": [], - "http": { - "method": "GET", - "requestUri": "/playbackConfiguration/{Name}", - "responseCode": 200 - }, - "input": { - "shape": "GetPlaybackConfigurationRequest" - }, - "name": "GetPlaybackConfiguration", - "output": { - "documentation": "

Success.

", - "shape": "GetPlaybackConfigurationResponse" - } - }, - "ListPlaybackConfigurations": { - "documentation": "

Returns a list of the playback configurations defined in AWS Elemental MediaTailor. You can specify a maximum number of configurations to return at a time. The default maximum is 50. Results are returned in pagefuls. If MediaTailor has more configurations than the specified maximum, it provides parameters in the response that you can use to retrieve the next pageful.

", - "errors": [], - "http": { - "method": "GET", - "requestUri": "/playbackConfigurations", - "responseCode": 200 - }, - "input": { - "shape": "ListPlaybackConfigurationsRequest" - }, - "name": "ListPlaybackConfigurations", - "output": { - "documentation": "

Success.

", - "shape": "ListPlaybackConfigurationsResponse" - } - }, - "ListTagsForResource": { - "documentation": "

Returns a list of the tags assigned to the specified playback configuration resource.

", - "errors": [ - { - "documentation": "

Invalid request parameters.

", - "shape": "BadRequestException" - } - ], - "http": { - "method": "GET", - "requestUri": "/tags/{ResourceArn}", - "responseCode": 200 - }, - "input": { - "shape": "ListTagsForResourceRequest" - }, - "name": "ListTagsForResource", - "output": { - "documentation": "

Success.

", - "shape": "ListTagsForResourceResponse" - } - }, - "PutPlaybackConfiguration": { - "documentation": "

Adds a new playback configuration to AWS Elemental MediaTailor.

", - "errors": [], - "http": { - "method": "PUT", - "requestUri": "/playbackConfiguration", - "responseCode": 200 - }, - "input": { - "shape": "PutPlaybackConfigurationRequest" - }, - "name": "PutPlaybackConfiguration", - "output": { - "documentation": "

Success.

", - "shape": "PutPlaybackConfigurationResponse" - } - }, - "TagResource": { - "documentation": "

Adds tags to the specified playback configuration resource. You can specify one or more tags to add.

", - "errors": [ - { - "documentation": "

Invalid request parameters.

", - "shape": "BadRequestException" - } - ], - "http": { - "method": "POST", - "requestUri": "/tags/{ResourceArn}", - "responseCode": 204 - }, - "input": { - "shape": "TagResourceRequest" - }, - "name": "TagResource" - }, - "UntagResource": { - "documentation": "

Removes tags from the specified playback configuration resource. You can specify one or more tags to remove.

", - "errors": [ - { - "documentation": "

Invalid request parameters.

", - "shape": "BadRequestException" - } - ], - "http": { - "method": "DELETE", - "requestUri": "/tags/{ResourceArn}", - "responseCode": 204 - }, - "input": { - "shape": "UntagResourceRequest" - }, - "name": "UntagResource" + "documentation": "

Use the AWS Elemental MediaTailor SDKs and CLI to configure scalable ad insertion and linear channels. With MediaTailor, you can assemble existing content into a linear stream and serve targeted ads to viewers while maintaining broadcast quality in over-the-top (OTT) video applications. For information about using the service, including detailed information about the settings covered in this guide, see the AWS Elemental MediaTailor User Guide.

Through the SDKs and the CLI you manage AWS Elemental MediaTailor configurations and channels the same as you do through the console. For example, you specify ad insertion behavior and mapping information for the origin server and the ad decision server (ADS).

", + "metadata": { + "apiVersion": "2018-04-23", + "endpointPrefix": "api.mediatailor", + "jsonVersion": "1.1", + "protocol": "rest-json", + "serviceAbbreviation": "MediaTailor", + "serviceFullName": "AWS MediaTailor", + "serviceId": "MediaTailor", + "signatureVersion": "v4", + "signingName": "mediatailor", + "uid": "mediatailor-2018-04-23" + }, + "operations": { + "CreateChannel": { + "documentation": "

Creates a channel.

", + "errors": [], + "http": { + "method": "POST", + "requestUri": "/channel/{channelName}", + "responseCode": 200 + }, + "input": { + "shape": "CreateChannelRequest" + }, + "name": "CreateChannel", + "output": { + "documentation": "

Success.

", + "shape": "CreateChannelResponse" + } + }, + "CreateProgram": { + "documentation": "

Creates a program.

", + "errors": [], + "http": { + "method": "POST", + "requestUri": "/channel/{channelName}/program/{programName}", + "responseCode": 200 + }, + "input": { + "shape": "CreateProgramRequest" + }, + "name": "CreateProgram", + "output": { + "documentation": "

Success.

", + "shape": "CreateProgramResponse" + } + }, + "CreateSourceLocation": { + "documentation": "

Creates a source location on a specific channel.

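As an illustrative aside, a minimal boto3 sketch of this call; the HttpConfiguration/BaseUrl parameter names are assumptions drawn from the channel-assembly model, and the name and origin URL are placeholders.

```python
# Hedged sketch: create a MediaTailor source location that points at an origin.
import boto3

mediatailor = boto3.client("mediatailor")

mediatailor.create_source_location(
    SourceLocationName="my-source-location",                    # placeholder
    HttpConfiguration={"BaseUrl": "https://ott-origin.example.com"},  # placeholder
)
```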
", + "errors": [], + "http": { + "method": "POST", + "requestUri": "/sourceLocation/{sourceLocationName}", + "responseCode": 200 + }, + "input": { + "shape": "CreateSourceLocationRequest" + }, + "name": "CreateSourceLocation", + "output": { + "documentation": "

Success.

", + "shape": "CreateSourceLocationResponse" + } + }, + "CreateVodSource": { + "documentation": "

Creates a VOD source with the specified name in a source location.

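A hedged sketch of registering a VOD source under a source location; the HttpPackageConfigurations member names (Path, SourceGroup, Type) are assumptions based on this model, and the names and path are placeholders.

```python
# Hedged sketch: register a VOD source inside an existing source location.
import boto3

mediatailor = boto3.client("mediatailor")

mediatailor.create_vod_source(
    SourceLocationName="my-source-location",   # placeholder
    VodSourceName="episode-001",               # placeholder
    HttpPackageConfigurations=[
        {"Path": "/vod/episode-001/index.m3u8", "SourceGroup": "hls", "Type": "HLS"}
    ],
)
```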
", + "errors": [], + "http": { + "method": "POST", + "requestUri": "/sourceLocation/{sourceLocationName}/vodSource/{vodSourceName}", + "responseCode": 200 + }, + "input": { + "shape": "CreateVodSourceRequest" + }, + "name": "CreateVodSource", + "output": { + "documentation": "

Success.

", + "shape": "CreateVodSourceResponse" + } + }, + "DeleteChannel": { + "documentation": "

Deletes a channel. You must stop the channel before it can be deleted.

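A minimal sketch of that ordering; stop_channel is assumed to exist elsewhere in this API even though it does not appear in this hunk, and the channel name is a placeholder.

```python
# Hedged sketch: a channel must be stopped before DeleteChannel will succeed.
import boto3

mediatailor = boto3.client("mediatailor")

mediatailor.stop_channel(ChannelName="my-linear-channel")    # assumed operation
mediatailor.delete_channel(ChannelName="my-linear-channel")
```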
", + "errors": [], + "http": { + "method": "DELETE", + "requestUri": "/channel/{channelName}", + "responseCode": 200 + }, + "input": { + "shape": "DeleteChannelRequest" + }, + "name": "DeleteChannel", + "output": { + "documentation": "

Success.

", + "shape": "DeleteChannelResponse" + } + }, + "DeleteChannelPolicy": { + "documentation": "

Deletes a channel's IAM policy.

", + "errors": [], + "http": { + "method": "DELETE", + "requestUri": "/channel/{channelName}/policy", + "responseCode": 200 + }, + "input": { + "shape": "DeleteChannelPolicyRequest" + }, + "name": "DeleteChannelPolicy", + "output": { + "documentation": "

Success.

", + "shape": "DeleteChannelPolicyResponse" + } + }, + "DeletePlaybackConfiguration": { + "documentation": "

Deletes the playback configuration for the specified name.

", + "errors": [], + "http": { + "method": "DELETE", + "requestUri": "/playbackConfiguration/{Name}", + "responseCode": 204 + }, + "input": { + "shape": "DeletePlaybackConfigurationRequest" + }, + "name": "DeletePlaybackConfiguration", + "output": { + "documentation": "

The request was successful and there is no content in the response.

", + "shape": "DeletePlaybackConfigurationResponse" + } + }, + "DeleteProgram": { + "documentation": "

Deletes a specific program on a specific channel.

", + "errors": [], + "http": { + "method": "DELETE", + "requestUri": "/channel/{channelName}/program/{programName}", + "responseCode": 200 + }, + "input": { + "shape": "DeleteProgramRequest" + }, + "name": "DeleteProgram", + "output": { + "documentation": "

Success.

", + "shape": "DeleteProgramResponse" + } + }, + "DeleteSourceLocation": { + "documentation": "

Deletes a source location on a specific channel.

", + "errors": [], + "http": { + "method": "DELETE", + "requestUri": "/sourceLocation/{sourceLocationName}", + "responseCode": 200 + }, + "input": { + "shape": "DeleteSourceLocationRequest" + }, + "name": "DeleteSourceLocation", + "output": { + "documentation": "

Success.

", + "shape": "DeleteSourceLocationResponse" + } + }, + "DeleteVodSource": { + "documentation": "

Deletes a specific VOD source in a specific source location.

", + "errors": [], + "http": { + "method": "DELETE", + "requestUri": "/sourceLocation/{sourceLocationName}/vodSource/{vodSourceName}", + "responseCode": 200 + }, + "input": { + "shape": "DeleteVodSourceRequest" + }, + "name": "DeleteVodSource", + "output": { + "documentation": "

Success.

", + "shape": "DeleteVodSourceResponse" + } + }, + "DescribeChannel": { + "documentation": "

Describes the properties of a specific channel.

", + "errors": [], + "http": { + "method": "GET", + "requestUri": "/channel/{channelName}", + "responseCode": 200 + }, + "input": { + "shape": "DescribeChannelRequest" + }, + "name": "DescribeChannel", + "output": { + "documentation": "

Success.

", + "shape": "DescribeChannelResponse" + } + }, + "DescribeProgram": { + "documentation": "

Retrieves the properties of the requested program.

", + "errors": [], + "http": { + "method": "GET", + "requestUri": "/channel/{channelName}/program/{programName}", + "responseCode": 200 + }, + "input": { + "shape": "DescribeProgramRequest" + }, + "name": "DescribeProgram", + "output": { + "documentation": "

Success.

", + "shape": "DescribeProgramResponse" + } + }, + "DescribeSourceLocation": { + "documentation": "

Retrieves the properties of the requested source location.

", + "errors": [], + "http": { + "method": "GET", + "requestUri": "/sourceLocation/{sourceLocationName}", + "responseCode": 200 + }, + "input": { + "shape": "DescribeSourceLocationRequest" + }, + "name": "DescribeSourceLocation", + "output": { + "documentation": "

Success.

", + "shape": "DescribeSourceLocationResponse" + } + }, + "DescribeVodSource": { + "documentation": "

Provides details about a specific VOD source in a specific source location.

", + "errors": [], + "http": { + "method": "GET", + "requestUri": "/sourceLocation/{sourceLocationName}/vodSource/{vodSourceName}", + "responseCode": 200 + }, + "input": { + "shape": "DescribeVodSourceRequest" + }, + "name": "DescribeVodSource", + "output": { + "documentation": "

Success.

", + "shape": "DescribeVodSourceResponse" + } + }, + "GetChannelPolicy": { + "documentation": "

Retrieves information about a channel's IAM policy.

", + "errors": [], + "http": { + "method": "GET", + "requestUri": "/channel/{channelName}/policy", + "responseCode": 200 + }, + "input": { + "shape": "GetChannelPolicyRequest" + }, + "name": "GetChannelPolicy", + "output": { + "documentation": "

Success.

", + "shape": "GetChannelPolicyResponse" + } + }, + "GetChannelSchedule": { + "documentation": "

Retrieves information about your channel's schedule.

", + "errors": [], + "http": { + "method": "GET", + "requestUri": "/channel/{channelName}/schedule", + "responseCode": 200 + }, + "input": { + "shape": "GetChannelScheduleRequest" + }, + "name": "GetChannelSchedule", + "output": { + "documentation": "

Success.

", + "shape": "GetChannelScheduleResponse" + } + }, + "GetPlaybackConfiguration": { + "documentation": "

Returns the playback configuration for the specified name.

", + "errors": [], + "http": { + "method": "GET", + "requestUri": "/playbackConfiguration/{Name}", + "responseCode": 200 + }, + "input": { + "shape": "GetPlaybackConfigurationRequest" + }, + "name": "GetPlaybackConfiguration", + "output": { + "documentation": "

Success.

", + "shape": "GetPlaybackConfigurationResponse" + } + }, + "ListChannels": { + "documentation": "

Retrieves a list of channels that are associated with this account.

", + "errors": [], + "http": { + "method": "GET", + "requestUri": "/channels", + "responseCode": 200 + }, + "input": { + "shape": "ListChannelsRequest" + }, + "name": "ListChannels", + "output": { + "documentation": "

Success.

", + "shape": "ListChannelsResponse" + } + }, + "ListPlaybackConfigurations": { + "documentation": "

Returns a list of the playback configurations defined in AWS Elemental MediaTailor. You can specify a maximum number of configurations to return at a time. The default maximum is 50. Results are returned in pagefuls. If MediaTailor has more configurations than the specified maximum, it provides parameters in the response that you can use to retrieve the next pageful.

", + "errors": [], + "http": { + "method": "GET", + "requestUri": "/playbackConfigurations", + "responseCode": 200 + }, + "input": { + "shape": "ListPlaybackConfigurationsRequest" + }, + "name": "ListPlaybackConfigurations", + "output": { + "documentation": "

Success.

", + "shape": "ListPlaybackConfigurationsResponse" + } + }, + "ListSourceLocations": { + "documentation": "

Retrieves a list of source locations.

", + "errors": [], + "http": { + "method": "GET", + "requestUri": "/sourceLocations", + "responseCode": 200 + }, + "input": { + "shape": "ListSourceLocationsRequest" + }, + "name": "ListSourceLocations", + "output": { + "documentation": "

Success.

", + "shape": "ListSourceLocationsResponse" + } + }, + "ListTagsForResource": { + "documentation": "

Returns a list of the tags assigned to the specified playback configuration resource.

", + "errors": [ + { + "documentation": "

Invalid request parameters.

", + "shape": "BadRequestException" } - }, - "shapes": { - "AdMarkerPassthrough" : { - "type" : "structure", - "members" : { - "Enabled" : { - "documentation": "

For HLS, when set to true, MediaTailor passes through EXT-X-CUE-IN, EXT-X-CUE-OUT, and EXT-X-SPLICEPOINT-SCTE35 ad markers from the origin manifest to the MediaTailor personalized manifest.

No logic is applied to these ad markers. For example, if EXT-X-CUE-OUT has a value of 60, but no ads are filled for that ad break, MediaTailor will not set the value to 0.

", - "shape" : "__boolean" - } - }, - "documentation" : "

The configuration for Ad Marker Passthrough. Ad marker passthrough can be used to pass ad markers from the origin to the customized manifest.

" - }, - "AvailSuppression": { - "type": "structure", - "documentation" : "

The configuration for Avail Suppression. Ad suppression can be used to turn off ad personalization in a long manifest, or if a viewer joins mid-break.

", - "members": { - "Mode": { - "documentation": "Sets the mode for avail suppression, also known as ad suppression. By default, ad suppression is off and all ad breaks are filled by MediaTailor with ads or slate.", - "shape": "Mode" - }, - "Value": { - "documentation": "The avail suppression value is a live edge offset time in HH:MM:SS. MediaTailor won't fill ad breaks on or behind this time in the manifest lookback window. ", - "shape": "__string" - } - } + ], + "http": { + "method": "GET", + "requestUri": "/tags/{ResourceArn}", + "responseCode": 200 + }, + "input": { + "shape": "ListTagsForResourceRequest" + }, + "name": "ListTagsForResource", + "output": { + "documentation": "

Success.

", + "shape": "ListTagsForResourceResponse" + } + }, + "ListVodSources": { + "documentation": "

Lists all the VOD sources in a source location.

", + "errors": [], + "http": { + "method": "GET", + "requestUri": "/sourceLocation/{sourceLocationName}/vodSources", + "responseCode": 200 + }, + "input": { + "shape": "ListVodSourcesRequest" + }, + "name": "ListVodSources", + "output": { + "documentation": "

Success.

", + "shape": "ListVodSourcesResponse" + } + }, + "PutChannelPolicy": { + "documentation": "

Creates an IAM policy for the channel.

", + "errors": [], + "http": { + "method": "PUT", + "requestUri": "/channel/{channelName}/policy", + "responseCode": 200 + }, + "input": { + "shape": "PutChannelPolicyRequest" + }, + "name": "PutChannelPolicy", + "output": { + "documentation": "

Success.

", + "shape": "PutChannelPolicyResponse" + } + }, + "PutPlaybackConfiguration": { + "documentation": "

Adds a new playback configuration to AWS Elemental MediaTailor.

", + "errors": [], + "http": { + "method": "PUT", + "requestUri": "/playbackConfiguration", + "responseCode": 200 + }, + "input": { + "shape": "PutPlaybackConfigurationRequest" + }, + "name": "PutPlaybackConfiguration", + "output": { + "documentation": "

Success.

", + "shape": "PutPlaybackConfigurationResponse" + } + }, + "StartChannel": { + "documentation": "

Starts a specific channel.

", + "errors": [], + "http": { + "method": "PUT", + "requestUri": "/channel/{channelName}/start", + "responseCode": 200 + }, + "input": { + "shape": "StartChannelRequest" + }, + "name": "StartChannel", + "output": { + "documentation": "

Success.

", + "shape": "StartChannelResponse" + } + }, + "StopChannel": { + "documentation": "

Stops a specific channel.

", + "errors": [], + "http": { + "method": "PUT", + "requestUri": "/channel/{channelName}/stop", + "responseCode": 200 + }, + "input": { + "shape": "StopChannelRequest" + }, + "name": "StopChannel", + "output": { + "documentation": "

Success.

", + "shape": "StopChannelResponse" + } + }, + "TagResource": { + "documentation": "

Adds tags to the specified playback configuration resource. You can specify one or more tags to add.

", + "errors": [ + { + "documentation": "

Invalid request parameters.

", + "shape": "BadRequestException" + } + ], + "http": { + "method": "POST", + "requestUri": "/tags/{ResourceArn}", + "responseCode": 204 + }, + "input": { + "shape": "TagResourceRequest" + }, + "name": "TagResource" + }, + "UntagResource": { + "documentation": "

Removes tags from the specified playback configuration resource. You can specify one or more tags to remove.

", + "errors": [ + { + "documentation": "

Invalid request parameters.

", + "shape": "BadRequestException" + } + ], + "http": { + "method": "DELETE", + "requestUri": "/tags/{ResourceArn}", + "responseCode": 204 + }, + "input": { + "shape": "UntagResourceRequest" + }, + "name": "UntagResource" + }, + "UpdateChannel": { + "documentation": "

Updates an existing channel.

", + "errors": [], + "http": { + "method": "PUT", + "requestUri": "/channel/{channelName}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateChannelRequest" + }, + "name": "UpdateChannel", + "output": { + "documentation": "

Success.

", + "shape": "UpdateChannelResponse" + } + }, + "UpdateSourceLocation": { + "documentation": "

Updates a source location.

", + "errors": [], + "http": { + "method": "PUT", + "requestUri": "/sourceLocation/{sourceLocationName}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateSourceLocationRequest" + }, + "name": "UpdateSourceLocation", + "output": { + "documentation": "

Success.

", + "shape": "UpdateSourceLocationResponse" + } + }, + "UpdateVodSource": { + "documentation": "

Updates a specific VOD source in a specific source location.

", + "errors": [], + "http": { + "method": "PUT", + "requestUri": "/sourceLocation/{sourceLocationName}/vodSource/{vodSourceName}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateVodSourceRequest" + }, + "name": "UpdateVodSource", + "output": { + "documentation": "

Success.

", + "shape": "UpdateVodSourceResponse" + } + } + }, + "shapes": { + "AccessConfiguration": { + "documentation": "

Access configuration parameters.

", + "members": { + "AccessType": { + "documentation": "

The type of authentication used to access content from HttpConfiguration::BaseUrl on your source location. Accepted value: S3_SIGV4.

S3_SIGV4 - AWS Signature Version 4 authentication for Amazon S3 hosted virtual-style access. If your source location base URL is an Amazon S3 bucket, MediaTailor can use AWS Signature Version 4 (SigV4) authentication to access the bucket where your source content is stored. Your MediaTailor source location baseURL must follow the S3 virtual hosted-style request URL format. For example, https://bucket-name.s3.Region.amazonaws.com/key-name.

Before you can use S3_SIGV4, you must meet these requirements:

\u2022 You must allow MediaTailor to access your S3 bucket by granting mediatailor.amazonaws.com principal access in IAM. For information about configuring access in IAM, see Access management in the IAM User Guide.

\u2022 The mediatailor.amazonaws.com service principal must have permissions to read all top level manifests referenced by the VodSource packaging configurations.

\u2022 The caller of the API must have s3:GetObject IAM permissions to read all top level manifests referenced by your MediaTailor VodSource packaging configurations.

", + "shape": "AccessType" + } + }, + "type": "structure" + }, + "AccessType": { + "enum": [ + "S3_SIGV4" + ], + "type": "string" + }, + "AdBreak": { + "documentation": "

Ad break configuration parameters.

", + "members": { + "MessageType": { + "documentation": "

The SCTE-35 ad insertion type. Accepted value: SPLICE_INSERT.

", + "shape": "MessageType" }, - "BadRequestException": { - "documentation": "

Invalid request parameters.

", - "error": { - "httpStatusCode": 400 - }, - "exception": true, - "members": { - "Message": { - "shape": "__string" - } - }, - "type": "structure" - }, - "Bumper": { - "type": "structure", - "documentation": "

The configuration for bumpers. Bumpers are short audio or video clips that play at the start or before the end of an ad break.

", - "members": { - "EndUrl": { - "documentation": "

The URL for the end bumper asset.

", - "shape": "__string" - }, - "StartUrl": { - "documentation": "

The URL for the start bumper asset.

", - "shape": "__string" - } - } + "OffsetMillis": { + "documentation": "

How long (in milliseconds) after the beginning of the program that an ad starts. This value must fall within 100 ms of a segment boundary; otherwise, the ad break will be skipped.

", + "shape": "__long" }, - "CdnConfiguration": { - "documentation": "

The configuration for using a content delivery network (CDN), like Amazon CloudFront, for content and ad segment management.

", - "members": { - "AdSegmentUrlPrefix": { - "documentation": "

A non-default content delivery network (CDN) to serve ad segments. By default, AWS Elemental MediaTailor uses Amazon CloudFront with default cache settings as its CDN for ad segments. To set up an alternate CDN, create a rule in your CDN for the following origin: ads.mediatailor.<region>.amazonaws.com. Then specify the rule's name in this AdSegmentUrlPrefix. When AWS Elemental MediaTailor serves a manifest, it reports your CDN as the source for ad segments.

", - "shape": "__string" - }, - "ContentSegmentUrlPrefix": { - "documentation": "

A content delivery network (CDN) to cache content segments, so that content requests don’t always have to go to the origin server. First, create a rule in your CDN for the content segment origin server. Then specify the rule's name in this ContentSegmentUrlPrefix. When AWS Elemental MediaTailor serves a manifest, it reports your CDN as the source for content segments.

", - "shape": "__string" - } - }, - "type": "structure" - }, - "DashConfiguration": { - "documentation": "

The configuration for DASH content.

", - "members": { - "ManifestEndpointPrefix": { - "documentation": "

The URL generated by MediaTailor to initiate a playback session. The session uses server-side reporting. This setting is ignored in PUT operations.

", - "shape": "__string" - }, - "MpdLocation": { - "documentation": "

The setting that controls whether MediaTailor includes the Location tag in DASH manifests. MediaTailor populates the Location tag with the URL for manifest update requests, to be used by players that don't support sticky redirects. Disable this if you have CDN routing rules set up for accessing MediaTailor manifests, and you are either using client-side reporting or your players support sticky HTTP redirects. Valid values are DISABLED and EMT_DEFAULT. The EMT_DEFAULT setting enables the inclusion of the tag and is the default value.

", - "shape": "__string" - }, - "OriginManifestType": { - "documentation": "

The setting that controls whether MediaTailor handles manifests from the origin server as multi-period manifests or single-period manifests. If your origin server produces single-period manifests, set this to SINGLE_PERIOD. The default setting is MULTI_PERIOD. For multi-period manifests, omit this setting or set it to MULTI_PERIOD.

", - "shape": "OriginManifestType" - } - }, - "type": "structure" - }, - "DashConfigurationForPut": { - "documentation": "

The configuration for DASH PUT operations.

", - "members": { - "MpdLocation": { - "documentation": "

The setting that controls whether MediaTailor includes the Location tag in DASH manifests. MediaTailor populates the Location tag with the URL for manifest update requests, to be used by players that don't support sticky redirects. Disable this if you have CDN routing rules set up for accessing MediaTailor manifests, and you are either using client-side reporting or your players support sticky HTTP redirects. Valid values are DISABLED and EMT_DEFAULT. The EMT_DEFAULT setting enables the inclusion of the tag and is the default value.

", - "shape": "__string" - }, - "OriginManifestType": { - "documentation": "

The setting that controls whether MediaTailor handles manifests from the origin server as multi-period manifests or single-period manifests. If your origin server produces single-period manifests, set this to SINGLE_PERIOD. The default setting is MULTI_PERIOD. For multi-period manifests, omit this setting or set it to MULTI_PERIOD.

", - "shape": "OriginManifestType" - } - }, - "type": "structure" - }, - "DeletePlaybackConfigurationRequest": { - "members": { - "Name": { - "documentation": "

The identifier for the playback configuration.

", - "location": "uri", - "locationName": "Name", - "shape": "__string" - } - }, - "required": [ - "Name" - ], - "type": "structure" - }, - "DeletePlaybackConfigurationResponse": { - "members": {}, - "type": "structure" - }, - "Empty": { - "members": {}, - "type": "structure" - }, - "GetPlaybackConfigurationRequest": { - "members": { - "Name": { - "documentation": "

The identifier for the playback configuration.

", - "location": "uri", - "locationName": "Name", - "shape": "__string" - } - }, - "required": [ - "Name" - ], - "type": "structure" - }, - "GetPlaybackConfigurationResponse": { - "members": { - "AdDecisionServerUrl": { - "documentation": "

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing, you can provide a static VAST URL. The maximum length is 25,000 characters.

", - "shape": "__string" - }, - "AvailSuppression": { - "shape": "AvailSuppression", - "documentation": "

The configuration for Avail Suppression. Ad suppression can be used to turn off ad personalization in a long manifest, or if a viewer joins mid-break.

" - }, - "Bumper": { - "shape": "Bumper", - "documentation": "

The configuration for bumpers. Bumpers are short audio or video clips that play at the start or before the end of an ad break.

" - }, - "CdnConfiguration": { - "documentation": "

The configuration for using a content delivery network (CDN), like Amazon CloudFront, for content and ad segment management.

", - "shape": "CdnConfiguration" - }, - "DashConfiguration": { - "documentation": "

The configuration for DASH content.

", - "shape": "DashConfiguration" - }, - "HlsConfiguration": { - "documentation": "

The configuration for HLS content.

", - "shape": "HlsConfiguration" - }, - "LivePreRollConfiguration" : { - "shape" : "LivePreRollConfiguration", - "documentation" : "

The configuration for pre-roll ad insertion.

" - }, - "ManifestProcessingRules": { - "shape" : "ManifestProcessingRules", - "documentation" : "

The configuration for manifest processing rules. Manifest processing rules enable customization of the personalized manifests created by MediaTailor.

" - }, - "Name": { - "documentation": "

The identifier for the playback configuration.

", - "shape": "__string" - }, - "PersonalizationThresholdSeconds" : { - "documentation": "

The maximum duration of underfilled ad time (in seconds) allowed in an ad break.

", - "shape" : "__integerMin1" - }, - "PlaybackConfigurationArn": { - "documentation": "

The Amazon Resource Name (ARN) for the playback configuration.

", - "shape": "__string" - }, - "PlaybackEndpointPrefix": { - "documentation": "

The URL that the player accesses to get a manifest from AWS Elemental MediaTailor. This session will use server-side reporting.

", - "shape": "__string" - }, - "SessionInitializationEndpointPrefix": { - "documentation": "

The URL that the player uses to initialize a session that uses client-side reporting.

", - "shape": "__string" - }, - "SlateAdUrl": { - "documentation": "

The URL for a high-quality video asset to transcode and use to fill in time that's not used by ads. AWS Elemental MediaTailor shows the slate to fill in gaps in media content. Configuring the slate is optional for non-VPAID playback configurations. For VPAID, the slate is required because MediaTailor provides it in the slots designated for dynamic ad content. The slate must be a high-quality asset that contains both audio and video.

", - "shape": "__string" - }, - "Tags": { - "documentation": "

The tags assigned to the playback configuration.

", - "locationName": "tags", - "shape": "__mapOf__string" - }, - "TranscodeProfileName": { - "documentation": "

The name that is used to associate this playback configuration with a custom transcode profile. This overrides the dynamic transcoding defaults of MediaTailor. Use this only if you have already set up custom profiles with the help of AWS Support.

", - "shape": "__string" - }, - "VideoContentSourceUrl": { - "documentation": "

The URL prefix for the master playlist for the stream, minus the asset ID. The maximum length is 512 characters.

", - "shape": "__string" - } - }, - "type": "structure" - }, - "HlsConfiguration": { - "documentation": "

The configuration for HLS content.

", - "members": { - "ManifestEndpointPrefix": { - "documentation": "

The URL that is used to initiate a playback session for devices that support Apple HLS. The session uses server-side reporting.

", - "shape": "__string" - } - }, - "type": "structure" - }, - "ListPlaybackConfigurationsRequest": { - "members": { - "MaxResults": { - "documentation": "

Maximum number of records to return.

", - "location": "querystring", - "locationName": "MaxResults", - "shape": "__integerMin1Max100" - }, - "NextToken": { - "documentation": "

Pagination token returned by the GET list request when results exceed the maximum allowed. Use the token to fetch the next page of results.

", - "location": "querystring", - "locationName": "NextToken", - "shape": "__string" - } - }, - "type": "structure" - }, - "ListPlaybackConfigurationsResponse": { - "members": { - "Items": { - "documentation": "

Array of playback configurations. This might be all the available configurations or a subset, depending on the settings that you provide and the total number of configurations stored.

", - "shape": "__listOfPlaybackConfigurations" - }, - "NextToken": { - "documentation": "

Pagination token returned by the GET list request when results exceed the maximum allowed. Use the token to fetch the next page of results.

", - "shape": "__string" - } - }, - "type": "structure" - }, - "ListTagsForResourceRequest": { - "members": { - "ResourceArn": { - "documentation": "

The Amazon Resource Name (ARN) for the playback configuration. You can get this from the response to any playback configuration request.

", - "location": "uri", - "locationName": "ResourceArn", - "shape": "__string" - } - }, - "required": [ - "ResourceArn" - ], - "type": "structure" - }, - "ListTagsForResourceResponse": { - "members": { - "Tags": { - "documentation": "

A comma-separated list of tag key:value pairs. For example: \n {\n \"Key1\": \"Value1\",\n \"Key2\": \"Value2\"\n }\n

", - "locationName": "tags", - "shape": "__mapOf__string" - } - }, - "type": "structure" + "Slate": { + "documentation": "

Ad break slate configuration.

", + "shape": "SlateSource" + }, + "SpliceInsertMessage": { + "documentation": "

This defines the SCTE-35 splice_insert() message inserted around the ad. For information about using splice_insert(), see the SCTE-35 specification, section 9.7.3.1.

", + "shape": "SpliceInsertMessage" + } + }, + "type": "structure" + }, + "AdMarkerPassthrough": { + "documentation": "

For HLS, when set to true, MediaTailor passes through EXT-X-CUE-IN, EXT-X-CUE-OUT, and EXT-X-SPLICEPOINT-SCTE35 ad markers from the origin manifest to the MediaTailor personalized manifest.

No logic is applied to these ad markers. For example, if EXT-X-CUE-OUT has a value of 60, but no ads are filled for that ad break, MediaTailor will not set the value to 0.

", + "members": { + "Enabled": { + "documentation": "

Enables ad marker passthrough for your configuration.

", + "shape": "__boolean" + } + }, + "type": "structure" + }, + "AvailSuppression": { + "documentation": "

The configuration for avail suppression, also known as ad suppression. For more information about ad suppression, see Ad Suppression.

", + "members": { + "Mode": { + "documentation": "

Sets the ad suppression mode. By default, ad suppression is off and all ad breaks are filled with ads or slate. When Mode is set to BEHIND_LIVE_EDGE, ad suppression is active and MediaTailor won't fill ad breaks on or behind the ad suppression Value time in the manifest lookback window.

", + "shape": "Mode" + }, + "Value": { + "documentation": "

A live edge offset time in HH:MM:SS. MediaTailor won't fill ad breaks on or behind this time in the manifest lookback window. If Value is set to 00:00:00, it is in sync with the live edge, and MediaTailor won't fill any ad breaks on or behind the live edge. If you set a Value time, MediaTailor won't fill any ad breaks on or behind this time in the manifest lookback window. For example, if you set 00:45:00, then MediaTailor will fill ad breaks that occur within 45 minutes behind the live edge, but won't fill ad breaks on or behind 45 minutes behind the live edge.

", + "shape": "__string" + } + }, + "type": "structure" + }, + "BadRequestException": { + "documentation": "Invalid request parameters.", + "error": { + "httpStatusCode": 400 + }, + "exception": true, + "members": { + "Message": { + "shape": "__string" + } + }, + "type": "structure" + }, + "Bumper": { + "documentation": "

The configuration for bumpers. Bumpers are short audio or video clips that play at the start or before the end of an ad break. To learn more about bumpers, see Bumpers.

", + "members": { + "EndUrl": { + "documentation": "

The URL for the end bumper asset.

", + "shape": "__string" + }, + "StartUrl": { + "documentation": "

The URL for the start bumper asset.

", + "shape": "__string" + } + }, + "type": "structure" + }, + "CdnConfiguration": { + "documentation": "

The configuration for using a content delivery network (CDN), like Amazon CloudFront, for content and ad segment management.

", + "members": { + "AdSegmentUrlPrefix": { + "documentation": "

A non-default content delivery network (CDN) to serve ad segments. By default, AWS Elemental MediaTailor uses Amazon CloudFront with default cache settings as its CDN for ad segments. To set up an alternate CDN, create a rule in your CDN for the origin ads.mediatailor.<region>.amazonaws.com. Then specify the rule's name in this AdSegmentUrlPrefix. When AWS Elemental MediaTailor serves a manifest, it reports your CDN as the source for ad segments.

", + "shape": "__string" + }, + "ContentSegmentUrlPrefix": { + "documentation": "

A content delivery network (CDN) to cache content segments, so that content requests don\u2019t always have to go to the origin server. First, create a rule in your CDN for the content segment origin server. Then specify the rule's name in this ContentSegmentUrlPrefix. When AWS Elemental MediaTailor serves a manifest, it reports your CDN as the source for content segments.

", + "shape": "__string" + } + }, + "type": "structure" + }, + "Channel": { + "documentation": "

The configuration parameters for a channel.

", + "members": { + "Arn": { + "documentation": "

The ARN of the channel.

", + "shape": "__string" + }, + "ChannelName": { + "documentation": "

The name of the channel.

", + "shape": "__string" + }, + "ChannelState": { + "documentation": "

Indicates whether the channel is in a running state or not.

", + "shape": "__string" + }, + "CreationTime": { + "documentation": "

The timestamp of when the channel was created.

", + "shape": "__timestampUnix" + }, + "LastModifiedTime": { + "documentation": "

The timestamp of when the channel was last modified.

", + "shape": "__timestampUnix" + }, + "Outputs": { + "documentation": "

The channel's output properties.

", + "shape": "ResponseOutputs" + }, + "PlaybackMode": { + "documentation": "

The type of playback mode for this channel. Possible values: ONCE or LOOP.

", + "shape": "__string" + }, + "Tags": { + "documentation": "

The tags to assign to the channel.

", + "locationName": "tags", + "shape": "__mapOf__string" + } + }, + "required": [ + "ChannelState", + "ChannelName", + "Outputs", + "Arn", + "PlaybackMode" + ], + "type": "structure" + }, + "ChannelState": { + "enum": [ + "RUNNING", + "STOPPED" + ], + "type": "string" + }, + "ConfigurationAliasesRequest": { + "documentation": "

The predefined aliases for dynamic variables.

", + "key": { + "documentation": "

The dynamic variable that has aliases.

", + "shape": "__string" + }, + "type": "map", + "value": { + "documentation": "

Map of aliases to the value to be used at request time.

", + "shape": "__mapOf__string" + } + }, + "ConfigurationAliasesResponse": { + "documentation": "

The predefined aliases for dynamic variables.

", + "key": { + "documentation": "

The dynamic variable that has aliases.

", + "shape": "__string" + }, + "type": "map", + "value": { + "documentation": "

Map of aliases to the value to be used at request time.

", + "shape": "__mapOf__string" + } + }, + "CreateChannelRequest": { + "members": { + "ChannelName": { + "documentation": "

The identifier for the channel you are working on.

", + "location": "uri", + "locationName": "channelName", + "shape": "__string" + }, + "Outputs": { + "documentation": "

The channel's output properties.

", + "shape": "RequestOutputs" + }, + "PlaybackMode": { + "documentation": "

The type of playback mode for this channel. The only supported value is LOOP.

", + "shape": "PlaybackMode" + }, + "Tags": { + "documentation": "

The tags to assign to the channel.

", + "locationName": "tags", + "shape": "__mapOf__string" + } + }, + "required": [ + "ChannelName", + "Outputs", + "PlaybackMode" + ], + "type": "structure" + }, + "CreateChannelResponse": { + "members": { + "Arn": { + "documentation": "

The ARN of the channel.

", + "shape": "__string" + }, + "ChannelName": { + "documentation": "

The name of the channel.

", + "shape": "__string" + }, + "ChannelState": { + "documentation": "

Indicates whether the channel is in a running state or not.

", + "shape": "ChannelState" + }, + "CreationTime": { + "documentation": "

The timestamp of when the channel was created.

", + "shape": "__timestampUnix" + }, + "LastModifiedTime": { + "documentation": "

The timestamp of when the channel was last modified.

", + "shape": "__timestampUnix" + }, + "Outputs": { + "documentation": "

The channel's output properties.

", + "shape": "ResponseOutputs" + }, + "PlaybackMode": { + "documentation": "

The type of playback for this channel. The only supported value is LOOP.

", + "shape": "__string" + }, + "Tags": { + "documentation": "

The tags assigned to the channel.

", + "locationName": "tags", + "shape": "__mapOf__string" + } + }, + "type": "structure" + }, + "CreateProgramRequest": { + "members": { + "AdBreaks": { + "documentation": "

The ad break configuration settings.

", + "shape": "__listOfAdBreak" + }, + "ChannelName": { + "documentation": "

The identifier for the channel you are working on.

", + "location": "uri", + "locationName": "channelName", + "shape": "__string" + }, + "ProgramName": { + "documentation": "

The identifier for the program you are working on.

", + "location": "uri", + "locationName": "programName", + "shape": "__string" + }, + "ScheduleConfiguration": { + "documentation": "

The schedule configuration settings.

", + "shape": "ScheduleConfiguration" + }, + "SourceLocationName": { + "documentation": "

The name of the source location.

", + "shape": "__string" + }, + "VodSourceName": { + "documentation": "

The name that's used to refer to a VOD source.

", + "shape": "__string" + } + }, + "required": [ + "ChannelName", + "ProgramName", + "VodSourceName", + "ScheduleConfiguration", + "SourceLocationName" + ], + "type": "structure" + }, + "CreateProgramResponse": { + "members": { + "AdBreaks": { + "documentation": "

The ad break configuration settings.

", + "shape": "__listOfAdBreak" + }, + "Arn": { + "documentation": "

The ARN of the program.

", + "shape": "__string" + }, + "ChannelName": { + "documentation": "

The name of the channel that the program belongs to.

", + "shape": "__string" + }, + "CreationTime": { + "documentation": "

The timestamp of when the program was created.

", + "shape": "__timestampUnix" + }, + "ProgramName": { + "documentation": "

The name of the program.

", + "shape": "__string" + }, + "SourceLocationName": { + "documentation": "

The source location name.

", + "shape": "__string" + }, + "VodSourceName": { + "documentation": "

The name that's used to refer to a VOD source.

", + "shape": "__string" + } + }, + "type": "structure" + }, + "CreateSourceLocationRequest": { + "members": { + "AccessConfiguration": { + "documentation": "

Access configuration parameters. Configures the type of authentication used to access content from your source location.

", + "shape": "AccessConfiguration" + }, + "DefaultSegmentDeliveryConfiguration": { + "documentation": "

The optional configuration for the server that serves segments.

", + "shape": "DefaultSegmentDeliveryConfiguration" + }, + "HttpConfiguration": { + "documentation": "

The source's HTTP package configurations.

", + "shape": "HttpConfiguration" + }, + "SourceLocationName": { + "documentation": "

The identifier for the source location you are working on.

", + "location": "uri", + "locationName": "sourceLocationName", + "shape": "__string" + }, + "Tags": { + "documentation": "

The tags to assign to the source location.

", + "locationName": "tags", + "shape": "__mapOf__string" + } + }, + "required": [ + "SourceLocationName", + "HttpConfiguration" + ], + "type": "structure" + }, + "CreateSourceLocationResponse": { + "members": { + "AccessConfiguration": { + "documentation": "

The access configuration for the source location.

", + "shape": "AccessConfiguration" + }, + "Arn": { + "documentation": "

The ARN of the source location.

", + "shape": "__string" + }, + "CreationTime": { + "documentation": "

The timestamp that indicates when the source location was created.

", + "shape": "__timestampUnix" + }, + "DefaultSegmentDeliveryConfiguration": { + "documentation": "

The default segment delivery configuration settings.

", + "shape": "DefaultSegmentDeliveryConfiguration" + }, + "HttpConfiguration": { + "documentation": "

The HTTP package configuration settings for the source location.

", + "shape": "HttpConfiguration" + }, + "LastModifiedTime": { + "documentation": "

The timestamp that indicates when the source location was last modified.

", + "shape": "__timestampUnix" + }, + "SourceLocationName": { + "documentation": "

The name of the source location.

", + "shape": "__string" + }, + "Tags": { + "documentation": "

The tags assigned to the source location.

", + "locationName": "tags", + "shape": "__mapOf__string" + } + }, + "type": "structure" + }, + "CreateVodSourceRequest": { + "members": { + "HttpPackageConfigurations": { + "documentation": "

An array of HTTP package configuration parameters for this VOD source.

", + "shape": "HttpPackageConfigurations" + }, + "SourceLocationName": { + "documentation": "

The identifier for the source location you are working on.

", + "location": "uri", + "locationName": "sourceLocationName", + "shape": "__string" + }, + "Tags": { + "documentation": "

The tags to assign to the VOD source.

", + "locationName": "tags", + "shape": "__mapOf__string" + }, + "VodSourceName": { + "documentation": "

The identifier for the VOD source you are working on.

", + "location": "uri", + "locationName": "vodSourceName", + "shape": "__string" + } + }, + "required": [ + "SourceLocationName", + "VodSourceName", + "HttpPackageConfigurations" + ], + "type": "structure" + }, + "CreateVodSourceResponse": { + "members": { + "Arn": { + "documentation": "

The ARN of the VOD source.

", + "shape": "__string" + }, + "CreationTime": { + "documentation": "

The timestamp that indicates when the VOD source was created.

", + "shape": "__timestampUnix" + }, + "HttpPackageConfigurations": { + "documentation": "

The HTTP package configurations.

", + "shape": "HttpPackageConfigurations" + }, + "LastModifiedTime": { + "documentation": "

The timestamp that indicates when the VOD source was last modified.

", + "shape": "__timestampUnix" + }, + "SourceLocationName": { + "documentation": "

The name of the source location associated with the VOD source.

", + "shape": "__string" + }, + "Tags": { + "documentation": "

The tags assigned to the VOD source.

", + "locationName": "tags", + "shape": "__mapOf__string" + }, + "VodSourceName": { + "documentation": "

The name of the VOD source.

", + "shape": "__string" + } + }, + "type": "structure" + }, + "DashConfiguration": { + "documentation": "

The configuration for DASH content.

", + "members": { + "ManifestEndpointPrefix": { + "documentation": "

The URL generated by MediaTailor to initiate a playback session. The session uses server-side reporting. This setting is ignored in PUT operations.

", + "shape": "__string" + }, + "MpdLocation": { + "documentation": "

The setting that controls whether MediaTailor includes the Location tag in DASH manifests. MediaTailor populates the Location tag with the URL for manifest update requests, to be used by players that don't support sticky redirects. Disable this if you have CDN routing rules set up for accessing MediaTailor manifests, and you are either using client-side reporting or your players support sticky HTTP redirects. Valid values are DISABLED and EMT_DEFAULT. The EMT_DEFAULT setting enables the inclusion of the tag and is the default value.

", + "shape": "__string" }, "OriginManifestType": { - "enum": [ - "SINGLE_PERIOD", - "MULTI_PERIOD" - ], - "type": "string" - }, - "ManifestProcessingRules" : { - "type" : "structure", - "documentation" : "

The configuration for manifest processing rules. Manifest processing rules enable customization of the personalized manifests created by MediaTailor.

", - "members" : { - "AdMarkerPassthrough" : { - "shape" : "AdMarkerPassthrough" - } - } - }, - "Mode": { - "enum": [ - "OFF", - "BEHIND_LIVE_EDGE" - ], - "type": "string" - }, - "PlaybackConfiguration": { - "documentation": "

The AWSMediaTailor configuration.

", - "members": { - "AdDecisionServerUrl": { - "documentation": "

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing, you can provide a static VAST URL. The maximum length is 25,000 characters.

", - "shape": "__string" - }, - "AvailSuppression":{ - "documentation": "

The configuration for Avail Suppression. Ad suppression can be used to turn off ad personalization in a long manifest, or if a viewer joins mid-break.

", - "shape": "AvailSuppression" - }, - "Bumper": { - "shape": "Bumper", - "documentation": "

The configuration for bumpers. Bumpers are short audio or video clips that play at the start or before the end of an ad break.

" - }, - "CdnConfiguration": { - "documentation": "

The configuration for using a content delivery network (CDN), like Amazon CloudFront, for content and ad segment management.

", - "shape": "CdnConfiguration" - }, - "DashConfiguration": { - "documentation": "

The configuration for DASH content.

", - "shape": "DashConfiguration" - }, - "HlsConfiguration": { - "documentation": "

The configuration for HLS content.

", - "shape": "HlsConfiguration" - }, - "ManifestProcessingRules": { - "shape" : "ManifestProcessingRules", - "documentation" : "

The configuration for manifest processing rules. Manifest processing rules enable customization of the personalized manifests created by MediaTailor.

" - }, - "Name": { - "documentation": "

The identifier for the playback configuration.

", - "shape": "__string" - }, - "PlaybackConfigurationArn": { - "documentation": "

The Amazon Resource Name (ARN) for the playback configuration.

", - "shape": "__string" - }, - "PlaybackEndpointPrefix": { - "documentation": "

The URL that the player accesses to get a manifest from AWS Elemental MediaTailor. This session will use server-side reporting.

", - "shape": "__string" - }, - "SessionInitializationEndpointPrefix": { - "documentation": "

The URL that the player uses to initialize a session that uses client-side reporting.

", - "shape": "__string" - }, - "SlateAdUrl": { - "documentation": "

The URL for a high-quality video asset to transcode and use to fill in time that's not used by ads. AWS Elemental MediaTailor shows the slate to fill in gaps in media content. Configuring the slate is optional for non-VPAID playback configurations. For VPAID, the slate is required because MediaTailor provides it in the slots designated for dynamic ad content. The slate must be a high-quality asset that contains both audio and video.

", - "shape": "__string" - }, - "Tags": { - "documentation": "

The tags assigned to the playback configuration.

", - "locationName": "tags", - "shape": "__mapOf__string" - }, - "TranscodeProfileName": { - "documentation": "

The name that is used to associate this playback configuration with a custom transcode profile. This overrides the dynamic transcoding defaults of MediaTailor. Use this only if you have already set up custom profiles with the help of AWS Support.

", - "shape": "__string" - }, - "PersonalizationThresholdSeconds" : { - "documentation": "

The maximum duration of underfilled ad time (in seconds) allowed in an ad break.

", - "shape" : "__integerMin1" - }, - "VideoContentSourceUrl": { - "documentation": "

The URL prefix for the master playlist for the stream, minus the asset ID. The maximum length is 512 characters.

", - "shape": "__string" - } - }, - "type": "structure" - }, - "LivePreRollConfiguration" : { - "type" : "structure", - "members" : { - "AdDecisionServerUrl" : { - "shape" : "__string", - "documentation" : "

The URL for the ad decision server (ADS) for pre-roll ads. This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing, you can provide a static VAST URL. The maximum length is 25,000 characters.

" - }, - "MaxDurationSeconds" : { - "shape" : "__integer", - "documentation" : "The maximum allowed duration for the pre-roll ad avail. AWS Elemental MediaTailor won't play pre-roll ads to exceed this duration, regardless of the total duration of ads that the ADS returns." - } - }, - "documentation" : "

The configuration for pre-roll ad insertion.

" - }, - "PutPlaybackConfigurationRequest": { - "members": { - "AdDecisionServerUrl": { - "documentation": "

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing you can provide a static VAST URL. The maximum length is 25,000 characters.

", - "shape": "__string" - }, - "AvailSuppression" : { - "shape" : "AvailSuppression", - "documentation": "

The configuration for Avail Suppression. Ad suppression can be used to turn off ad personalization in a long manifest, or if a viewer joins mid-break.

" - }, - "Bumper": { - "shape": "Bumper", - "documentation": "

The configuration for bumpers. Bumpers are short audio or video clips that play at the start or before the end of an ad break.

" - }, - "CdnConfiguration": { - "documentation": "

The configuration for using a content delivery network (CDN), like Amazon CloudFront, for content and ad segment management.

", - "shape": "CdnConfiguration" - }, - "DashConfiguration": { - "documentation": "

The configuration for DASH content.

", - "shape": "DashConfigurationForPut" - }, - "LivePreRollConfiguration" : { - "shape" : "LivePreRollConfiguration", - "documentation" : "

The configuration for pre-roll ad insertion.

" - }, - "ManifestProcessingRules": { - "shape" : "ManifestProcessingRules", - "documentation": "

The configuration for manifest processing rules. Manifest processing rules enable customization of the personalized manifests created by MediaTailor.

" - }, - "Name": { - "documentation": "

The identifier for the playback configuration.

", - "shape": "__string" - }, - "PersonalizationThresholdSeconds" : { - "documentation": "

The maximum duration of underfilled ad time (in seconds) allowed in an ad break.

", - "shape" : "__integerMin1" - }, - "SlateAdUrl": { - "documentation": "

The URL for a high-quality video asset to transcode and use to fill in time that's not used by ads. AWS Elemental MediaTailor shows the slate to fill in gaps in media content. Configuring the slate is optional for non-VPAID configurations. For VPAID, the slate is required because MediaTailor provides it in the slots that are designated for dynamic ad content. The slate must be a high-quality asset that contains both audio and video.

", - "shape": "__string" - }, - "Tags": { - "documentation": "

The tags to assign to the playback configuration.

", - "locationName": "tags", - "shape": "__mapOf__string" - }, - "TranscodeProfileName": { - "documentation": "

The name that is used to associate this playback configuration with a custom transcode profile. This overrides the dynamic transcoding defaults of MediaTailor. Use this only if you have already set up custom profiles with the help of AWS Support.

", - "shape": "__string" - }, - "VideoContentSourceUrl": { - "documentation": "

The URL prefix for the master playlist for the stream, minus the asset ID. The maximum length is 512 characters.

", - "shape": "__string" - } - }, - "type": "structure" - }, - "PutPlaybackConfigurationResponse": { - "members": { - "AdDecisionServerUrl": { - "documentation": "

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing, you can provide a static VAST URL. The maximum length is 25,000 characters.

", - "shape": "__string" - }, - "AvailSuppression" : { - "shape" : "AvailSuppression", - "documentation": "

The configuration for Avail Suppression. Ad suppression can be used to turn off ad personalization in a long manifest, or if a viewer joins mid-break.

" - }, - "Bumper": { - "shape": "Bumper", - "documentation": "

The configuration for bumpers. Bumpers are short audio or video clips that play at the start or before the end of an ad break.

" - }, - "CdnConfiguration": { - "documentation": "

The configuration for using a content delivery network (CDN), like Amazon CloudFront, for content and ad segment management.

", - "shape": "CdnConfiguration" - }, - "DashConfiguration": { - "documentation": "

The configuration for DASH content.

", - "shape": "DashConfiguration" - }, - "HlsConfiguration": { - "documentation": "

The configuration for HLS content.

", - "shape": "HlsConfiguration" - }, - "LivePreRollConfiguration" : { - "shape" : "LivePreRollConfiguration", - "documentation" : "

The configuration for pre-roll ad insertion.

" - }, - "ManifestProcessingRules": { - "shape" : "ManifestProcessingRules", - "documentation": "The configuration for manifest processing rules. Manifest processing rules enable customization of the personalized manifests created by MediaTailor." - }, - "Name": { - "documentation": "

The identifier for the playback configuration.

", - "shape": "__string" - }, - "PersonalizationThresholdSeconds" : { - "documentation": "

The maximum duration of underfilled ad time (in seconds) allowed in an ad break.

", - "shape" : "__integerMin1" - }, - "PlaybackConfigurationArn": { - "documentation": "

The Amazon Resource Name (ARN) for the playback configuration.

", - "shape": "__string" - }, - "PlaybackEndpointPrefix": { - "documentation": "

The URL that the player accesses to get a manifest from AWS Elemental MediaTailor. This session will use server-side reporting.

", - "shape": "__string" - }, - "SessionInitializationEndpointPrefix": { - "documentation": "

The URL that the player uses to initialize a session that uses client-side reporting.

", - "shape": "__string" - }, - "SlateAdUrl": { - "documentation": "

The URL for a high-quality video asset to transcode and use to fill in time that's not used by ads. AWS Elemental MediaTailor shows the slate to fill in gaps in media content. Configuring the slate is optional for non-VPAID playback configurations. For VPAID, the slate is required because MediaTailor provides it in the slots designated for dynamic ad content. The slate must be a high-quality asset that contains both audio and video.

", - "shape": "__string" - }, - "Tags": { - "documentation": "

The tags assigned to the playback configuration.

", - "locationName": "tags", - "shape": "__mapOf__string" - }, - "TranscodeProfileName": { - "documentation": "

The name that is used to associate this playback configuration with a custom transcode profile. This overrides the dynamic transcoding defaults of MediaTailor. Use this only if you have already set up custom profiles with the help of AWS Support.

", - "shape": "__string" - }, - "VideoContentSourceUrl": { - "documentation": "

The URL prefix for the master playlist for the stream, minus the asset ID. The maximum length is 512 characters.

", - "shape": "__string" - } - }, - "type": "structure" - }, - "TagResourceRequest": { - "members": { - "ResourceArn": { - "documentation": "

The Amazon Resource Name (ARN) for the playback configuration. You can get this from the response to any playback configuration request.

", - "location": "uri", - "locationName": "ResourceArn", - "shape": "__string" - }, - "Tags": { - "documentation": "

A comma-separated list of tag key:value pairs. For example: \n {\n \"Key1\": \"Value1\",\n \"Key2\": \"Value2\"\n }\n

", - "locationName": "tags", - "shape": "__mapOf__string" - } - }, - "required": [ - "ResourceArn", - "Tags" - ], - "type": "structure" - }, - "TagsModel": { - "documentation": "

A set of tags assigned to a resource.

", - "members": { - "Tags": { - "documentation": "

A comma-separated list of tag key:value pairs. For example: \n {\n \"Key1\": \"Value1\",\n \"Key2\": \"Value2\"\n }\n

", - "locationName": "tags", - "shape": "__mapOf__string" - } - }, - "required": [ - "Tags" - ], - "type": "structure" - }, - "UntagResourceRequest": { - "members": { - "ResourceArn": { - "documentation": "

The Amazon Resource Name (ARN) for the playback configuration. You can get this from the response to any playback configuration request.

", - "location": "uri", - "locationName": "ResourceArn", - "shape": "__string" - }, - "TagKeys": { - "documentation": "

A comma-separated list of the tag keys to remove from the playback configuration.

", - "location": "querystring", - "locationName": "tagKeys", - "shape": "__listOf__string" - } - }, - "required": [ - "ResourceArn", - "TagKeys" - ], - "type": "structure" - }, - "__boolean": { - "type": "boolean" - }, - "__double": { - "type": "double" - }, - "__integer": { - "type": "integer" - }, - "__integerMin1": { - "type": "integer", - "min": 1 - }, - "__integerMin1Max100": { - "max": 100, - "min": 1, - "type": "integer" - }, - "__listOfPlaybackConfigurations": { - "member": { - "shape": "PlaybackConfiguration" - }, - "type": "list" - }, - "__listOf__string": { - "member": { - "shape": "__string" - }, - "type": "list" - }, - "__long": { - "type": "long" - }, - "__mapOf__string": { - "key": { - "shape": "__string" - }, - "type": "map", - "value": { - "shape": "__string" - } - }, - "__string": { - "type": "string" - }, - "__timestampIso8601": { - "timestampFormat": "iso8601", - "type": "timestamp" - }, - "__timestampUnix": { - "timestampFormat": "unixTimestamp", - "type": "timestamp" + "documentation": "

The setting that controls whether MediaTailor handles manifests from the origin server as multi-period manifests or single-period manifests. If your origin server produces single-period manifests, set this to SINGLE_PERIOD. The default setting is MULTI_PERIOD. For multi-period manifests, omit this setting or set it to MULTI_PERIOD.

", + "shape": "OriginManifestType" } + }, + "type": "structure" + }, + "DashConfigurationForPut": { + "documentation": "

The configuration for DASH PUT operations.

", + "members": { + "MpdLocation": { + "documentation": "

The setting that controls whether MediaTailor includes the Location tag in DASH manifests. MediaTailor populates the Location tag with the URL for manifest update requests, to be used by players that don't support sticky redirects. Disable this if you have CDN routing rules set up for accessing MediaTailor manifests, and you are either using client-side reporting or your players support sticky HTTP redirects. Valid values are DISABLED and EMT_DEFAULT. The EMT_DEFAULT setting enables the inclusion of the tag and is the default value.

", + "shape": "__string" + }, + "OriginManifestType": { + "documentation": "

The setting that controls whether MediaTailor handles manifests from the origin server as multi-period manifests or single-period manifests. If your origin server produces single-period manifests, set this to SINGLE_PERIOD. The default setting is MULTI_PERIOD. For multi-period manifests, omit this setting or set it to MULTI_PERIOD.

", + "shape": "OriginManifestType" + } + }, + "type": "structure" + }, + "DashPlaylistSettings": { + "documentation": "

DASH manifest configuration parameters.

", + "members": { + "ManifestWindowSeconds": { + "documentation": "

The total duration (in seconds) of each manifest. Minimum value: 30 seconds. Maximum value: 3600 seconds.

", + "shape": "__integer" + }, + "MinBufferTimeSeconds": { + "documentation": "

Minimum amount of content (measured in seconds) that a player must keep available in the buffer. Minimum value: 2 seconds. Maximum value: 60 seconds.

", + "shape": "__integer" + }, + "MinUpdatePeriodSeconds": { + "documentation": "

Minimum amount of time (in seconds) that the player should wait before requesting updates to the manifest. Minimum value: 2 seconds. Maximum value: 60 seconds.

", + "shape": "__integer" + }, + "SuggestedPresentationDelaySeconds": { + "documentation": "

Amount of time (in seconds) that the player should be from the live point at the end of the manifest. Minimum value: 2 seconds. Maximum value: 60 seconds.

", + "shape": "__integer" + } + }, + "type": "structure" + }, + "DefaultSegmentDeliveryConfiguration": { + "documentation": "

The optional configuration for a server that serves segments. Use this if you want the segment delivery server to be different from the source location server. For example, you can configure your source location server to be an origination server, such as MediaPackage, and the segment delivery server to be a content delivery network (CDN), such as CloudFront. If you don't specify a segment delivery server, then the source location server is used.

", + "members": { + "BaseUrl": { + "documentation": "

The hostname of the server that will be used to serve segments. This string must include the protocol, such as https://.

", + "shape": "__string" + } + }, + "type": "structure" + }, + "DeleteChannelPolicyRequest": { + "members": { + "ChannelName": { + "documentation": "

The identifier for the channel you are working on.

", + "location": "uri", + "locationName": "channelName", + "shape": "__string" + } + }, + "required": [ + "ChannelName" + ], + "type": "structure" + }, + "DeleteChannelPolicyResponse": { + "members": {}, + "type": "structure" + }, + "DeleteChannelRequest": { + "members": { + "ChannelName": { + "documentation": "

The identifier for the channel you are working on.

", + "location": "uri", + "locationName": "channelName", + "shape": "__string" + } + }, + "required": [ + "ChannelName" + ], + "type": "structure" + }, + "DeleteChannelResponse": { + "members": {}, + "type": "structure" + }, + "DeletePlaybackConfigurationRequest": { + "members": { + "Name": { + "documentation": "

The identifier for the playback configuration.

", + "location": "uri", + "locationName": "Name", + "shape": "__string" + } + }, + "required": [ + "Name" + ], + "type": "structure" + }, + "DeletePlaybackConfigurationResponse": { + "members": {}, + "type": "structure" + }, + "DeleteProgramRequest": { + "members": { + "ChannelName": { + "documentation": "

The identifier for the channel you are working on.

", + "location": "uri", + "locationName": "channelName", + "shape": "__string" + }, + "ProgramName": { + "documentation": "

The identifier for the program you are working on.

", + "location": "uri", + "locationName": "programName", + "shape": "__string" + } + }, + "required": [ + "ChannelName", + "ProgramName" + ], + "type": "structure" + }, + "DeleteProgramResponse": { + "members": {}, + "type": "structure" + }, + "DeleteSourceLocationRequest": { + "members": { + "SourceLocationName": { + "documentation": "

The identifier for the source location you are working on.

", + "location": "uri", + "locationName": "sourceLocationName", + "shape": "__string" + } + }, + "required": [ + "SourceLocationName" + ], + "type": "structure" + }, + "DeleteSourceLocationResponse": { + "members": {}, + "type": "structure" + }, + "DeleteVodSourceRequest": { + "members": { + "SourceLocationName": { + "documentation": "

The identifier for the source location you are working on.

", + "location": "uri", + "locationName": "sourceLocationName", + "shape": "__string" + }, + "VodSourceName": { + "documentation": "

The identifier for the VOD source you are working on.

", + "location": "uri", + "locationName": "vodSourceName", + "shape": "__string" + } + }, + "required": [ + "SourceLocationName", + "VodSourceName" + ], + "type": "structure" + }, + "DeleteVodSourceResponse": { + "members": {}, + "type": "structure" + }, + "DescribeChannelRequest": { + "members": { + "ChannelName": { + "documentation": "

The identifier for the channel you are working on.

", + "location": "uri", + "locationName": "channelName", + "shape": "__string" + } + }, + "required": [ + "ChannelName" + ], + "type": "structure" + }, + "DescribeChannelResponse": { + "members": { + "Arn": { + "documentation": "

The ARN of the channel.

", + "shape": "__string" + }, + "ChannelName": { + "documentation": "

The name of the channel.

", + "shape": "__string" + }, + "ChannelState": { + "documentation": "

Indicates whether the channel is in a running state or not.

", + "shape": "ChannelState" + }, + "CreationTime": { + "documentation": "

The timestamp of when the channel was created.

", + "shape": "__timestampUnix" + }, + "LastModifiedTime": { + "documentation": "

The timestamp of when the channel was last modified.

", + "shape": "__timestampUnix" + }, + "Outputs": { + "documentation": "

The channel's output properties.

", + "shape": "ResponseOutputs" + }, + "PlaybackMode": { + "documentation": "

The type of playback for this channel. The only supported value is LOOP.

", + "shape": "__string" + }, + "Tags": { + "documentation": "

The tags assigned to the channel.

", + "locationName": "tags", + "shape": "__mapOf__string" + } + }, + "type": "structure" + }, + "DescribeProgramRequest": { + "members": { + "ChannelName": { + "documentation": "

The identifier for the channel you are working on.

", + "location": "uri", + "locationName": "channelName", + "shape": "__string" + }, + "ProgramName": { + "documentation": "

The identifier for the program you are working on.

", + "location": "uri", + "locationName": "programName", + "shape": "__string" + } + }, + "required": [ + "ChannelName", + "ProgramName" + ], + "type": "structure" + }, + "DescribeProgramResponse": { + "members": { + "AdBreaks": { + "documentation": "

The ad break configuration settings.

", + "shape": "__listOfAdBreak" + }, + "Arn": { + "documentation": "

The ARN of the program.

", + "shape": "__string" + }, + "ChannelName": { + "documentation": "

The name of the channel that the program belongs to.

", + "shape": "__string" + }, + "CreationTime": { + "documentation": "

The timestamp of when the program was created.

", + "shape": "__timestampUnix" + }, + "ProgramName": { + "documentation": "

The name of the program.

", + "shape": "__string" + }, + "SourceLocationName": { + "documentation": "

The source location name.

", + "shape": "__string" + }, + "VodSourceName": { + "documentation": "

The name that's used to refer to a VOD source.

", + "shape": "__string" + } + }, + "type": "structure" + }, + "DescribeSourceLocationRequest": { + "members": { + "SourceLocationName": { + "documentation": "

The identifier for the source location you are working on.

", + "location": "uri", + "locationName": "sourceLocationName", + "shape": "__string" + } + }, + "required": [ + "SourceLocationName" + ], + "type": "structure" + }, + "DescribeSourceLocationResponse": { + "members": { + "AccessConfiguration": { + "documentation": "

The access configuration for the source location.

", + "shape": "AccessConfiguration" + }, + "Arn": { + "documentation": "

The ARN of the source location.

", + "shape": "__string" + }, + "CreationTime": { + "documentation": "

The timestamp that indicates when the source location was created.

", + "shape": "__timestampUnix" + }, + "DefaultSegmentDeliveryConfiguration": { + "documentation": "

The default segment delivery configuration settings.

", + "shape": "DefaultSegmentDeliveryConfiguration" + }, + "HttpConfiguration": { + "documentation": "

The HTTP package configuration settings for the source location.

", + "shape": "HttpConfiguration" + }, + "LastModifiedTime": { + "documentation": "

The timestamp that indicates when the source location was last modified.

", + "shape": "__timestampUnix" + }, + "SourceLocationName": { + "documentation": "

The name of the source location.

", + "shape": "__string" + }, + "Tags": { + "documentation": "

The tags assigned to the source location.

", + "locationName": "tags", + "shape": "__mapOf__string" + } + }, + "type": "structure" + }, + "DescribeVodSourceRequest": { + "members": { + "SourceLocationName": { + "documentation": "

The identifier for the source location you are working on.

", + "location": "uri", + "locationName": "sourceLocationName", + "shape": "__string" + }, + "VodSourceName": { + "documentation": "

The identifier for the VOD source you are working on.

", + "location": "uri", + "locationName": "vodSourceName", + "shape": "__string" + } + }, + "required": [ + "SourceLocationName", + "VodSourceName" + ], + "type": "structure" + }, + "DescribeVodSourceResponse": { + "members": { + "Arn": { + "documentation": "

The ARN of the VOD source.

", + "shape": "__string" + }, + "CreationTime": { + "documentation": "

The timestamp that indicates when the VOD source was created.

", + "shape": "__timestampUnix" + }, + "HttpPackageConfigurations": { + "documentation": "

The HTTP package configurations.

", + "shape": "HttpPackageConfigurations" + }, + "LastModifiedTime": { + "documentation": "

The timestamp that indicates when the VOD source was last modified.

", + "shape": "__timestampUnix" + }, + "SourceLocationName": { + "documentation": "

The name of the source location associated with the VOD source.

", + "shape": "__string" + }, + "Tags": { + "documentation": "

The tags assigned to the VOD source.

", + "locationName": "tags", + "shape": "__mapOf__string" + }, + "VodSourceName": { + "documentation": "

The name of the VOD source.

", + "shape": "__string" + } + }, + "type": "structure" + }, + "GetChannelPolicyRequest": { + "members": { + "ChannelName": { + "documentation": "

The identifier for the channel you are working on.

", + "location": "uri", + "locationName": "channelName", + "shape": "__string" + } + }, + "required": [ + "ChannelName" + ], + "type": "structure" + }, + "GetChannelPolicyResponse": { + "members": { + "Policy": { + "documentation": "

The IAM policy for the channel.

", + "shape": "__string" + } + }, + "type": "structure" + }, + "GetChannelScheduleRequest": { + "members": { + "ChannelName": { + "documentation": "

The identifier for the channel you are working on.

", + "location": "uri", + "locationName": "channelName", + "shape": "__string" + }, + "DurationMinutes": { + "documentation": "

The schedule duration in minutes. The maximum duration is 4320 minutes (three days).

", + "location": "querystring", + "locationName": "durationMinutes", + "shape": "__string" + }, + "MaxResults": { + "documentation": "

Upper bound on number of records to return. The maximum number of results is 100.

", + "location": "querystring", + "locationName": "maxResults", + "shape": "MaxResults" + }, + "NextToken": { + "documentation": "

Pagination token from the GET list request. Use the token to fetch the next page of results.

", + "location": "querystring", + "locationName": "nextToken", + "shape": "__string" + } + }, + "required": [ + "ChannelName" + ], + "type": "structure" + }, + "GetChannelScheduleResponse": { + "members": { + "Items": { + "documentation": "

An array of schedule entries for the channel.

", + "shape": "__listOfScheduleEntry" + }, + "NextToken": { + "documentation": "

Pagination token from the GET list request. Use the token to fetch the next page of results.
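A sketch of draining the schedule with this pagination token (the channel name and client setup are assumptions; the loop simply follows NextToken until it is absent):

    import botocore.session

    mediatailor = botocore.session.get_session().create_client("mediatailor", region_name="us-east-1")
    entries, kwargs = [], {"ChannelName": "my-channel", "MaxResults": 100}
    while True:
        page = mediatailor.get_channel_schedule(**kwargs)
        entries.extend(page.get("Items", []))   # ScheduleEntry objects
        token = page.get("NextToken")
        if not token:
            break
        kwargs["NextToken"] = token             # fetch the next page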

", + "shape": "__string" + } + }, + "type": "structure" + }, + "GetPlaybackConfigurationRequest": { + "members": { + "Name": { + "documentation": "

The identifier for the playback configuration.

", + "location": "uri", + "locationName": "Name", + "shape": "__string" + } + }, + "required": [ + "Name" + ], + "type": "structure" + }, + "GetPlaybackConfigurationResponse": { + "members": { + "AdDecisionServerUrl": { + "documentation": "

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing, you can provide a static VAST URL. The maximum length is 25,000 characters.

", + "shape": "__string" + }, + "AvailSuppression": { + "documentation": "

The configuration for avail suppression, also known as ad suppression. For more information about ad suppression, see Ad Suppression.

", + "shape": "AvailSuppression" + }, + "Bumper": { + "documentation": "

The configuration for bumpers. Bumpers are short audio or video clips that play at the start or before the end of an ad break. To learn more about bumpers, see Bumpers.

", + "shape": "Bumper" + }, + "CdnConfiguration": { + "documentation": "

The configuration for using a content delivery network (CDN), like Amazon CloudFront, for content and ad segment management.

", + "shape": "CdnConfiguration" + }, + "ConfigurationAliases": { + "documentation": "

The player parameters and aliases used as dynamic variables during session initialization. For more information, see Domain Variables.

", + "shape": "ConfigurationAliasesResponse" + }, + "DashConfiguration": { + "documentation": "

The configuration for DASH content.

", + "shape": "DashConfiguration" + }, + "HlsConfiguration": { + "documentation": "

The configuration for HLS content.

", + "shape": "HlsConfiguration" + }, + "LivePreRollConfiguration": { + "documentation": "

The configuration for pre-roll ad insertion.

", + "shape": "LivePreRollConfiguration" + }, + "ManifestProcessingRules": { + "documentation": "

The configuration for manifest processing rules. Manifest processing rules enable customization of the personalized manifests created by MediaTailor.

", + "shape": "ManifestProcessingRules" + }, + "Name": { + "documentation": "

The identifier for the playback configuration.

", + "shape": "__string" + }, + "PersonalizationThresholdSeconds": { + "documentation": "

Defines the maximum duration of underfilled ad time (in seconds) allowed in an ad break. If the duration of underfilled ad time exceeds the personalization threshold, then the personalization of the ad break is abandoned and the underlying content is shown. This feature applies to ad replacement in live and VOD streams, rather than ad insertion, because it relies on an underlying content stream. For more information about ad break behavior, including ad replacement and insertion, see Ad Behavior in AWS Elemental MediaTailor.

", + "shape": "__integerMin1" + }, + "PlaybackConfigurationArn": { + "documentation": "

The Amazon Resource Name (ARN) for the playback configuration.

", + "shape": "__string" + }, + "PlaybackEndpointPrefix": { + "documentation": "

The URL that the player accesses to get a manifest from AWS Elemental MediaTailor. This session will use server-side reporting.

", + "shape": "__string" + }, + "SessionInitializationEndpointPrefix": { + "documentation": "

The URL that the player uses to initialize a session that uses client-side reporting.

", + "shape": "__string" + }, + "SlateAdUrl": { + "documentation": "

The URL for a high-quality video asset to transcode and use to fill in time that's not used by ads. AWS Elemental MediaTailor shows the slate to fill in gaps in media content. Configuring the slate is optional for non-VPAID playback configurations. For VPAID, the slate is required because MediaTailor provides it in the slots designated for dynamic ad content. The slate must be a high-quality asset that contains both audio and video.

", + "shape": "__string" + }, + "Tags": { + "documentation": "

The tags assigned to the playback configuration.

", + "locationName": "tags", + "shape": "__mapOf__string" + }, + "TranscodeProfileName": { + "documentation": "

The name that is used to associate this playback configuration with a custom transcode profile. This overrides the dynamic transcoding defaults of MediaTailor. Use this only if you have already set up custom profiles with the help of AWS Support.

", + "shape": "__string" + }, + "VideoContentSourceUrl": { + "documentation": "

The URL prefix for the parent manifest for the stream, minus the asset ID. The maximum length is 512 characters.

", + "shape": "__string" + } + }, + "type": "structure" + }, + "HlsConfiguration": { + "documentation": "

The configuration for HLS content.

", + "members": { + "ManifestEndpointPrefix": { + "documentation": "

The URL that is used to initiate a playback session for devices that support Apple HLS. The session uses server-side reporting.

", + "shape": "__string" + } + }, + "type": "structure" + }, + "HlsPlaylistSettings": { + "documentation": "

HLS playlist configuration parameters.

", + "members": { + "ManifestWindowSeconds": { + "documentation": "

The total duration (in seconds) of each manifest. Minimum value: 30 seconds. Maximum value: 3600 seconds.

", + "shape": "__integer" + } + }, + "type": "structure" + }, + "HttpConfiguration": { + "documentation": "

The HTTP configuration for the source location.

", + "members": { + "BaseUrl": { + "documentation": "

The base URL for the source location host server. This string must include the protocol, such as https://.

", + "shape": "__string" + } + }, + "required": [ + "BaseUrl" + ], + "type": "structure" + }, + "HttpPackageConfiguration": { + "documentation": "

The HTTP package configuration properties for the requested VOD source.

", + "members": { + "Path": { + "documentation": "

The relative path to the URL for this VOD source. This is combined with SourceLocation::HttpConfiguration::BaseUrl to form a valid URL.

", + "shape": "__string" + }, + "SourceGroup": { + "documentation": "

The name of the source group. This has to match one of the Channel::Outputs::SourceGroup.

", + "shape": "__string" + }, + "Type": { + "documentation": "

The streaming protocol for this package configuration. Supported values are HLS and DASH.
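For illustration, a VOD source could register one HLS package configuration like this (the names and path are assumptions; SourceGroup must match a channel output's SourceGroup):

    import botocore.session

    mediatailor = botocore.session.get_session().create_client("mediatailor", region_name="us-east-1")
    mediatailor.create_vod_source(
        SourceLocationName="my-source-location",
        VodSourceName="episode-1",
        HttpPackageConfigurations=[{
            "Path": "/vod/episode-1/index",  # appended to the source location's BaseUrl
            "SourceGroup": "main",           # matched against Channel::Outputs::SourceGroup
            "Type": "HLS",
        }],
    )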

", + "shape": "Type" + } + }, + "required": [ + "Path", + "Type", + "SourceGroup" + ], + "type": "structure" + }, + "HttpPackageConfigurations": { + "documentation": "

The VOD source's HTTP package configuration settings.

", + "member": { + "shape": "HttpPackageConfiguration" + }, + "type": "list" + }, + "ListChannelsRequest": { + "members": { + "MaxResults": { + "documentation": "

Upper bound on number of records to return. The maximum number of results is 100.

", + "location": "querystring", + "locationName": "maxResults", + "shape": "MaxResults" + }, + "NextToken": { + "documentation": "

Pagination token from the GET list request. Use the token to fetch the next page of results.

", + "location": "querystring", + "locationName": "nextToken", + "shape": "__string" + } + }, + "type": "structure" + }, + "ListChannelsResponse": { + "members": { + "Items": { + "documentation": "

An array of channels that are associated with this account.

", + "shape": "__listOfChannel" + }, + "NextToken": { + "documentation": "

Pagination token returned by the list request when results exceed the maximum allowed. Use the token to fetch the next page of results.

", + "shape": "__string" + } + }, + "type": "structure" + }, + "ListPlaybackConfigurationsRequest": { + "members": { + "MaxResults": { + "documentation": "

Maximum number of records to return.

", + "location": "querystring", + "locationName": "MaxResults", + "shape": "MaxResults" + }, + "NextToken": { + "documentation": "

Pagination token returned by the GET list request when results exceed the maximum allowed. Use the token to fetch the next page of results.

", + "location": "querystring", + "locationName": "NextToken", + "shape": "__string" + } + }, + "type": "structure" + }, + "ListPlaybackConfigurationsResponse": { + "members": { + "Items": { + "documentation": "

Array of playback configurations. This might be all the available configurations or a subset, depending on the settings that you provide and the total number of configurations stored.

", + "shape": "__listOfPlaybackConfiguration" + }, + "NextToken": { + "documentation": "

Pagination token returned by the GET list request when results exceed the maximum allowed. Use the token to fetch the next page of results.

", + "shape": "__string" + } + }, + "type": "structure" + }, + "ListSourceLocationsRequest": { + "members": { + "MaxResults": { + "documentation": "

Upper bound on number of records to return. The maximum number of results is 100.

", + "location": "querystring", + "locationName": "maxResults", + "shape": "MaxResults" + }, + "NextToken": { + "documentation": "

Pagination token from the GET list request. Use the token to fetch the next page of results.

", + "location": "querystring", + "locationName": "nextToken", + "shape": "__string" + } + }, + "type": "structure" + }, + "ListSourceLocationsResponse": { + "members": { + "Items": { + "documentation": "

An array of source locations.

", + "shape": "__listOfSourceLocation" + }, + "NextToken": { + "documentation": "

Pagination token from the list request. Use the token to fetch the next page of results.

", + "shape": "__string" + } + }, + "type": "structure" + }, + "ListTagsForResourceRequest": { + "members": { + "ResourceArn": { + "documentation": "

The Amazon Resource Name (ARN) for the playback configuration. You can get this from the response to any playback configuration request.

", + "location": "uri", + "locationName": "ResourceArn", + "shape": "__string" + } + }, + "required": [ + "ResourceArn" + ], + "type": "structure" + }, + "ListTagsForResourceResponse": { + "members": { + "Tags": { + "documentation": "

A comma-separated list of tag key:value pairs.

", + "locationName": "tags", + "shape": "__mapOf__string" + } + }, + "type": "structure" + }, + "ListVodSourcesRequest": { + "members": { + "MaxResults": { + "documentation": "

Upper bound on number of records to return. The maximum number of results is 100.

", + "location": "querystring", + "locationName": "maxResults", + "shape": "MaxResults" + }, + "NextToken": { + "documentation": "

Pagination token from the GET list request. Use the token to fetch the next page of results.

", + "location": "querystring", + "locationName": "nextToken", + "shape": "__string" + }, + "SourceLocationName": { + "documentation": "

The identifier for the source location you are working on.

", + "location": "uri", + "locationName": "sourceLocationName", + "shape": "__string" + } + }, + "required": [ + "SourceLocationName" + ], + "type": "structure" + }, + "ListVodSourcesResponse": { + "members": { + "Items": { + "documentation": "

Lists the VOD sources.

", + "shape": "__listOfVodSource" + }, + "NextToken": { + "documentation": "

Pagination token from the list request. Use the token to fetch the next page of results.

", + "shape": "__string" + } + }, + "type": "structure" + }, + "LivePreRollConfiguration": { + "documentation": "

The configuration for pre-roll ad insertion.

", + "members": { + "AdDecisionServerUrl": { + "documentation": "

The URL for the ad decision server (ADS) for pre-roll ads. This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing, you can provide a static VAST URL. The maximum length is 25,000 characters.

", + "shape": "__string" + }, + "MaxDurationSeconds": { + "documentation": "The maximum allowed duration for the pre-roll ad avail. AWS Elemental MediaTailor won't play pre-roll ads to exceed this duration, regardless of the total duration of ads that the ADS returns.", + "shape": "__integer" + } + }, + "type": "structure" + }, + "ManifestProcessingRules": { + "documentation": "

The configuration for manifest processing rules. Manifest processing rules enable customization of the personalized manifests created by MediaTailor.

", + "members": { + "AdMarkerPassthrough": { + "documentation": "

For HLS, when set to true, MediaTailor passes through EXT-X-CUE-IN, EXT-X-CUE-OUT, and EXT-X-SPLICEPOINT-SCTE35 ad markers from the origin manifest to the MediaTailor personalized manifest.

No logic is applied to these ad markers. For example, if EXT-X-CUE-OUT has a value of 60, but no ads are filled for that ad break, MediaTailor will not set the value to 0.
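A minimal sketch of turning the passthrough on for a playback configuration; the configuration name is an assumption, and Enabled is the single boolean member that the AdMarkerPassthrough shape carries in the MediaTailor model:

    import botocore.session

    mediatailor = botocore.session.get_session().create_client("mediatailor", region_name="us-east-1")
    mediatailor.put_playback_configuration(
        Name="my-config",  # assumed existing configuration
        ManifestProcessingRules={"AdMarkerPassthrough": {"Enabled": True}},  # pass SCTE-35 markers through
    )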

", + "shape": "AdMarkerPassthrough" + } + }, + "type": "structure" + }, + "MaxResults": { + "max": 100, + "min": 1, + "type": "integer" + }, + "MessageType": { + "enum": [ + "SPLICE_INSERT" + ], + "type": "string" + }, + "Mode": { + "enum": [ + "OFF", + "BEHIND_LIVE_EDGE" + ], + "type": "string" + }, + "OriginManifestType": { + "enum": [ + "SINGLE_PERIOD", + "MULTI_PERIOD" + ], + "type": "string" + }, + "PlaybackConfiguration": { + "documentation": "

Creates a playback configuration. For information about MediaTailor configurations, see Working with configurations in AWS Elemental MediaTailor.

", + "members": { + "AdDecisionServerUrl": { + "documentation": "

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing you can provide a static VAST URL. The maximum length is 25,000 characters.

", + "shape": "__string" + }, + "AvailSuppression": { + "documentation": "

The configuration for avail suppression, also known as ad suppression. For more information about ad suppression, see Ad Suppression.

", + "shape": "AvailSuppression" + }, + "Bumper": { + "documentation": "

The configuration for bumpers. Bumpers are short audio or video clips that play at the start or before the end of an ad break. To learn more about bumpers, see Bumpers.

", + "shape": "Bumper" + }, + "CdnConfiguration": { + "documentation": "

The configuration for using a content delivery network (CDN), like Amazon CloudFront, for content and ad segment management.

", + "shape": "CdnConfiguration" + }, + "ConfigurationAliases": { + "documentation": "

The player parameters and aliases used as dynamic variables during session initialization. For more information, see Domain Variables.

", + "shape": "ConfigurationAliasesResponse" + }, + "DashConfiguration": { + "documentation": "

The configuration for a DASH source.

", + "shape": "DashConfiguration" + }, + "HlsConfiguration": { + "documentation": "

The configuration for HLS content.

", + "shape": "HlsConfiguration" + }, + "LivePreRollConfiguration": { + "documentation": "

The configuration for pre-roll ad insertion.

", + "shape": "LivePreRollConfiguration" + }, + "ManifestProcessingRules": { + "documentation": "

The configuration for manifest processing rules. Manifest processing rules enable customization of the personalized manifests created by MediaTailor.

", + "shape": "ManifestProcessingRules" + }, + "Name": { + "documentation": "

The identifier for the playback configuration.

", + "shape": "__string" + }, + "PersonalizationThresholdSeconds": { + "documentation": "

Defines the maximum duration of underfilled ad time (in seconds) allowed in an ad break. If the duration of underfilled ad time exceeds the personalization threshold, then the personalization of the ad break is abandoned and the underlying content is shown. This feature applies to ad replacement in live and VOD streams, rather than ad insertion, because it relies on an underlying content stream. For more information about ad break behavior, including ad replacement and insertion, see Ad Behavior in AWS Elemental MediaTailor.

", + "shape": "__integerMin1" + }, + "PlaybackConfigurationArn": { + "documentation": "

The Amazon Resource Name (ARN) for the playback configuration.

", + "shape": "__string" + }, + "PlaybackEndpointPrefix": { + "documentation": "

The URL that the player accesses to get a manifest from AWS Elemental MediaTailor.

", + "shape": "__string" + }, + "SessionInitializationEndpointPrefix": { + "documentation": "

The URL that the player uses to initialize a session that uses client-side reporting.

", + "shape": "__string" + }, + "SlateAdUrl": { + "documentation": "

The URL for a video asset to transcode and use to fill in time that's not used by ads. AWS Elemental MediaTailor shows the slate to fill in gaps in media content. Configuring the slate is optional for non-VPAID playback configurations. For VPAID, the slate is required because MediaTailor provides it in the slots designated for dynamic ad content. The slate must be a high-quality asset that contains both audio and video.

", + "shape": "__string" + }, + "Tags": { + "documentation": "

The tags to assign to the playback configuration.

", + "locationName": "tags", + "shape": "__mapOf__string" + }, + "TranscodeProfileName": { + "documentation": "

The name that is used to associate this playback configuration with a custom transcode profile. This overrides the dynamic transcoding defaults of MediaTailor. Use this only if you have already set up custom profiles with the help of AWS Support.

", + "shape": "__string" + }, + "VideoContentSourceUrl": { + "documentation": "

The URL prefix for the parent manifest for the stream, minus the asset ID. The maximum length is 512 characters.

", + "shape": "__string" + } + }, + "type": "structure" + }, + "PlaybackMode": { + "enum": [ + "LOOP" + ], + "type": "string" + }, + "PutChannelPolicyRequest": { + "members": { + "ChannelName": { + "documentation": "

The identifier for the channel you are working on.

", + "location": "uri", + "locationName": "channelName", + "shape": "__string" + }, + "Policy": { + "documentation": "

Adds an IAM policy that determines the permissions of your channel.
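An illustrative sketch of attaching such a policy; the account ID, channel ARN, and the chosen action are placeholders, not values from this model:

    import json
    import botocore.session

    policy = {
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": {"AWS": "arn:aws:iam::111122223333:root"},  # hypothetical account
            "Action": "mediatailor:GetChannelSchedule",              # illustrative action
            "Resource": "arn:aws:mediatailor:us-east-1:111122223333:channel/my-channel",
        }],
    }
    mediatailor = botocore.session.get_session().create_client("mediatailor", region_name="us-east-1")
    mediatailor.put_channel_policy(ChannelName="my-channel", Policy=json.dumps(policy))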

", + "shape": "__string" + } + }, + "required": [ + "ChannelName", + "Policy" + ], + "type": "structure" + }, + "PutChannelPolicyResponse": { + "members": {}, + "type": "structure" + }, + "PutPlaybackConfigurationRequest": { + "members": { + "AdDecisionServerUrl": { + "documentation": "

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing you can provide a static VAST URL. The maximum length is 25,000 characters.

", + "shape": "__string" + }, + "AvailSuppression": { + "documentation": "

The configuration for avail suppression, also known as ad suppression. For more information about ad suppression, see Ad Suppression.

", + "shape": "AvailSuppression" + }, + "Bumper": { + "documentation": "

The configuration for bumpers. Bumpers are short audio or video clips that play at the start or before the end of an ad break. To learn more about bumpers, see Bumpers.

", + "shape": "Bumper" + }, + "CdnConfiguration": { + "documentation": "

The configuration for using a content delivery network (CDN), like Amazon CloudFront, for content and ad segment management.

", + "shape": "CdnConfiguration" + }, + "ConfigurationAliases": { + "documentation": "

The player parameters and aliases used as dynamic variables during session initialization. For more information, see Domain Variables.

", + "shape": "ConfigurationAliasesRequest" + }, + "DashConfiguration": { + "documentation": "

The configuration for DASH content.

", + "shape": "DashConfigurationForPut" + }, + "LivePreRollConfiguration": { + "documentation": "

The configuration for pre-roll ad insertion.

", + "shape": "LivePreRollConfiguration" + }, + "ManifestProcessingRules": { + "documentation": "

The configuration for manifest processing rules. Manifest processing rules enable customization of the personalized manifests created by MediaTailor.

", + "shape": "ManifestProcessingRules" + }, + "Name": { + "documentation": "

The identifier for the playback configuration.

", + "shape": "__string" + }, + "PersonalizationThresholdSeconds": { + "documentation": "

Defines the maximum duration of underfilled ad time (in seconds) allowed in an ad break. If the duration of underfilled ad time exceeds the personalization threshold, then the personalization of the ad break is abandoned and the underlying content is shown. This feature applies to ad replacement in live and VOD streams, rather than ad insertion, because it relies on an underlying content stream. For more information about ad break behavior, including ad replacement and insertion, see Ad Behavior in AWS Elemental MediaTailor.

", + "shape": "__integerMin1" + }, + "SlateAdUrl": { + "documentation": "

The URL for a high-quality video asset to transcode and use to fill in time that's not used by ads. AWS Elemental MediaTailor shows the slate to fill in gaps in media content. Configuring the slate is optional for non-VPAID configurations. For VPAID, the slate is required because MediaTailor provides it in the slots that are designated for dynamic ad content. The slate must be a high-quality asset that contains both audio and video.

", + "shape": "__string" + }, + "Tags": { + "documentation": "

The tags to assign to the playback configuration.

", + "locationName": "tags", + "shape": "__mapOf__string" + }, + "TranscodeProfileName": { + "documentation": "

The name that is used to associate this playback configuration with a custom transcode profile. This overrides the dynamic transcoding defaults of MediaTailor. Use this only if you have already set up custom profiles with the help of AWS Support.

", + "shape": "__string" + }, + "VideoContentSourceUrl": { + "documentation": "

The URL prefix for the parent manifest for the stream, minus the asset ID. The maximum length is 512 characters.

", + "shape": "__string" + } + }, + "type": "structure" + }, + "PutPlaybackConfigurationResponse": { + "members": { + "AdDecisionServerUrl": { + "documentation": "

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing, you can provide a static VAST URL. The maximum length is 25,000 characters.

", + "shape": "__string" + }, + "AvailSuppression": { + "documentation": "

The configuration for avail suppression, also known as ad suppression. For more information about ad suppression, see Ad Suppression.

", + "shape": "AvailSuppression" + }, + "Bumper": { + "documentation": "

The configuration for bumpers. Bumpers are short audio or video clips that play at the start or before the end of an ad break. To learn more about bumpers, see Bumpers.

", + "shape": "Bumper" + }, + "CdnConfiguration": { + "documentation": "

The configuration for using a content delivery network (CDN), like Amazon CloudFront, for content and ad segment management.

", + "shape": "CdnConfiguration" + }, + "ConfigurationAliases": { + "documentation": "

The player parameters and aliases used as dynamic variables during session initialization. For more information, see Domain Variables.

", + "shape": "ConfigurationAliasesResponse" + }, + "DashConfiguration": { + "documentation": "

The configuration for DASH content.

", + "shape": "DashConfiguration" + }, + "HlsConfiguration": { + "documentation": "

The configuration for HLS content.

", + "shape": "HlsConfiguration" + }, + "LivePreRollConfiguration": { + "documentation": "

The configuration for pre-roll ad insertion.

", + "shape": "LivePreRollConfiguration" + }, + "ManifestProcessingRules": { + "documentation": "

The configuration for manifest processing rules. Manifest processing rules enable customization of the personalized manifests created by MediaTailor.

", + "shape": "ManifestProcessingRules" + }, + "Name": { + "documentation": "

The identifier for the playback configuration.

", + "shape": "__string" + }, + "PersonalizationThresholdSeconds": { + "documentation": "

Defines the maximum duration of underfilled ad time (in seconds) allowed in an ad break. If the duration of underfilled ad time exceeds the personalization threshold, then the personalization of the ad break is abandoned and the underlying content is shown. This feature applies to ad replacement in live and VOD streams, rather than ad insertion, because it relies on an underlying content stream. For more information about ad break behavior, including ad replacement and insertion, see Ad Behavior in AWS Elemental MediaTailor.

", + "shape": "__integerMin1" + }, + "PlaybackConfigurationArn": { + "documentation": "

The Amazon Resource Name (ARN) for the playback configuration.

", + "shape": "__string" + }, + "PlaybackEndpointPrefix": { + "documentation": "

The URL that the player accesses to get a manifest from AWS Elemental MediaTailor. This session will use server-side reporting.

", + "shape": "__string" + }, + "SessionInitializationEndpointPrefix": { + "documentation": "

The URL that the player uses to initialize a session that uses client-side reporting.

", + "shape": "__string" + }, + "SlateAdUrl": { + "documentation": "

The URL for a high-quality video asset to transcode and use to fill in time that's not used by ads. AWS Elemental MediaTailor shows the slate to fill in gaps in media content. Configuring the slate is optional for non-VPAID playback configurations. For VPAID, the slate is required because MediaTailor provides it in the slots designated for dynamic ad content. The slate must be a high-quality asset that contains both audio and video.

", + "shape": "__string" + }, + "Tags": { + "documentation": "

The tags assigned to the playback configuration.

", + "locationName": "tags", + "shape": "__mapOf__string" + }, + "TranscodeProfileName": { + "documentation": "

The name that is used to associate this playback configuration with a custom transcode profile. This overrides the dynamic transcoding defaults of MediaTailor. Use this only if you have already set up custom profiles with the help of AWS Support.

", + "shape": "__string" + }, + "VideoContentSourceUrl": { + "documentation": "

The URL prefix for the parent manifest for the stream, minus the asset ID. The maximum length is 512 characters.

", + "shape": "__string" + } + }, + "type": "structure" + }, + "RelativePosition": { + "enum": [ + "BEFORE_PROGRAM", + "AFTER_PROGRAM" + ], + "type": "string" + }, + "RequestOutputItem": { + "documentation": "

The output configuration for this channel.

", + "members": { + "DashPlaylistSettings": { + "documentation": "

DASH manifest configuration parameters.

", + "shape": "DashPlaylistSettings" + }, + "HlsPlaylistSettings": { + "documentation": "

HLS playlist configuration parameters.

", + "shape": "HlsPlaylistSettings" + }, + "ManifestName": { + "documentation": "

The name of the manifest for the channel. The name appears in the PlaybackUrl.

", + "shape": "__string" + }, + "SourceGroup": { + "documentation": "

A string used to match which HttpPackageConfiguration is used for each VodSource.
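Putting the output pieces together, a channel with one HLS output could be created roughly as follows (all names are assumptions; LOOP is the only playback mode this model lists):

    import botocore.session

    mediatailor = botocore.session.get_session().create_client("mediatailor", region_name="us-east-1")
    mediatailor.create_channel(
        ChannelName="my-channel",
        PlaybackMode="LOOP",
        Outputs=[{
            "ManifestName": "default",                             # appears in the output's PlaybackUrl
            "SourceGroup": "main",                                 # matched by each VOD source's HttpPackageConfiguration
            "HlsPlaylistSettings": {"ManifestWindowSeconds": 60},  # allowed range 30-3600
        }],
    )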

", + "shape": "__string" + } + }, + "required": [ + "ManifestName", + "SourceGroup" + ], + "type": "structure" + }, + "RequestOutputs": { + "documentation": "

The list of channel output configurations in the CreateChannel request.

", + "member": { + "shape": "RequestOutputItem" + }, + "type": "list" + }, + "ResponseOutputItem": { + "documentation": "

The output item configuration for a channel, including its manifest settings and playback URL.

", + "members": { + "DashPlaylistSettings": { + "documentation": "

DASH manifest configuration settings.

", + "shape": "DashPlaylistSettings" + }, + "HlsPlaylistSettings": { + "documentation": "

HLS manifest configuration settings.

", + "shape": "HlsPlaylistSettings" + }, + "ManifestName": { + "documentation": "

The name of the manifest for the channel that will appear in the channel output's playback URL.

", + "shape": "__string" + }, + "PlaybackUrl": { + "documentation": "

The URL used for playback by content players.

", + "shape": "__string" + }, + "SourceGroup": { + "documentation": "

A string used to associate a package configuration source group with a channel output.

", + "shape": "__string" + } + }, + "required": [ + "ManifestName", + "PlaybackUrl", + "SourceGroup" + ], + "type": "structure" + }, + "ResponseOutputs": { + "member": { + "shape": "ResponseOutputItem" + }, + "type": "list" + }, + "ScheduleConfiguration": { + "documentation": "

Schedule configuration parameters. A channel must be stopped before changes can be made to the schedule.

", + "members": { + "Transition": { + "documentation": "

Program transition configurations.

", + "shape": "Transition" + } + }, + "required": [ + "Transition" + ], + "type": "structure" + }, + "ScheduleEntry": { + "documentation": "

The properties for a schedule.

", + "members": { + "ApproximateDurationSeconds": { + "documentation": "

The approximate duration of this program, in seconds.

", + "shape": "__long" + }, + "ApproximateStartTime": { + "documentation": "

The approximate time that the program will start playing.

", + "shape": "__timestampUnix" + }, + "Arn": { + "documentation": "

The ARN of the program.

", + "shape": "__string" + }, + "ChannelName": { + "documentation": "

The name of the channel that uses this schedule.

", + "shape": "__string" + }, + "ProgramName": { + "documentation": "

The name of the program.

", + "shape": "__string" + }, + "SourceLocationName": { + "documentation": "

The name of the source location.

", + "shape": "__string" + }, + "VodSourceName": { + "documentation": "

The name of the VOD source.

", + "shape": "__string" + } + }, + "required": [ + "VodSourceName", + "ChannelName", + "SourceLocationName", + "Arn", + "ProgramName" + ], + "type": "structure" + }, + "SlateSource": { + "documentation": "

Slate VOD source configuration.

", + "members": { + "SourceLocationName": { + "documentation": "

The name of the source location where the slate VOD source is stored.

", + "shape": "__string" + }, + "VodSourceName": { + "documentation": "

The slate VOD source name. The VOD source must already exist in a source location before it can be used for slate.

", + "shape": "__string" + } + }, + "type": "structure" + }, + "SourceLocation": { + "documentation": "

The configuration and properties of a source location.

", + "members": { + "AccessConfiguration": { + "documentation": "

The access configuration for the source location.

", + "shape": "AccessConfiguration" + }, + "Arn": { + "documentation": "

The ARN of the SourceLocation.

", + "shape": "__string" + }, + "CreationTime": { + "documentation": "

The timestamp that indicates when the source location was created.

", + "shape": "__timestampUnix" + }, + "DefaultSegmentDeliveryConfiguration": { + "documentation": "

The default segment delivery configuration.

", + "shape": "DefaultSegmentDeliveryConfiguration" + }, + "HttpConfiguration": { + "documentation": "

The HTTP configuration for the source location.

", + "shape": "HttpConfiguration" + }, + "LastModifiedTime": { + "documentation": "

The timestamp that indicates when the source location was last modified.

", + "shape": "__timestampUnix" + }, + "SourceLocationName": { + "documentation": "

The name of the source location.

", + "shape": "__string" + }, + "Tags": { + "documentation": "

The tags assigned to the source location.

", + "locationName": "tags", + "shape": "__mapOf__string" + } + }, + "required": [ + "SourceLocationName", + "HttpConfiguration", + "Arn" + ], + "type": "structure" + }, + "SpliceInsertMessage": { + "documentation": "

Splice insert message configuration.

", + "members": { + "AvailNum": { + "documentation": "

This is written to splice_insert.avail_num, as defined in section 9.7.3.1 of the SCTE-35 specification. The default value is 0. Values must be between 0 and 256, inclusive.

", + "shape": "__integer" + }, + "AvailsExpected": { + "documentation": "

This is written to splice_insert.avails_expected, as defined in section 9.7.3.1 of the SCTE-35 specification. The default value is 0. Values must be between 0 and 256, inclusive.

", + "shape": "__integer" + }, + "SpliceEventId": { + "documentation": "

This is written to splice_insert.splice_event_id, as defined in section 9.7.3.1 of the SCTE-35 specification. The default value is 1.

", + "shape": "__integer" + }, + "UniqueProgramId": { + "documentation": "

This is written to splice_insert.unique_program_id, as defined in section 9.7.3.1 of the SCTE-35 specification. The default value is 0. Values must be between 0 and 256, inclusive.
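A hedged sketch of a single ad break that uses these splice insert fields; OffsetMillis, Slate, and MessageType are members of the AdBreak shape elsewhere in this model, the slate names and offset are assumptions, and the dictionary would be passed in a program's AdBreaks list (see the CreateProgram sketch below):

    ad_break = {
        "MessageType": "SPLICE_INSERT",
        "OffsetMillis": 120000,            # assumed: ad break 2 minutes into the program
        "Slate": {"SourceLocationName": "my-source-location", "VodSourceName": "slate-30s"},
        "SpliceInsertMessage": {
            "AvailNum": 1,
            "AvailsExpected": 1,
            "SpliceEventId": 1,
            "UniqueProgramId": 1,
        },
    }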

", + "shape": "__integer" + } + }, + "type": "structure" + }, + "StartChannelRequest": { + "members": { + "ChannelName": { + "documentation": "

The identifier for the channel you are working on.

", + "location": "uri", + "locationName": "channelName", + "shape": "__string" + } + }, + "required": [ + "ChannelName" + ], + "type": "structure" + }, + "StartChannelResponse": { + "members": {}, + "type": "structure" + }, + "StopChannelRequest": { + "members": { + "ChannelName": { + "documentation": "

The identifier for the channel you are working on.

", + "location": "uri", + "locationName": "channelName", + "shape": "__string" + } + }, + "required": [ + "ChannelName" + ], + "type": "structure" + }, + "StopChannelResponse": { + "members": {}, + "type": "structure" + }, + "TagResourceRequest": { + "members": { + "ResourceArn": { + "documentation": "

The Amazon Resource Name (ARN) for the playback configuration. You can get this from the response to any playback configuration request.

", + "location": "uri", + "locationName": "ResourceArn", + "shape": "__string" + }, + "Tags": { + "documentation": "

A comma-separated list of tag key:value pairs.

", + "locationName": "tags", + "shape": "__mapOf__string" + } + }, + "required": [ + "ResourceArn", + "Tags" + ], + "type": "structure" + }, + "Transition": { + "documentation": "

Program transition configuration.

", + "members": { + "RelativePosition": { + "documentation": "

The position where this program will be inserted relative to the RelativeProgram. Possible values are AFTER_PROGRAM and BEFORE_PROGRAM.

", + "shape": "RelativePosition" + }, + "RelativeProgram": { + "documentation": "

The name of the program that this program will be inserted next to, as defined by RelativePosition.

", + "shape": "__string" + }, + "Type": { + "documentation": "

When the program should be played. RELATIVE means that programs will be played back-to-back.
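A sketch of scheduling a program with this transition (the names are assumptions; RelativeProgram names the program this one is inserted after, and an AdBreaks list such as the sketch above could also be supplied):

    import botocore.session

    mediatailor = botocore.session.get_session().create_client("mediatailor", region_name="us-east-1")
    mediatailor.create_program(
        ChannelName="my-channel",
        ProgramName="episode-2",
        SourceLocationName="my-source-location",
        VodSourceName="episode-2",
        ScheduleConfiguration={
            "Transition": {
                "Type": "RELATIVE",                 # play back-to-back
                "RelativePosition": "AFTER_PROGRAM",
                "RelativeProgram": "episode-1",     # assumed existing program
            }
        },
    )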

", + "shape": "__string" + } + }, + "required": [ + "Type", + "RelativePosition" + ], + "type": "structure" + }, + "Type": { + "enum": [ + "DASH", + "HLS" + ], + "type": "string" + }, + "UntagResourceRequest": { + "members": { + "ResourceArn": { + "documentation": "

The Amazon Resource Name (ARN) for the playback configuration. You can get this from the response to any playback configuration request.

", + "location": "uri", + "locationName": "ResourceArn", + "shape": "__string" + }, + "TagKeys": { + "documentation": "

A comma-separated list of the tag keys to remove from the playback configuration.

", + "location": "querystring", + "locationName": "tagKeys", + "shape": "__listOf__string" + } + }, + "required": [ + "ResourceArn", + "TagKeys" + ], + "type": "structure" + }, + "UpdateChannelRequest": { + "members": { + "ChannelName": { + "documentation": "

The identifier for the channel you are working on.

", + "location": "uri", + "locationName": "channelName", + "shape": "__string" + }, + "Outputs": { + "documentation": "

The channel's output properties.

", + "shape": "RequestOutputs" + } + }, + "required": [ + "ChannelName", + "Outputs" + ], + "type": "structure" + }, + "UpdateChannelResponse": { + "members": { + "Arn": { + "documentation": "

The ARN of the channel.

", + "shape": "__string" + }, + "ChannelName": { + "documentation": "

The name of the channel.

", + "shape": "__string" + }, + "ChannelState": { + "documentation": "

Indicates whether the channel is in a running state or not.

", + "shape": "ChannelState" + }, + "CreationTime": { + "documentation": "

The timestamp of when the channel was created.

", + "shape": "__timestampUnix" + }, + "LastModifiedTime": { + "documentation": "

The timestamp of when the channel was last modified.

", + "shape": "__timestampUnix" + }, + "Outputs": { + "documentation": "

The channel's output properties.

", + "shape": "ResponseOutputs" + }, + "PlaybackMode": { + "documentation": "

The type of playback for this channel. The only supported value is LOOP.

", + "shape": "__string" + }, + "Tags": { + "documentation": "

The tags assigned to the channel.

", + "locationName": "tags", + "shape": "__mapOf__string" + } + }, + "type": "structure" + }, + "UpdateSourceLocationRequest": { + "members": { + "AccessConfiguration": { + "documentation": "

Access configuration parameters. Configures the type of authentication used to access content from your source location.

", + "shape": "AccessConfiguration" + }, + "DefaultSegmentDeliveryConfiguration": { + "documentation": "

The optional configuration for the host server that serves segments.

", + "shape": "DefaultSegmentDeliveryConfiguration" + }, + "HttpConfiguration": { + "documentation": "

The HTTP configuration for the source location.

", + "shape": "HttpConfiguration" + }, + "SourceLocationName": { + "documentation": "

The identifier for the source location you are working on.

", + "location": "uri", + "locationName": "sourceLocationName", + "shape": "__string" + } + }, + "required": [ + "SourceLocationName", + "HttpConfiguration" + ], + "type": "structure" + }, + "UpdateSourceLocationResponse": { + "members": { + "AccessConfiguration": { + "documentation": "

The access configuration for the source location.

", + "shape": "AccessConfiguration" + }, + "Arn": { + "documentation": "

The ARN of the source location.

", + "shape": "__string" + }, + "CreationTime": { + "documentation": "

The timestamp that indicates when the source location was created.

", + "shape": "__timestampUnix" + }, + "DefaultSegmentDeliveryConfiguration": { + "documentation": "

The default segment delivery configuration settings.

", + "shape": "DefaultSegmentDeliveryConfiguration" + }, + "HttpConfiguration": { + "documentation": "

The HTTP package configuration settings for the source location.

", + "shape": "HttpConfiguration" + }, + "LastModifiedTime": { + "documentation": "

The timestamp that indicates when the source location was last modified.

", + "shape": "__timestampUnix" + }, + "SourceLocationName": { + "documentation": "

The name of the source location.

", + "shape": "__string" + }, + "Tags": { + "documentation": "

The tags assigned to the source location.

", + "locationName": "tags", + "shape": "__mapOf__string" + } + }, + "type": "structure" + }, + "UpdateVodSourceRequest": { + "members": { + "HttpPackageConfigurations": { + "documentation": "

An array of HTTP package configurations for the VOD source on this account.

", + "shape": "HttpPackageConfigurations" + }, + "SourceLocationName": { + "documentation": "

The identifier for the source location you are working on.

", + "location": "uri", + "locationName": "sourceLocationName", + "shape": "__string" + }, + "VodSourceName": { + "documentation": "

The identifier for the VOD source you are working on.

", + "location": "uri", + "locationName": "vodSourceName", + "shape": "__string" + } + }, + "required": [ + "SourceLocationName", + "VodSourceName", + "HttpPackageConfigurations" + ], + "type": "structure" + }, + "UpdateVodSourceResponse": { + "members": { + "Arn": { + "documentation": "

The ARN of the VOD source.

", + "shape": "__string" + }, + "CreationTime": { + "documentation": "

The timestamp that indicates when the VOD source was created.

", + "shape": "__timestampUnix" + }, + "HttpPackageConfigurations": { + "documentation": "

The HTTP package configurations.

", + "shape": "HttpPackageConfigurations" + }, + "LastModifiedTime": { + "documentation": "

The timestamp that indicates when the VOD source was last modified.

", + "shape": "__timestampUnix" + }, + "SourceLocationName": { + "documentation": "

The name of the source location associated with the VOD source.

", + "shape": "__string" + }, + "Tags": { + "documentation": "

The tags assigned to the VOD source.

", + "locationName": "tags", + "shape": "__mapOf__string" + }, + "VodSourceName": { + "documentation": "

The name of the VOD source.

", + "shape": "__string" + } + }, + "type": "structure" + }, + "VodSource": { + "documentation": "

VOD source configuration parameters.

", + "members": { + "Arn": { + "documentation": "

The ARN for the VOD source.

", + "shape": "__string" + }, + "CreationTime": { + "documentation": "

The timestamp that indicates when the VOD source was created.

", + "shape": "__timestampUnix" + }, + "HttpPackageConfigurations": { + "documentation": "

The HTTP package configurations for the VOD source.

", + "shape": "HttpPackageConfigurations" + }, + "LastModifiedTime": { + "documentation": "

The timestamp that indicates when the VOD source was last modified.

", + "shape": "__timestampUnix" + }, + "SourceLocationName": { + "documentation": "

The name of the source location that the VOD source is associated with.

", + "shape": "__string" + }, + "Tags": { + "documentation": "

The tags assigned to the VOD source.

", + "locationName": "tags", + "shape": "__mapOf__string" + }, + "VodSourceName": { + "documentation": "

The name of the VOD source.

", + "shape": "__string" + } + }, + "required": [ + "VodSourceName", + "SourceLocationName", + "HttpPackageConfigurations", + "Arn" + ], + "type": "structure" + }, + "__boolean": { + "type": "boolean" + }, + "__integer": { + "type": "integer" + }, + "__integerMin1": { + "min": 1, + "type": "integer" + }, + "__listOfAdBreak": { + "member": { + "shape": "AdBreak" + }, + "type": "list" + }, + "__listOfChannel": { + "member": { + "shape": "Channel" + }, + "type": "list" + }, + "__listOfPlaybackConfiguration": { + "member": { + "shape": "PlaybackConfiguration" + }, + "type": "list" + }, + "__listOfScheduleEntry": { + "member": { + "shape": "ScheduleEntry" + }, + "type": "list" + }, + "__listOfSourceLocation": { + "member": { + "shape": "SourceLocation" + }, + "type": "list" + }, + "__listOfVodSource": { + "member": { + "shape": "VodSource" + }, + "type": "list" + }, + "__listOf__string": { + "member": { + "shape": "__string" + }, + "type": "list" + }, + "__long": { + "type": "long" + }, + "__mapOf__string": { + "key": { + "shape": "__string" + }, + "type": "map", + "value": { + "shape": "__string" + } + }, + "__string": { + "type": "string" + }, + "__timestampUnix": { + "timestampFormat": "unixTimestamp", + "type": "timestamp" } + } } diff --git a/botocore/data/mwaa/2020-07-01/service-2.json b/botocore/data/mwaa/2020-07-01/service-2.json index 92e6ba9b..7dec78b9 100644 --- a/botocore/data/mwaa/2020-07-01/service-2.json +++ b/botocore/data/mwaa/2020-07-01/service-2.json @@ -309,6 +309,10 @@ "shape":"MaxWorkers", "documentation":"

The maximum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers and the Fargate containers that run your tasks up to the number you specify in this field. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra containers leaving the one worker that is included with your environment.

" }, + "MinWorkers":{ + "shape":"MinWorkers", + "documentation":"

The minimum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers and the Fargate containers that run your tasks up to the number you specify in the MaxWorkers field. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra containers leaving the worker count you specify in the MinWorkers field.

" + }, "Name":{ "shape":"EnvironmentName", "documentation":"

The name of your MWAA environment.

", @@ -474,7 +478,11 @@ }, "MaxWorkers":{ "shape":"MaxWorkers", - "documentation":"

The Maximum Workers of the Amazon MWAA Environment.

" + "documentation":"

The maximum number of workers to run in your Amazon MWAA Environment.

" + }, + "MinWorkers":{ + "shape":"MinWorkers", + "documentation":"

The minimum number of workers to run in your Amazon MWAA Environment.

" }, "Name":{ "shape":"EnvironmentName", @@ -557,7 +565,9 @@ "AVAILABLE", "UPDATING", "DELETING", - "DELETED" + "DELETED", + "UNAVAILABLE", + "UPDATE_FAILED" ] }, "ErrorCode":{"type":"string"}, @@ -772,6 +782,11 @@ }, "documentation":"

Internal only API.

" }, + "MinWorkers":{ + "type":"integer", + "box":true, + "min":1 + }, "ModuleLoggingConfiguration":{ "type":"structure", "members":{ @@ -873,7 +888,7 @@ "type":"string", "max":1224, "min":1, - "pattern":"^arn:aws(-[a-z]+)?:s3:::airflow-[a-z0-9.\\-]+$" + "pattern":"^arn:aws(-[a-z]+)?:s3:::[a-z0-9.\\-]+$" }, "S3ObjectVersion":{ "type":"string", @@ -1087,7 +1102,11 @@ }, "MaxWorkers":{ "shape":"MaxWorkers", - "documentation":"

The Maximum Workers to update of your Amazon MWAA environment.

" + "documentation":"

The updated maximum number of workers to run in your Amazon MWAA environment.

" + }, + "MinWorkers":{ + "shape":"MinWorkers", + "documentation":"

The updated minimum number of workers to run in your Amazon MWAA environment.
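A minimal sketch of adjusting both worker bounds on an existing environment (the environment name is an assumption):

    import botocore.session

    mwaa = botocore.session.get_session().create_client("mwaa", region_name="us-east-1")
    mwaa.update_environment(
        Name="my-environment",
        MinWorkers=2,    # workers kept around when the task queue is empty
        MaxWorkers=10,   # upper bound that MWAA scales up to under load
    )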

" }, "Name":{ "shape":"EnvironmentName", diff --git a/botocore/data/network-firewall/2020-11-12/service-2.json b/botocore/data/network-firewall/2020-11-12/service-2.json index a3c89009..03520551 100644 --- a/botocore/data/network-firewall/2020-11-12/service-2.json +++ b/botocore/data/network-firewall/2020-11-12/service-2.json @@ -46,7 +46,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, {"shape":"InvalidTokenException"}, - {"shape":"InvalidOperationException"} + {"shape":"InvalidOperationException"}, + {"shape":"InsufficientCapacityException"} ], "documentation":"

Associates the specified subnets in the Amazon VPC with the firewall. You can specify one subnet for each of the Availability Zones that the VPC spans.

This request creates an AWS Network Firewall firewall endpoint in each of the subnets. To enable the firewall's protections, you must also modify the VPC's route tables for each subnet's Availability Zone, to redirect the traffic that's coming into and going out of the zone through the firewall endpoint.

" }, @@ -147,6 +148,7 @@ "input":{"shape":"DeleteResourcePolicyRequest"}, "output":{"shape":"DeleteResourcePolicyResponse"}, "errors":[ + {"shape":"InvalidRequestException"}, {"shape":"InternalServerError"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} @@ -228,6 +230,7 @@ "input":{"shape":"DescribeResourcePolicyRequest"}, "output":{"shape":"DescribeResourcePolicyResponse"}, "errors":[ + {"shape":"InvalidRequestException"}, {"shape":"InternalServerError"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} @@ -322,7 +325,8 @@ "input":{"shape":"ListTagsForResourceRequest"}, "output":{"shape":"ListTagsForResourceResponse"}, "errors":[ - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"} ], "documentation":"

Retrieves the tags associated with the specified resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

You can tag the AWS resources that you manage through AWS Network Firewall: firewalls, firewall policies, and rule groups.

" }, @@ -790,7 +794,7 @@ }, "Rules":{ "shape":"RulesString", - "documentation":"

The name of a file containing stateful rule group rules specifications in Suricata flat format, with one rule per line. Use this to import your existing Suricata compatible rule groups.

You must provide either this rules setting or a populated RuleGroup setting, but not both.

You can provide your rule group specification in a file through this setting when you create or update your rule group. The call response returns a RuleGroup object that Network Firewall has populated from your file. Network Firewall uses the file contents to populate the rule group rules, but does not maintain a reference to the file or use the file in any way after performing the create or update. If you call DescribeRuleGroup to retrieve the rule group, Network Firewall returns rules settings inside a RuleGroup object.

" + "documentation":"

A string containing stateful rule group rules specifications in Suricata flat format, with one rule per line. Use this to import your existing Suricata compatible rule groups.

You must provide either this rules setting or a populated RuleGroup setting, but not both.

You can provide your rule group specification in Suricata flat format through this setting when you create or update your rule group. The call response returns a RuleGroup object that Network Firewall has populated from your string.
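For illustration only (the rule group name and the Suricata rule are hypothetical), a Suricata flat-format string can be supplied through the Rules parameter of CreateRuleGroup with the boto3 client:

    import boto3

    nfw = boto3.client("network-firewall")
    # One Suricata rule per line; the rule below is a hypothetical example.
    suricata_rules = 'drop tcp $HOME_NET any -> $EXTERNAL_NET 23 (msg:"block telnet"; sid:1000001; rev:1;)'
    nfw.create_rule_group(
        RuleGroupName="suricata-import-example",  # hypothetical name
        Type="STATEFUL",
        Capacity=100,
        Rules=suricata_rules,
    )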

" }, "Type":{ "shape":"RuleGroupType", @@ -1252,7 +1256,7 @@ }, "StatelessFragmentDefaultActions":{ "shape":"StatelessActions", - "documentation":"

The actions to take on a fragmented packet if it doesn't match any of the stateless rules in the policy. If you want non-matching fragmented packets to be forwarded for stateful inspection, specify aws:forward_to_sfe.

You must specify one of the standard actions: aws:pass, aws:drop, or aws:forward_to_sfe. In addition, you can specify custom actions that are compatible with your standard section choice.

For example, you could specify [\"aws:pass\"] or you could specify [\"aws:pass\", “customActionName”]. For information about compatibility, see the custom action descriptions under CustomAction.

" + "documentation":"

The actions to take on a fragmented UDP packet if it doesn't match any of the stateless rules in the policy. Network Firewall only manages UDP packet fragments and silently drops packet fragments for other protocols. If you want non-matching fragmented UDP packets to be forwarded for stateful inspection, specify aws:forward_to_sfe.

You must specify one of the standard actions: aws:pass, aws:drop, or aws:forward_to_sfe. In addition, you can specify custom actions that are compatible with your standard section choice.

For example, you could specify [\"aws:pass\"] or you could specify [\"aws:pass\", “customActionName”]. For information about compatibility, see the custom action descriptions under CustomAction.

" }, "StatelessCustomActions":{ "shape":"CustomActions", @@ -1384,7 +1388,7 @@ "members":{ "Protocol":{ "shape":"StatefulRuleProtocol", - "documentation":"

The protocol to inspect for. To match with any protocol, specify ANY.

" + "documentation":"

The protocol to inspect for. To specify all, you can use IP, because all traffic on AWS and on the internet is IP.

" }, "Source":{ "shape":"Source", @@ -1717,10 +1721,14 @@ "members":{ "SyncStatus":{ "shape":"PerObjectSyncStatus", - "documentation":"

" + "documentation":"

Indicates whether this object is in sync with the version indicated in the update token.

" + }, + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

The current version of the object that is either in sync or pending synchronization.

" } }, - "documentation":"

" + "documentation":"

Provides configuration status for a single policy or rule group that is used for a firewall endpoint. Network Firewall provides each endpoint with the rules that are configured in the firewall policy. Each time you add a subnet or modify the associated firewall policy, Network Firewall synchronizes the rules in the endpoint, so it can properly filter network traffic. This is part of a SyncState for a firewall.

" }, "PerObjectSyncStatus":{ "type":"string", @@ -2021,7 +2029,7 @@ "members":{ "RulesString":{ "shape":"RulesString", - "documentation":"

Stateful inspection criteria, provided in Suricata compatible intrusion prevention system (IPS) rules. Suricata is an open-source network IPS that includes a standard rule-based language for network traffic inspection.

These rules contain the inspection criteria and the action to take for traffic that matches the criteria, so this type of rule group doesn't have a separate action setting.

You can provide the rules from a file that you've stored in an Amazon S3 bucket, or by providing the rules in a Suricata rules string. To import from Amazon S3, provide the fully qualified name of the file that contains the rules definitions. To provide a Suricata rule string, provide the complete, Suricata compatible rule.

" + "documentation":"

Stateful inspection criteria, provided in Suricata compatible intrusion prevention system (IPS) rules. Suricata is an open-source network IPS that includes a standard rule-based language for network traffic inspection.

These rules contain the inspection criteria and the action to take for traffic that matches the criteria, so this type of rule group doesn't have a separate action setting.

" }, "RulesSourceList":{ "shape":"RulesSourceList", @@ -2048,18 +2056,18 @@ "members":{ "Targets":{ "shape":"RuleTargets", - "documentation":"

The domains that you want to inspect for in your traffic flows. To provide multiple domains, separate them with commas.

" + "documentation":"

The domains that you want to inspect for in your traffic flows. To provide multiple domains, separate them with commas. Valid domain specifications are the following:

" }, "TargetTypes":{ "shape":"TargetTypes", - "documentation":"

" + "documentation":"

The protocols you want to inspect. Specify TLS_SNI for HTTPS. Specify HTTP_HOST for HTTP. You can specify either or both.

" }, "GeneratedRulesType":{ "shape":"GeneratedRulesType", "documentation":"

Whether you want to allow or deny access to the domains in your target list.

" } }, - "documentation":"

Stateful inspection criteria for a domain list rule group.

" + "documentation":"

Stateful inspection criteria for a domain list rule group.

For HTTPS traffic, domain filtering is SNI-based. It uses the server name indicator extension of the TLS handshake.

By default, Network Firewall domain list inspection only includes traffic coming from the VPC where you deploy the firewall. To inspect traffic from IP addresses outside of the deployment VPC, you set the HOME_NET rule variable to include the CIDR range of the deployment VPC plus the other CIDR ranges. For more information, see RuleVariables in this guide and Stateful domain list rule groups in AWS Network Firewall in the Network Firewall Developer Guide.
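As a sketch of the RulesSourceList shape described above (rule group name and domains are hypothetical), a domain list rule group can be created with the structured RuleGroup setting instead of a Suricata string:

    import boto3

    nfw = boto3.client("network-firewall")
    nfw.create_rule_group(
        RuleGroupName="domain-denylist-example",  # hypothetical name
        Type="STATEFUL",
        Capacity=100,
        RuleGroup={
            "RulesSource": {
                "RulesSourceList": {
                    "Targets": [".example.com", "badsite.example"],  # hypothetical domains
                    "TargetTypes": ["TLS_SNI", "HTTP_HOST"],
                    "GeneratedRulesType": "DENYLIST",
                }
            }
        },
    )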

" }, "RulesString":{ "type":"string", @@ -2646,7 +2654,7 @@ }, "Rules":{ "shape":"RulesString", - "documentation":"

The name of a file containing stateful rule group rules specifications in Suricata flat format, with one rule per line. Use this to import your existing Suricata compatible rule groups.

You must provide either this rules setting or a populated RuleGroup setting, but not both.

You can provide your rule group specification in a file through this setting when you create or update your rule group. The call response returns a RuleGroup object that Network Firewall has populated from your file. Network Firewall uses the file contents to populate the rule group rules, but does not maintain a reference to the file or use the file in any way after performing the create or update. If you call DescribeRuleGroup to retrieve the rule group, Network Firewall returns rules settings inside a RuleGroup object.

" + "documentation":"

A string containing stateful rule group rules specifications in Suricata flat format, with one rule per line. Use this to import your existing Suricata compatible rule groups.

You must provide either this rules setting or a populated RuleGroup setting, but not both.

You can provide your rule group specification in Suricata flat format through this setting when you create or update your rule group. The call response returns a RuleGroup object that Network Firewall has populated from your string.

" }, "Type":{ "shape":"RuleGroupType", @@ -2748,5 +2756,5 @@ "member":{"shape":"VpcId"} } }, - "documentation":"

This is the API Reference for AWS Network Firewall. This guide is for developers who need detailed information about the Network Firewall API actions, data types, and errors.

Network Firewall is a stateful, managed, network firewall and intrusion detection and prevention service for Amazon Virtual Private Cloud (Amazon VPC). With Network Firewall, you can filter traffic at the perimeter of your VPC. This includes filtering traffic going to and coming from an internet gateway, NAT gateway, or over VPN or AWS Direct Connect. Network Firewall uses rules that are compatible with Suricata, a free, open source intrusion detection system (IDS) engine. For information about Suricata, see the Suricata website.

You can use Network Firewall to monitor and protect your VPC traffic in a number of ways. The following are just a few examples:

To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and in Network Firewall. For information about using Amazon VPC, see Amazon VPC User Guide.

To start using Network Firewall, do the following:

  1. (Optional) If you don't already have a VPC that you want to protect, create it in Amazon VPC.

  2. In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a subnet for the sole use of Network Firewall.

  3. In Network Firewall, create stateless and stateful rule groups, to define the components of the network traffic filtering behavior that you want your firewall to have.

  4. In Network Firewall, create a firewall policy that uses your rule groups and specifies additional default traffic filtering behavior.

  5. In Network Firewall, create a firewall and specify your new firewall policy and VPC subnets. Network Firewall creates a firewall endpoint in each subnet that you specify, with the behavior that's defined in the firewall policy.

  6. In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall endpoints.

" + "documentation":"

This is the API Reference for AWS Network Firewall. This guide is for developers who need detailed information about the Network Firewall API actions, data types, and errors.

Network Firewall is a stateful, managed, network firewall and intrusion detection and prevention service for Amazon Virtual Private Cloud (Amazon VPC). With Network Firewall, you can filter traffic at the perimeter of your VPC. This includes filtering traffic going to and coming from an internet gateway, NAT gateway, or over VPN or AWS Direct Connect. Network Firewall uses rules that are compatible with Suricata, a free, open source intrusion detection system (IDS) engine. For information about Suricata, see the Suricata website.

You can use Network Firewall to monitor and protect your VPC traffic in a number of ways. The following are just a few examples:

To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and in Network Firewall. For information about using Amazon VPC, see Amazon VPC User Guide.

To start using Network Firewall, do the following:

  1. (Optional) If you don't already have a VPC that you want to protect, create it in Amazon VPC.

  2. In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a subnet for the sole use of Network Firewall.

  3. In Network Firewall, create stateless and stateful rule groups, to define the components of the network traffic filtering behavior that you want your firewall to have.

  4. In Network Firewall, create a firewall policy that uses your rule groups and specifies additional default traffic filtering behavior.

  5. In Network Firewall, create a firewall and specify your new firewall policy and VPC subnets. Network Firewall creates a firewall endpoint in each subnet that you specify, with the behavior that's defined in the firewall policy.

  6. In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall endpoints.
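A minimal sketch of steps 4 and 5 above using the boto3 client; all names, the VPC ID, and the subnet ID are hypothetical placeholders, and the policy shown is the smallest valid stateless configuration rather than a recommended setup:

    import boto3

    nfw = boto3.client("network-firewall")
    policy = nfw.create_firewall_policy(
        FirewallPolicyName="example-policy",  # hypothetical name
        FirewallPolicy={
            "StatelessDefaultActions": ["aws:forward_to_sfe"],
            "StatelessFragmentDefaultActions": ["aws:forward_to_sfe"],
        },
    )
    nfw.create_firewall(
        FirewallName="example-firewall",  # hypothetical name
        FirewallPolicyArn=policy["FirewallPolicyResponse"]["FirewallPolicyArn"],
        VpcId="vpc-0123456789abcdef0",    # hypothetical VPC ID
        SubnetMappings=[{"SubnetId": "subnet-0123456789abcdef0"}],  # hypothetical subnet
    )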

" } diff --git a/botocore/data/organizations/2016-11-28/service-2.json b/botocore/data/organizations/2016-11-28/service-2.json index 8ba6e2ee..06fd17a7 100644 --- a/botocore/data/organizations/2016-11-28/service-2.json +++ b/botocore/data/organizations/2016-11-28/service-2.json @@ -1320,6 +1320,10 @@ "INTERNAL_FAILURE", "GOVCLOUD_ACCOUNT_ALREADY_EXISTS", "MISSING_BUSINESS_VALIDATION", + "FAILED_BUSINESS_VALIDATION", + "PENDING_BUSINESS_VALIDATION", + "INVALID_IDENTITY_FOR_BUSINESS_VALIDATION", + "UNKNOWN_BUSINESS_VALIDATION", "MISSING_PAYMENT_INSTRUMENT" ] }, @@ -1411,7 +1415,7 @@ }, "FailureReason":{ "shape":"CreateAccountFailureReason", - "documentation":"

If the request failed, a description of the reason for the failure.

" + "documentation":"

If the request failed, a description of the reason for the failure.

" } }, "documentation":"

Contains the status about a CreateAccount or CreateGovCloudAccount request to create an AWS account or an AWS GovCloud (US) account in an organization.

" @@ -2078,7 +2082,7 @@ "Message":{"shape":"ExceptionMessage"}, "Reason":{"shape":"HandshakeConstraintViolationExceptionReason"} }, - "documentation":"

The requested operation would violate the constraint identified in the reason code.

Some of the reasons in the following list might not be applicable to this specific API or operation:

", + "documentation":"

The requested operation would violate the constraint identified in the reason code.

Some of the reasons in the following list might not be applicable to this specific API or operation:

", "exception":true }, "HandshakeConstraintViolationExceptionReason":{ @@ -2088,6 +2092,7 @@ "HANDSHAKE_RATE_LIMIT_EXCEEDED", "ALREADY_IN_AN_ORGANIZATION", "ORGANIZATION_ALREADY_HAS_ALL_FEATURES", + "ORGANIZATION_IS_ALREADY_PENDING_ALL_FEATURES_MIGRATION", "INVITE_DISABLED_DURING_ENABLE_ALL_FEATURES", "PAYMENT_INSTRUMENT_REQUIRED", "ORGANIZATION_FROM_DIFFERENT_SELLER_OF_RECORD", @@ -2239,7 +2244,7 @@ "Message":{"shape":"ExceptionMessage"}, "Reason":{"shape":"InvalidInputExceptionReason"} }, - "documentation":"

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation.

", + "documentation":"

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation.

", "exception":true }, "InvalidInputExceptionReason":{ @@ -2267,7 +2272,8 @@ "INVALID_ROLE_NAME", "INVALID_SYSTEM_TAGS_PARAMETER", "DUPLICATE_TAG_KEY", - "TARGET_NOT_SUPPORTED" + "TARGET_NOT_SUPPORTED", + "INVALID_EMAIL_ADDRESS_TARGET" ] }, "InviteAccountToOrganizationRequest":{ diff --git a/botocore/data/personalize-events/2018-03-22/service-2.json b/botocore/data/personalize-events/2018-03-22/service-2.json index f58b95e7..06501f96 100644 --- a/botocore/data/personalize-events/2018-03-22/service-2.json +++ b/botocore/data/personalize-events/2018-03-22/service-2.json @@ -22,7 +22,7 @@ "errors":[ {"shape":"InvalidInputException"} ], - "documentation":"

Records user interaction event data. For more information see event-record-api.

" + "documentation":"

Records user interaction event data. For more information see Recording Events.

" }, "PutItems":{ "name":"PutItems", @@ -33,9 +33,10 @@ "input":{"shape":"PutItemsRequest"}, "errors":[ {"shape":"InvalidInputException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} ], - "documentation":"

Adds one or more items to an Items dataset. For more information see importing-items.

" + "documentation":"

Adds one or more items to an Items dataset. For more information see Importing Items Incrementally.

" }, "PutUsers":{ "name":"PutUsers", @@ -46,9 +47,10 @@ "input":{"shape":"PutUsersRequest"}, "errors":[ {"shape":"InvalidInputException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} ], - "documentation":"

Adds one or more users to a Users dataset. For more information see importing-users.

" + "documentation":"

Adds one or more users to a Users dataset. For more information see Importing Users Incrementally.

" } }, "shapes":{ @@ -139,11 +141,11 @@ }, "properties":{ "shape":"ItemProperties", - "documentation":"

A string map of item-specific metadata. Each element in the map consists of a key-value pair. For example,

{\"numberOfRatings\": \"12\"}

The keys use camel case names that match the fields in the Items schema. In the above example, the numberOfRatings would match the 'NUMBER_OF_RATINGS' field defined in the Items schema.

", + "documentation":"

A string map of item-specific metadata. Each element in the map consists of a key-value pair. For example, {\"numberOfRatings\": \"12\"}.

The keys use camel case names that match the fields in the schema for the Items dataset. In the previous example, the numberOfRatings matches the 'NUMBER_OF_RATINGS' field defined in the Items schema. For categorical string data, to include multiple categories for a single item, separate each category with a pipe separator (|). For example, \\\"Horror|Action\\\".
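For illustration (the dataset ARN and item values are hypothetical), the properties field is passed to PutItems as a JSON-encoded string:

    import json
    import boto3

    events = boto3.client("personalize-events")
    events.put_items(
        datasetArn="arn:aws:personalize:us-east-1:111122223333:dataset/example/ITEMS",  # hypothetical ARN
        items=[{
            "itemId": "item-123",  # hypothetical item
            # Categorical values can carry multiple categories separated by "|".
            "properties": json.dumps({"numberOfRatings": "12", "genres": "Horror|Action"}),
        }],
    )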

", "jsonvalue":true } }, - "documentation":"

Represents item metadata added to an Items dataset using the PutItems API.

" + "documentation":"

Represents item metadata added to an Items dataset using the PutItems API. For more information see Importing Items Incrementally.

" }, "ItemId":{ "type":"string", @@ -158,7 +160,7 @@ }, "ItemProperties":{ "type":"string", - "max":1024, + "max":4096, "min":1 }, "PutEventsRequest":{ @@ -179,7 +181,7 @@ }, "sessionId":{ "shape":"StringType", - "documentation":"

The session ID associated with the user's visit. Your application generates the sessionId when a user first visits your website or uses your application. Amazon Personalize uses the sessionId to associate events with the user before they log in. For more information see event-record-api.

" + "documentation":"

The session ID associated with the user's visit. Your application generates the sessionId when a user first visits your website or uses your application. Amazon Personalize uses the sessionId to associate events with the user before they log in. For more information, see Recording Events.

" }, "eventList":{ "shape":"EventList", @@ -196,7 +198,7 @@ "members":{ "datasetArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Number (ARN) of the Items dataset you are adding the item or items to.

" + "documentation":"

The Amazon Resource Name (ARN) of the Items dataset you are adding the item or items to.

" }, "items":{ "shape":"ItemList", @@ -213,7 +215,7 @@ "members":{ "datasetArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Number (ARN) of the Users dataset you are adding the user or users to.

" + "documentation":"

The Amazon Resource Name (ARN) of the Users dataset you are adding the user or users to.

" }, "users":{ "shape":"UserList", @@ -226,6 +228,15 @@ "max":40, "min":1 }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The specified resource is in use.

", + "error":{"httpStatusCode":409}, + "exception":true + }, "ResourceNotFoundException":{ "type":"structure", "members":{ @@ -250,11 +261,11 @@ }, "properties":{ "shape":"UserProperties", - "documentation":"

A string map of user-specific metadata. Each element in the map consists of a key-value pair. For example,

{\"numberOfVideosWatched\": \"45\"}

The keys use camel case names that match the fields in the Users schema. In the above example, the numberOfVideosWatched would match the 'NUMBER_OF_VIDEOS_WATCHED' field defined in the Users schema.

", + "documentation":"

A string map of user-specific metadata. Each element in the map consists of a key-value pair. For example, {\"numberOfVideosWatched\": \"45\"}.

The keys use camel case names that match the fields in the schema for the Users dataset. In the previous example, the numberOfVideosWatched matches the 'NUMBER_OF_VIDEOS_WATCHED' field defined in the Users schema. For categorical string data, to include multiple categories for a single user, separate each category with a pipe separator (|). For example, \\\"Member|Frequent shopper\\\".

", "jsonvalue":true } }, - "documentation":"

Represents user metadata added to a Users dataset using the PutUsers API.

" + "documentation":"

Represents user metadata added to a Users dataset using the PutUsers API. For more information see Importing Users Incrementally.

" }, "UserId":{ "type":"string", @@ -269,9 +280,9 @@ }, "UserProperties":{ "type":"string", - "max":1024, + "max":4096, "min":1 } }, - "documentation":"

Amazon Personalize can consume real-time user event data, such as stream or click data, and use it for model training either alone or combined with historical data. For more information see recording-events.

" + "documentation":"

Amazon Personalize can consume real-time user event data, such as stream or click data, and use it for model training either alone or combined with historical data. For more information see Recording Events.

" } diff --git a/botocore/data/pinpoint/2016-12-01/service-2.json b/botocore/data/pinpoint/2016-12-01/service-2.json index 9fc2c36d..d4dd55f6 100644 --- a/botocore/data/pinpoint/2016-12-01/service-2.json +++ b/botocore/data/pinpoint/2016-12-01/service-2.json @@ -6457,9 +6457,21 @@ "shape": "MessageType", "documentation": "

The SMS message type. Valid values are TRANSACTIONAL (for messages that are critical or time-sensitive, such as one-time passwords) and PROMOTIONAL (for messages that aren't critical or time-sensitive, such as marketing messages).

" }, + "OriginationNumber": { + "shape": "__string", + "documentation": "

The long code to send the SMS message from. This value should be one of the dedicated long codes that's assigned to your AWS account. Although it isn't required, we recommend that you specify the long code using an E.164 format to ensure prompt and accurate delivery of the message. For example, +12065550100.

" + }, "SenderId": { "shape": "__string", "documentation": "

The sender ID to display on recipients' devices when they receive the SMS message.

" + }, + "EntityId": { + "shape": "__string", + "documentation": "

The entity ID or Principal Entity (PE) ID received from the regulatory body for sending SMS in your country.

" + }, + "TemplateId": { + "shape": "__string", + "documentation": "

The template ID received from the regulatory body for sending SMS in your country.

" } }, "documentation": "

Specifies the content and settings for an SMS message that's sent to recipients of a campaign.

" @@ -11048,9 +11060,21 @@ "shape": "MessageType", "documentation": "

The SMS message type. Valid values are TRANSACTIONAL (for messages that are critical or time-sensitive, such as one-time passwords) and PROMOTIONAL (for messages that aren't critical or time-sensitive, such as marketing messages).

" }, + "OriginationNumber": { + "shape": "__string", + "documentation": "

The long code to send the SMS message from. This value should be one of the dedicated long codes that's assigned to your AWS account. Although it isn't required, we recommend that you specify the long code using an E.164 format to ensure prompt and accurate delivery of the message. For example, +12065550100.

" + }, "SenderId": { "shape": "__string", "documentation": "

The sender ID to display as the sender of the message on a recipient's device. Support for sender IDs varies by country or region. For more information, see Supported Countries and Regions in the Amazon Pinpoint User Guide.

" + }, + "EntityId": { + "shape": "__string", + "documentation": "

The entity ID or Principal Entity (PE) ID received from the regulatory body for sending SMS in your country.

" + }, + "TemplateId": { + "shape": "__string", + "documentation": "

The template ID received from the regulatory body for sending SMS in your country.

" } }, "documentation": "

Specifies the sender ID and message type for an SMS message that's sent to participants in a journey.

" @@ -12265,6 +12289,14 @@ "Substitutions": { "shape": "MapOfListOf__string", "documentation": "

The message variables to use in the SMS message. You can override the default variables with individual address variables.

" + }, + "EntityId": { + "shape": "__string", + "documentation": "

The entity ID or Principal Entity (PE) ID received from the regulatory body for sending SMS in your country.

" + }, + "TemplateId": { + "shape": "__string", + "documentation": "

The template ID received from the regulatory body for sending SMS in your country.

" } }, "documentation": "

Specifies the default settings for a one-time SMS message that's sent directly to an endpoint.
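A hedged sketch of a direct SMS send that sets the new OriginationNumber, EntityId, and TemplateId fields; every identifier below is a placeholder, not a real project, number, or registration ID:

    import boto3

    pinpoint = boto3.client("pinpoint")
    pinpoint.send_messages(
        ApplicationId="exampleApplicationId",  # hypothetical Pinpoint project ID
        MessageRequest={
            "Addresses": {"+12065550199": {"ChannelType": "SMS"}},  # hypothetical recipient
            "MessageConfiguration": {
                "SMSMessage": {
                    "Body": "Your one-time password is 123456",
                    "MessageType": "TRANSACTIONAL",
                    "OriginationNumber": "+12065550100",  # hypothetical dedicated long code
                    "EntityId": "EXAMPLE_ENTITY_ID",      # regulatory entity ID placeholder
                    "TemplateId": "EXAMPLE_TEMPLATE_ID",  # regulatory template ID placeholder
                }
            },
        },
    )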

" diff --git a/botocore/data/qldb-session/2019-07-11/service-2.json b/botocore/data/qldb-session/2019-07-11/service-2.json index 20e4ce45..14eeea08 100644 --- a/botocore/data/qldb-session/2019-07-11/service-2.json +++ b/botocore/data/qldb-session/2019-07-11/service-2.json @@ -27,7 +27,8 @@ {"shape":"InvalidSessionException"}, {"shape":"OccConflictException"}, {"shape":"RateExceededException"}, - {"shape":"LimitExceededException"} + {"shape":"LimitExceededException"}, + {"shape":"CapacityExceededException"} ], "documentation":"

Sends a command to an Amazon QLDB ledger.

Instead of interacting directly with this API, we recommend using the QLDB driver or the QLDB shell to execute data transactions on a ledger.

  • If you are working with an AWS SDK, use the QLDB driver. The driver provides a high-level abstraction layer above this QLDB Session data plane and manages SendCommand API calls for you. For information and a list of supported programming languages, see Getting started with the driver in the Amazon QLDB Developer Guide.

  • If you are working with the AWS Command Line Interface (AWS CLI), use the QLDB shell. The shell is a command line interface that uses the QLDB driver to interact with a ledger. For information, see Accessing Amazon QLDB using the QLDB shell.
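Although the QLDB driver is the recommended path, a minimal low-level sketch with the boto3 qldb-session client looks like the following; the ledger name is hypothetical:

    import boto3

    qldb = boto3.client("qldb-session")
    # Start a session, then a transaction, using the session token that QLDB returns.
    session = qldb.send_command(StartSession={"LedgerName": "example-ledger"})  # hypothetical ledger
    token = session["StartSession"]["SessionToken"]
    txn = qldb.send_command(SessionToken=token, StartTransaction={})
    print(txn["StartTransaction"]["TransactionId"])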

" } @@ -58,6 +59,14 @@ "documentation":"

Returned if the request is malformed or contains an error such as an invalid parameter value or a missing required parameter.

", "exception":true }, + "CapacityExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Returned when the request exceeds the processing capacity of the ledger.

", + "exception":true + }, "CommitDigest":{"type":"blob"}, "CommitTransactionRequest":{ "type":"structure", @@ -198,11 +207,11 @@ "members":{ "ReadIOs":{ "shape":"ReadIOs", - "documentation":"

The number of read I/O requests that the command performed.

" + "documentation":"

The number of read I/O requests that the command made.

" }, "WriteIOs":{ "shape":"WriteIOs", - "documentation":"

The number of write I/O requests that the command performed.

" + "documentation":"

The number of write I/O requests that the command made.

" } }, "documentation":"

Contains I/O usage metrics for a command that was invoked.

" @@ -413,7 +422,7 @@ "members":{ "ProcessingTimeMilliseconds":{ "shape":"ProcessingTimeMilliseconds", - "documentation":"

The amount of time that was taken for the command to finish processing, measured in milliseconds.

" + "documentation":"

The amount of time that QLDB spent on processing the command, measured in milliseconds.

" } }, "documentation":"

Contains server-side performance information for a command. Amazon QLDB captures timing information between the times when it receives the request and when it sends the corresponding response.

" diff --git a/botocore/data/quicksight/2018-04-01/paginators-1.json b/botocore/data/quicksight/2018-04-01/paginators-1.json index ea142457..d339bbaf 100644 --- a/botocore/data/quicksight/2018-04-01/paginators-1.json +++ b/botocore/data/quicksight/2018-04-01/paginators-1.json @@ -1,3 +1,88 @@ { - "pagination": {} + "pagination": { + "ListAnalyses": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "AnalysisSummaryList" + }, + "ListDashboardVersions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "DashboardVersionSummaryList" + }, + "ListDashboards": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "DashboardSummaryList" + }, + "ListDataSets": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "DataSetSummaries" + }, + "ListDataSources": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "DataSources" + }, + "ListIngestions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Ingestions" + }, + "ListNamespaces": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Namespaces" + }, + "ListTemplateAliases": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "TemplateAliasList" + }, + "ListTemplateVersions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "TemplateVersionSummaryList" + }, + "ListTemplates": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "TemplateSummaryList" + }, + "ListThemeVersions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ThemeVersionSummaryList" + }, + "ListThemes": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ThemeSummaryList" + }, + "SearchAnalyses": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "AnalysisSummaryList" + }, + "SearchDashboards": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "DashboardSummaryList" + } + } } diff --git a/botocore/data/quicksight/2018-04-01/paginators-1.sdk-extras.json b/botocore/data/quicksight/2018-04-01/paginators-1.sdk-extras.json new file mode 100644 index 00000000..480a467d --- /dev/null +++ b/botocore/data/quicksight/2018-04-01/paginators-1.sdk-extras.json @@ -0,0 +1,91 @@ +{ + "version": 1.0, + "merge": { + "pagination": { + "ListAnalyses": { + "non_aggregate_keys": [ + "Status", + "RequestId" + ] + }, + "ListDashboardVersions": { + "non_aggregate_keys": [ + "Status", + "RequestId" + ] + }, + "ListTemplateAliases": { + "non_aggregate_keys": [ + "Status", + "RequestId" + ] + }, + "ListTemplateVersions": { + "non_aggregate_keys": [ + "Status", + "RequestId" + ] + }, + "ListTemplates": { + "non_aggregate_keys": [ + "Status", + "RequestId" + ] + }, + "ListThemeVersions": { + "non_aggregate_keys": [ + "Status", + "RequestId" + ] + }, + "ListThemes": { + "non_aggregate_keys": [ + "Status", + "RequestId" + ] + }, + "SearchAnalyses": { + "non_aggregate_keys": [ + "Status", + "RequestId" + ] + }, + "SearchDashboards": 
{ + "non_aggregate_keys": [ + "Status", + "RequestId" + ] + }, + "ListNamespaces": { + "non_aggregate_keys": [ + "Status", + "RequestId" + ] + }, + "ListIngestions": { + "non_aggregate_keys": [ + "Status", + "RequestId" + ] + }, + "ListDataSources": { + "non_aggregate_keys": [ + "Status", + "RequestId" + ] + }, + "ListDataSets": { + "non_aggregate_keys": [ + "Status", + "RequestId" + ] + }, + "ListDashboards": { + "non_aggregate_keys": [ + "Status", + "RequestId" + ] + } + } + } +} diff --git a/botocore/data/quicksight/2018-04-01/service-2.json b/botocore/data/quicksight/2018-04-01/service-2.json index 63a9f1f6..0525c757 100644 --- a/botocore/data/quicksight/2018-04-01/service-2.json +++ b/botocore/data/quicksight/2018-04-01/service-2.json @@ -2042,8 +2042,7 @@ "AnalysisName":{ "type":"string", "max":2048, - "min":1, - "pattern":"[\\u0020-\\u00FF]+" + "min":1 }, "AnalysisSearchFilter":{ "type":"structure", @@ -2066,7 +2065,8 @@ "AnalysisSearchFilterList":{ "type":"list", "member":{"shape":"AnalysisSearchFilter"}, - "max":1 + "max":1, + "min":1 }, "AnalysisSourceEntity":{ "type":"structure", @@ -2801,6 +2801,10 @@ "shape":"ColumnGroupList", "documentation":"

Groupings of columns that work together in certain QuickSight features. Currently, only geospatial hierarchy is supported.

" }, + "FieldFolders":{ + "shape":"FieldFolderMap", + "documentation":"

The folder that contains fields and nested subfolders for your dataset.

" + }, "Permissions":{ "shape":"ResourcePermissionList", "documentation":"

A list of resource permissions on the dataset.

" @@ -2874,7 +2878,7 @@ }, "Type":{ "shape":"DataSourceType", - "documentation":"

The type of the data source. Currently, the supported types for this operation are: ATHENA, AURORA, AURORA_POSTGRESQL, MARIADB, MYSQL, POSTGRESQL, PRESTO, REDSHIFT, S3, SNOWFLAKE, SPARK, SQLSERVER, TERADATA. Use ListDataSources to return a list of all data sources.

" + "documentation":"

The type of the data source. Currently, the supported types for this operation are: ATHENA, AURORA, AURORA_POSTGRESQL, AMAZON_ELASTICSEARCH, MARIADB, MYSQL, POSTGRESQL, PRESTO, REDSHIFT, S3, SNOWFLAKE, SPARK, SQLSERVER, TERADATA. Use ListDataSources to return a list of all data sources.

AMAZON_ELASTICSEARCH is for the Amazon Elasticsearch Service.

" }, "DataSourceParameters":{ "shape":"DataSourceParameters", @@ -3608,8 +3612,7 @@ "DashboardName":{ "type":"string", "max":2048, - "min":1, - "pattern":"[\\u0020-\\u00FF]+" + "min":1 }, "DashboardPublishOptions":{ "type":"structure", @@ -3651,7 +3654,8 @@ "DashboardSearchFilterList":{ "type":"list", "member":{"shape":"DashboardSearchFilter"}, - "max":1 + "max":1, + "min":1 }, "DashboardSourceEntity":{ "type":"structure", @@ -3873,6 +3877,10 @@ "shape":"ColumnGroupList", "documentation":"

Groupings of columns that work together in certain Amazon QuickSight features. Currently, only geospatial hierarchy is supported.

" }, + "FieldFolders":{ + "shape":"FieldFolderMap", + "documentation":"

The folder that contains fields and nested subfolders for your dataset.

" + }, "RowLevelPermissionDataSet":{ "shape":"RowLevelPermissionDataSet", "documentation":"

The row-level security configuration for the dataset.

" @@ -6005,6 +6013,34 @@ "max":4096, "min":1 }, + "FieldFolder":{ + "type":"structure", + "members":{ + "description":{ + "shape":"FieldFolderDescription", + "documentation":"

The description for a field folder.

" + }, + "columns":{ + "shape":"FolderColumnList", + "documentation":"

A folder has a list of columns. A column can only be in one folder.

" + } + }, + "documentation":"

A FieldFolder element is a folder that contains fields and nested subfolders.
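As an illustrative sketch (the account ID, dataset ID, folder path, and column names are hypothetical, and the physical table map is abbreviated), the FieldFolderMap is passed to CreateDataSet keyed by folder path:

    import boto3

    qs = boto3.client("quicksight")
    qs.create_data_set(
        AwsAccountId="111122223333",   # hypothetical account
        DataSetId="example-dataset",   # hypothetical dataset ID
        Name="Example dataset",
        ImportMode="SPICE",
        PhysicalTableMap={},           # abbreviated; a real call defines at least one physical table
        FieldFolders={
            "Sales/Regional": {
                "description": "Regional sales measures",
                "columns": ["Revenue", "Region"],  # hypothetical columns
            }
        },
    )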

" + }, + "FieldFolderDescription":{ + "type":"string", + "max":500 + }, + "FieldFolderMap":{ + "type":"map", + "key":{"shape":"FieldFolderPath"}, + "value":{"shape":"FieldFolder"} + }, + "FieldFolderPath":{ + "type":"string", + "max":1000, + "min":1 + }, "FileFormat":{ "type":"string", "enum":[ @@ -6031,6 +6067,11 @@ "type":"string", "enum":["StringEquals"] }, + "FolderColumnList":{ + "type":"list", + "member":{"shape":"String"}, + "max":5000 + }, "GeoSpatialColumnGroup":{ "type":"structure", "required":[ @@ -8585,18 +8626,18 @@ "members":{ "Namespace":{ "shape":"Namespace", - "documentation":"

The namespace associated with the row-level permissions dataset.

" + "documentation":"

The namespace associated with the dataset that contains permissions for RLS.

" }, "Arn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the permission dataset.

" + "documentation":"

The Amazon Resource Name (ARN) of the dataset that contains permissions for RLS.

" }, "PermissionPolicy":{ "shape":"RowLevelPermissionPolicy", - "documentation":"

Permission policy.

" + "documentation":"

The type of permissions to use when interpreting the permissions for RLS. DENY_ACCESS is included for backward compatibility only.

" } }, - "documentation":"

The row-level security configuration for the dataset.

" + "documentation":"

Information about a dataset that contains permissions for row-level security (RLS). The permissions dataset maps fields to users or groups. For more information, see Using Row-Level Security (RLS) to Restrict Access to a Dataset in the Amazon QuickSight User Guide.

The option to deny permissions by setting PermissionPolicy to DENY_ACCESS is not supported for new RLS datasets.

" }, "RowLevelPermissionPolicy":{ "type":"string", @@ -8635,7 +8676,7 @@ "members":{ "DataSourceArn":{ "shape":"Arn", - "documentation":"

The amazon Resource Name (ARN) for the data source.

" + "documentation":"

The Amazon Resource Name (ARN) for the data source.

" }, "UploadSettings":{ "shape":"UploadSettings", @@ -9108,8 +9149,7 @@ "TemplateName":{ "type":"string", "max":2048, - "min":1, - "pattern":"[\\u0020-\\u00FF]+" + "min":1 }, "TemplateSourceAnalysis":{ "type":"structure", @@ -10221,6 +10261,10 @@ "shape":"ColumnGroupList", "documentation":"

Groupings of columns that work together in certain QuickSight features. Currently, only geospatial hierarchy is supported.

" }, + "FieldFolders":{ + "shape":"FieldFolderMap", + "documentation":"

The folder that contains fields and nested subfolders for your dataset.

" + }, "RowLevelPermissionDataSet":{ "shape":"RowLevelPermissionDataSet", "documentation":"

The row-level security configuration for the data you want to create.

" diff --git a/botocore/data/rds/2014-10-31/paginators-1.json b/botocore/data/rds/2014-10-31/paginators-1.json index af1667ff..7c11906e 100644 --- a/botocore/data/rds/2014-10-31/paginators-1.json +++ b/botocore/data/rds/2014-10-31/paginators-1.json @@ -210,6 +210,12 @@ "limit_key": "MaxRecords", "output_token": "Marker", "result_key": "ExportTasks" + }, + "DescribeDBProxyEndpoints": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "DBProxyEndpoints" } } } diff --git a/botocore/data/rds/2014-10-31/service-2.json b/botocore/data/rds/2014-10-31/service-2.json index b5cfb72c..0e070d04 100644 --- a/botocore/data/rds/2014-10-31/service-2.json +++ b/botocore/data/rds/2014-10-31/service-2.json @@ -454,6 +454,26 @@ ], "documentation":"

Creates a new DB proxy.

" }, + "CreateDBProxyEndpoint":{ + "name":"CreateDBProxyEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBProxyEndpointRequest"}, + "output":{ + "shape":"CreateDBProxyEndpointResponse", + "resultWrapper":"CreateDBProxyEndpointResult" + }, + "errors":[ + {"shape":"InvalidSubnet"}, + {"shape":"DBProxyNotFoundFault"}, + {"shape":"DBProxyEndpointAlreadyExistsFault"}, + {"shape":"DBProxyEndpointQuotaExceededFault"}, + {"shape":"InvalidDBProxyStateFault"} + ], + "documentation":"

Creates a DBProxyEndpoint. Only applies to proxies that are associated with Aurora DB clusters. You can use DB proxy endpoints to specify read/write or read-only access to the DB cluster. You can also use DB proxy endpoints to access a DB proxy through a different VPC than the proxy's default VPC.

" + }, "CreateDBSecurityGroup":{ "name":"CreateDBSecurityGroup", "http":{ @@ -720,7 +740,24 @@ {"shape":"DBProxyNotFoundFault"}, {"shape":"InvalidDBProxyStateFault"} ], - "documentation":"

Deletes an existing proxy.

" + "documentation":"

Deletes an existing DB proxy.

" + }, + "DeleteDBProxyEndpoint":{ + "name":"DeleteDBProxyEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBProxyEndpointRequest"}, + "output":{ + "shape":"DeleteDBProxyEndpointResponse", + "resultWrapper":"DeleteDBProxyEndpointResult" + }, + "errors":[ + {"shape":"DBProxyEndpointNotFoundFault"}, + {"shape":"InvalidDBProxyEndpointStateFault"} + ], + "documentation":"

Deletes a DBProxyEndpoint. Doing so removes the ability to access the DB proxy using the endpoint that you defined. The endpoint that you delete might have provided capabilities such as read/write or read-only operations, or using a different VPC than the DB proxy's default VPC.

" }, "DeleteDBSecurityGroup":{ "name":"DeleteDBSecurityGroup", @@ -1115,6 +1152,23 @@ ], "documentation":"

Returns information about DB proxies.

" }, + "DescribeDBProxyEndpoints":{ + "name":"DescribeDBProxyEndpoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBProxyEndpointsRequest"}, + "output":{ + "shape":"DescribeDBProxyEndpointsResponse", + "resultWrapper":"DescribeDBProxyEndpointsResult" + }, + "errors":[ + {"shape":"DBProxyNotFoundFault"}, + {"shape":"DBProxyEndpointNotFoundFault"} + ], + "documentation":"

Returns information about DB proxy endpoints.

" + }, "DescribeDBProxyTargetGroups":{ "name":"DescribeDBProxyTargetGroups", "http":{ @@ -1487,6 +1541,25 @@ ], "documentation":"

Forces a failover for a DB cluster.

A failover for a DB cluster promotes one of the Aurora Replicas (read-only instances) in the DB cluster to be the primary instance (the cluster writer).

Amazon Aurora will automatically fail over to an Aurora Replica, if one exists, when the primary instance fails. You can force a failover when you want to simulate a failure of a primary instance for testing. Because each instance in a DB cluster has its own endpoint address, you will need to clean up and re-establish any existing connections that use those endpoint addresses when the failover is complete.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.

" }, + "FailoverGlobalCluster":{ + "name":"FailoverGlobalCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"FailoverGlobalClusterMessage"}, + "output":{ + "shape":"FailoverGlobalClusterResult", + "resultWrapper":"FailoverGlobalClusterResult" + }, + "errors":[ + {"shape":"GlobalClusterNotFoundFault"}, + {"shape":"InvalidGlobalClusterStateFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"DBClusterNotFoundFault"} + ], + "documentation":"

Initiates the failover process for an Aurora global database (GlobalCluster).

A failover for an Aurora global database promotes one of the secondary read-only DB clusters to be the primary DB cluster and demotes the current primary DB cluster to a secondary (read-only) DB cluster. In other words, the roles of the current primary DB cluster and the selected (target) DB cluster are switched. The selected secondary DB cluster assumes full read/write capabilities for the Aurora global database.

For more information about failing over an Amazon Aurora global database, see Managed planned failover for Amazon Aurora global databases in the Amazon Aurora User Guide.

This action applies to GlobalCluster (Aurora global databases) only. Use this action only on healthy Aurora global databases with running Aurora DB clusters and no Region-wide outages, to test disaster recovery scenarios or to reconfigure your Aurora global database topology.
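An illustrative call through the boto3 RDS client that requests a managed planned failover to a secondary cluster; both identifiers are hypothetical:

    import boto3

    rds = boto3.client("rds")
    rds.failover_global_cluster(
        GlobalClusterIdentifier="example-global-cluster",  # hypothetical global database name
        TargetDbClusterIdentifier=(
            "arn:aws:rds:us-west-2:111122223333:cluster:example-secondary"  # hypothetical cluster ARN
        ),
    )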

" + }, "ImportInstallationMedia":{ "name":"ImportInstallationMedia", "http":{ @@ -1709,6 +1782,25 @@ ], "documentation":"

Changes the settings for an existing DB proxy.

" }, + "ModifyDBProxyEndpoint":{ + "name":"ModifyDBProxyEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBProxyEndpointRequest"}, + "output":{ + "shape":"ModifyDBProxyEndpointResponse", + "resultWrapper":"ModifyDBProxyEndpointResult" + }, + "errors":[ + {"shape":"DBProxyEndpointNotFoundFault"}, + {"shape":"DBProxyEndpointAlreadyExistsFault"}, + {"shape":"InvalidDBProxyEndpointStateFault"}, + {"shape":"InvalidDBProxyStateFault"} + ], + "documentation":"

Changes the settings for an existing DB proxy endpoint.

" + }, "ModifyDBProxyTargetGroup":{ "name":"ModifyDBProxyTargetGroup", "http":{ @@ -2751,6 +2843,12 @@ "locationName":"AvailableProcessorFeature" } }, + "AwsBackupRecoveryPointArn":{ + "type":"string", + "max":350, + "min":43, + "pattern":"^arn:aws[a-z-]*:backup:[-a-z0-9]+:[0-9]{12}:[-a-z]+:([a-z0-9\\-]+:)?[a-z][a-z0-9\\-]{0,255}$" + }, "BacktrackDBClusterMessage":{ "type":"structure", "required":[ @@ -3312,7 +3410,7 @@ }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"

The list of log types that need to be enabled for exporting to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

Aurora MySQL

Possible values are audit, error, general, and slowquery.

Aurora PostgreSQL

Possible values are postgresql and upgrade.

" + "documentation":"

The list of log types that need to be enabled for exporting to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

Aurora MySQL

Possible values are audit, error, general, and slowquery.

Aurora PostgreSQL

Possible value is postgresql.

" }, "EngineMode":{ "shape":"String", @@ -3348,7 +3446,7 @@ }, "EnableGlobalWriteForwarding":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether to enable write operations to be forwarded from this cluster to the primary cluster in an Aurora global database. The resulting changes are replicated back to this cluster. This parameter only applies to DB clusters that are secondary clusters in an Aurora global database. By default, Aurora disallows write operations for secondary clusters.

" + "documentation":"

A value that indicates whether to enable this DB cluster to forward write operations to the primary cluster of an Aurora global database (GlobalCluster). By default, write operations are not allowed on Aurora DB clusters that are secondary clusters in an Aurora global database.

You can set this value only on Aurora DB clusters that are members of an Aurora global database. With this parameter enabled, a secondary cluster can forward writes to the current primary cluster and the resulting changes are replicated back to this cluster. For the primary DB cluster of an Aurora global database, this value is used immediately if the primary is demoted by the FailoverGlobalCluster API operation, but it does nothing until then.

" } }, "documentation":"

" @@ -3802,6 +3900,46 @@ "DBParameterGroup":{"shape":"DBParameterGroup"} } }, + "CreateDBProxyEndpointRequest":{ + "type":"structure", + "required":[ + "DBProxyName", + "DBProxyEndpointName", + "VpcSubnetIds" + ], + "members":{ + "DBProxyName":{ + "shape":"DBProxyName", + "documentation":"

The name of the DB proxy associated with the DB proxy endpoint that you create.

" + }, + "DBProxyEndpointName":{ + "shape":"DBProxyEndpointName", + "documentation":"

The name of the DB proxy endpoint to create.

" + }, + "VpcSubnetIds":{ + "shape":"StringList", + "documentation":"

The VPC subnet IDs for the DB proxy endpoint that you create. You can specify a different set of subnet IDs than for the original DB proxy.

" + }, + "VpcSecurityGroupIds":{ + "shape":"StringList", + "documentation":"

The VPC security group IDs for the DB proxy endpoint that you create. You can specify a different set of security group IDs than for the original DB proxy. The default is the default security group for the VPC.

" + }, + "TargetRole":{ + "shape":"DBProxyEndpointTargetRole", + "documentation":"

A value that indicates whether the DB proxy endpoint can be used for read/write or read-only operations. The default is READ_WRITE.

" + }, + "Tags":{"shape":"TagList"} + } + }, + "CreateDBProxyEndpointResponse":{ + "type":"structure", + "members":{ + "DBProxyEndpoint":{ + "shape":"DBProxyEndpoint", + "documentation":"

The DBProxyEndpoint object that is created by the API operation. The DB proxy endpoint that you create might provide capabilities such as read/write or read-only operations, or using a different VPC than the proxy's default VPC.
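A minimal sketch of the new operation through the boto3 RDS client; the proxy name, endpoint name, and subnet IDs are placeholders:

    import boto3

    rds = boto3.client("rds")
    resp = rds.create_db_proxy_endpoint(
        DBProxyName="example-proxy",             # hypothetical existing proxy
        DBProxyEndpointName="example-proxy-ro",  # hypothetical endpoint name
        VpcSubnetIds=["subnet-0123456789abcdef0", "subnet-0fedcba9876543210"],  # placeholders
        TargetRole="READ_ONLY",
    )
    print(resp["DBProxyEndpoint"]["Status"], resp["DBProxyEndpoint"]["Endpoint"])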

" + } + } + }, "CreateDBProxyRequest":{ "type":"structure", "required":[ @@ -4573,6 +4711,12 @@ }, "exception":true }, + "DBClusterIdentifier":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[A-Za-z][0-9A-Za-z-:._]*" + }, "DBClusterList":{ "type":"list", "member":{ @@ -4831,7 +4975,11 @@ }, "Engine":{ "shape":"String", - "documentation":"

Specifies the name of the database engine.

" + "documentation":"

Specifies the name of the database engine for this DB cluster snapshot.

" + }, + "EngineMode":{ + "shape":"String", + "documentation":"

Provides the engine mode of the database engine for this DB cluster snapshot.

" }, "AllocatedStorage":{ "shape":"Integer", @@ -4855,7 +5003,7 @@ }, "MasterUsername":{ "shape":"String", - "documentation":"

Provides the master username for the DB cluster snapshot.

" + "documentation":"

Provides the master username for this DB cluster snapshot.

" }, "EngineVersion":{ "shape":"String", @@ -5329,6 +5477,10 @@ "CustomerOwnedIpEnabled":{ "shape":"BooleanOptional", "documentation":"

Specifies whether a customer-owned IP address (CoIP) is enabled for an RDS on Outposts DB instance.

A CoIP provides local or external connectivity to resources in your Outpost subnets through your on-premises network. For some use cases, a CoIP can provide lower latency for connections to the DB instance from outside of its virtual private cloud (VPC) on your local network.

For more information about RDS on Outposts, see Working with Amazon RDS on AWS Outposts in the Amazon RDS User Guide.

For more information about CoIPs, see Customer-owned IP addresses in the AWS Outposts User Guide.

" + }, + "AwsBackupRecoveryPointArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the recovery point in AWS Backup.

" } }, "documentation":"

Contains the details of an Amazon RDS DB instance.

This data type is used as a response element in the DescribeDBInstances action.

", @@ -5798,6 +5950,10 @@ "shape":"String", "documentation":"

The engine family applies to MySQL and PostgreSQL for both RDS and Aurora.

" }, + "VpcId":{ + "shape":"String", + "documentation":"

Provides the VPC ID of the DB proxy.

" + }, "VpcSecurityGroupIds":{ "shape":"StringList", "documentation":"

Provides a list of VPC security groups that the proxy belongs to.

" @@ -5816,7 +5972,7 @@ }, "Endpoint":{ "shape":"String", - "documentation":"

The endpoint that you can use to connect to the proxy. You include the endpoint value in the connection string for a database client application.

" + "documentation":"

The endpoint that you can use to connect to the DB proxy. You include the endpoint value in the connection string for a database client application.

" }, "RequireTLS":{ "shape":"Boolean", @@ -5847,16 +6003,136 @@ }, "documentation":"

The specified proxy name must be unique for all proxies owned by your AWS account in the specified AWS Region.

", "error":{ - "code":"DBProxyTargetExistsFault", + "code":"DBProxyAlreadyExistsFault", "httpStatusCode":400, "senderFault":true }, "exception":true }, + "DBProxyEndpoint":{ + "type":"structure", + "members":{ + "DBProxyEndpointName":{ + "shape":"String", + "documentation":"

The name for the DB proxy endpoint. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it can't end with a hyphen or contain two consecutive hyphens.

" + }, + "DBProxyEndpointArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) for the DB proxy endpoint.

" + }, + "DBProxyName":{ + "shape":"String", + "documentation":"

The identifier for the DB proxy that is associated with this DB proxy endpoint.

" + }, + "Status":{ + "shape":"DBProxyEndpointStatus", + "documentation":"

The current status of this DB proxy endpoint. A status of available means the endpoint is ready to handle requests. Other values indicate that you must wait for the endpoint to be ready, or take some action to resolve an issue.

" + }, + "VpcId":{ + "shape":"String", + "documentation":"

Provides the VPC ID of the DB proxy endpoint.

" + }, + "VpcSecurityGroupIds":{ + "shape":"StringList", + "documentation":"

Provides a list of VPC security groups that the DB proxy endpoint belongs to.

" + }, + "VpcSubnetIds":{ + "shape":"StringList", + "documentation":"

The EC2 subnet IDs for the DB proxy endpoint.

" + }, + "Endpoint":{ + "shape":"String", + "documentation":"

The endpoint that you can use to connect to the DB proxy. You include the endpoint value in the connection string for a database client application.

" + }, + "CreatedDate":{ + "shape":"TStamp", + "documentation":"

The date and time when the DB proxy endpoint was first created.

" + }, + "TargetRole":{ + "shape":"DBProxyEndpointTargetRole", + "documentation":"

A value that indicates whether the DB proxy endpoint can be used for read/write or read-only operations.

" + }, + "IsDefault":{ + "shape":"Boolean", + "documentation":"

A value that indicates whether this endpoint is the default endpoint for the associated DB proxy. Default DB proxy endpoints always have read/write capability. Other endpoints that you associate with the DB proxy can be either read/write or read-only.

" + } + }, + "documentation":"

The data structure representing an endpoint associated with a DB proxy. RDS automatically creates one endpoint for each DB proxy. For Aurora DB clusters, you can associate additional endpoints with the same DB proxy. These endpoints can be read/write or read-only. They can also reside in different VPCs than the associated DB proxy.

This data type is used as a response element in the DescribeDBProxyEndpoints operation.

" + }, + "DBProxyEndpointAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified DB proxy endpoint name must be unique for all DB proxy endpoints owned by your AWS account in the specified AWS Region.

", + "error":{ + "code":"DBProxyEndpointAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBProxyEndpointList":{ + "type":"list", + "member":{"shape":"DBProxyEndpoint"} + }, + "DBProxyEndpointName":{ + "type":"string", + "max":63, + "min":1, + "pattern":"[a-zA-Z][a-zA-Z0-9]*(-[a-zA-Z0-9]+)*" + }, + "DBProxyEndpointNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The DB proxy endpoint doesn't exist.

", + "error":{ + "code":"DBProxyEndpointNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBProxyEndpointQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The DB proxy already has the maximum number of endpoints.

", + "error":{ + "code":"DBProxyEndpointQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBProxyEndpointStatus":{ + "type":"string", + "enum":[ + "available", + "modifying", + "incompatible-network", + "insufficient-resource-limits", + "creating", + "deleting" + ] + }, + "DBProxyEndpointTargetRole":{ + "type":"string", + "enum":[ + "READ_WRITE", + "READ_ONLY" + ] + }, "DBProxyList":{ "type":"list", "member":{"shape":"DBProxy"} }, + "DBProxyName":{ + "type":"string", + "max":63, + "min":1, + "pattern":"[a-zA-Z][a-zA-Z0-9]*(-[a-zA-Z0-9]+)*" + }, "DBProxyNotFoundFault":{ "type":"structure", "members":{ @@ -5922,6 +6198,10 @@ "shape":"TargetType", "documentation":"

Specifies the kind of database, such as an RDS DB instance or an Aurora DB cluster, that the target represents.

" }, + "Role":{ + "shape":"TargetRole", + "documentation":"

A value that indicates whether the target of the proxy can be used for read/write or read-only operations.

" + }, "TargetHealth":{ "shape":"TargetHealth", "documentation":"

Information about the connection health of the RDS Proxy target.

" @@ -6612,6 +6892,25 @@ }, "documentation":"

" }, + "DeleteDBProxyEndpointRequest":{ + "type":"structure", + "required":["DBProxyEndpointName"], + "members":{ + "DBProxyEndpointName":{ + "shape":"DBProxyEndpointName", + "documentation":"

The name of the DB proxy endpoint to delete.

" + } + } + }, + "DeleteDBProxyEndpointResponse":{ + "type":"structure", + "members":{ + "DBProxyEndpoint":{ + "shape":"DBProxyEndpoint", + "documentation":"

The data structure representing the details of the DB proxy endpoint that you delete.

" + } + } + }, "DeleteDBProxyRequest":{ "type":"structure", "required":["DBProxyName"], @@ -7206,7 +7505,7 @@ "members":{ "DBProxyName":{ "shape":"String", - "documentation":"

The name of the DB proxy.

" + "documentation":"

The name of the DB proxy. If you omit this parameter, the output includes information about all DB proxies owned by your AWS account ID.

" }, "Filters":{ "shape":"FilterList", @@ -7235,6 +7534,44 @@ } } }, + "DescribeDBProxyEndpointsRequest":{ + "type":"structure", + "members":{ + "DBProxyName":{ + "shape":"DBProxyName", + "documentation":"

The name of the DB proxy whose endpoints you want to describe. If you omit this parameter, the output includes information about all DB proxy endpoints associated with all your DB proxies.

" + }, + "DBProxyEndpointName":{ + "shape":"DBProxyEndpointName", + "documentation":"

The name of a DB proxy endpoint to describe. If you omit this parameter, the output includes information about all DB proxy endpoints associated with the specified proxy.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

This parameter is not currently supported.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + }, + "MaxRecords":{ + "shape":"MaxRecords", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.
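
As a minimal sketch (not part of the service model), the Marker/MaxRecords pair described above might be paged through with a boto3 RDS client; the proxy name below is a placeholder:

    import boto3

    rds = boto3.client('rds')
    endpoints, marker = [], None
    while True:
        kwargs = {'DBProxyName': 'my-proxy', 'MaxRecords': 20}
        if marker:
            kwargs['Marker'] = marker
        page = rds.describe_db_proxy_endpoints(**kwargs)
        endpoints.extend(page.get('DBProxyEndpoints', []))
        marker = page.get('Marker')
        if not marker:
            break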

" + } + } + }, + "DescribeDBProxyEndpointsResponse":{ + "type":"structure", + "members":{ + "DBProxyEndpoints":{ + "shape":"DBProxyEndpointList", + "documentation":"

The list of ProxyEndpoint objects returned by the API operation.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + } + }, "DescribeDBProxyTargetGroupsRequest":{ "type":"structure", "required":["DBProxyName"], @@ -8359,6 +8696,56 @@ "DBCluster":{"shape":"DBCluster"} } }, + "FailoverGlobalClusterMessage":{ + "type":"structure", + "required":[ + "GlobalClusterIdentifier", + "TargetDbClusterIdentifier" + ], + "members":{ + "GlobalClusterIdentifier":{ + "shape":"GlobalClusterIdentifier", + "documentation":"

Identifier of the Aurora global database (GlobalCluster) that should be failed over. The identifier is the unique key assigned by the user when the Aurora global database was created. In other words, it's the name of the Aurora global database that you want to fail over.

Constraints:

" + }, + "TargetDbClusterIdentifier":{ + "shape":"DBClusterIdentifier", + "documentation":"

Identifier of the secondary Aurora DB cluster that you want to promote to primary for the Aurora global database (GlobalCluster). Use the Amazon Resource Name (ARN) for the identifier so that Aurora can locate the cluster in its AWS Region.
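
A minimal sketch of how these two identifiers might be passed with a boto3 RDS client (the global database name, ARN, and account number are placeholders):

    import boto3

    rds = boto3.client('rds')
    resp = rds.failover_global_cluster(
        GlobalClusterIdentifier='my-global-db',
        TargetDbClusterIdentifier=(
            'arn:aws:rds:us-west-2:123456789012:cluster:my-secondary-cluster'
        ),
    )
    # While the switchover runs, the returned GlobalCluster carries a FailoverState
    # (Status, FromDbClusterArn, ToDbClusterArn).
    state = resp['GlobalCluster'].get('FailoverState', {})
    print(state.get('Status'))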

" + } + } + }, + "FailoverGlobalClusterResult":{ + "type":"structure", + "members":{ + "GlobalCluster":{"shape":"GlobalCluster"} + } + }, + "FailoverState":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"FailoverStatus", + "documentation":"

The current status of the Aurora global database (GlobalCluster). Possible values are as follows:

" + }, + "FromDbClusterArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the Aurora DB cluster that is currently being demoted, and which is associated with this state.

" + }, + "ToDbClusterArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the Aurora DB cluster that is currently being promoted, and which is associated with this state.

" + } + }, + "documentation":"

Contains the state of scheduled or in-process failover operations on an Aurora global database (GlobalCluster). This data type is empty unless a failover operation is scheduled or is currently underway on the Aurora global database.

", + "wrapper":true + }, + "FailoverStatus":{ + "type":"string", + "enum":[ + "pending", + "failing-over", + "cancelling" + ] + }, "FeatureNameList":{ "type":"list", "member":{"shape":"String"} @@ -8437,6 +8824,10 @@ "GlobalClusterMembers":{ "shape":"GlobalClusterMemberList", "documentation":"

The list of cluster IDs for secondary clusters within the global database cluster. Currently limited to 1 item.

" + }, + "FailoverState":{ + "shape":"FailoverState", + "documentation":"

A data object containing all properties for the current state of an in-process or pending failover process for this Aurora global database. This object is empty unless the FailoverGlobalCluster API operation has been called on this Aurora global database (GlobalCluster).

" } }, "documentation":"

A data type representing an Aurora global database.

", @@ -8454,6 +8845,12 @@ }, "exception":true }, + "GlobalClusterIdentifier":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[A-Za-z][0-9A-Za-z-:._]*" + }, "GlobalClusterList":{ "type":"list", "member":{ @@ -8850,6 +9247,18 @@ }, "exception":true }, + "InvalidDBProxyEndpointStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

You can't perform this operation while the DB proxy endpoint is in a particular state.

", + "error":{ + "code":"InvalidDBProxyEndpointStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "InvalidDBProxyStateFault":{ "type":"structure", "members":{ @@ -9266,7 +9675,7 @@ }, "EnableGlobalWriteForwarding":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether to enable write operations to be forwarded from this cluster to the primary cluster in an Aurora global database. The resulting changes are replicated back to this cluster. This parameter only applies to DB clusters that are secondary clusters in an Aurora global database. By default, Aurora disallows write operations for secondary clusters.

" + "documentation":"

A value that indicates whether to enable this DB cluster to forward write operations to the primary cluster of an Aurora global database (GlobalCluster). By default, write operations are not allowed on Aurora DB clusters that are secondary clusters in an Aurora global database.

You can set this value only on Aurora DB clusters that are members of an Aurora global database. With this parameter enabled, a secondary cluster can forward writes to the current primary cluster and the resulting changes are replicated back to this cluster. For the primary DB cluster of an Aurora global database, this value is used immediately if the primary is demoted by the FailoverGlobalCluster API operation, but it does nothing until then.
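
A minimal sketch, assuming a boto3 RDS client and a placeholder identifier for a secondary cluster in an Aurora global database:

    import boto3

    rds = boto3.client('rds')
    rds.modify_db_cluster(
        DBClusterIdentifier='my-secondary-cluster',
        EnableGlobalWriteForwarding=True,
    )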

" } }, "documentation":"

" @@ -9506,6 +9915,10 @@ "EnableCustomerOwnedIp":{ "shape":"BooleanOptional", "documentation":"

A value that indicates whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts DB instance.

A CoIP provides local or external connectivity to resources in your Outpost subnets through your on-premises network. For some use cases, a CoIP can provide lower latency for connections to the DB instance from outside of its virtual private cloud (VPC) on your local network.

For more information about RDS on Outposts, see Working with Amazon RDS on AWS Outposts in the Amazon RDS User Guide.

For more information about CoIPs, see Customer-owned IP addresses in the AWS Outposts User Guide.

" + }, + "AwsBackupRecoveryPointArn":{ + "shape":"AwsBackupRecoveryPointArn", + "documentation":"

The Amazon Resource Name (ARN) of the recovery point in AWS Backup.

" } }, "documentation":"

" @@ -9534,6 +9947,33 @@ }, "documentation":"

" }, + "ModifyDBProxyEndpointRequest":{ + "type":"structure", + "required":["DBProxyEndpointName"], + "members":{ + "DBProxyEndpointName":{ + "shape":"DBProxyEndpointName", + "documentation":"

The name of the DB proxy associated with the DB proxy endpoint that you want to modify.

" + }, + "NewDBProxyEndpointName":{ + "shape":"DBProxyEndpointName", + "documentation":"

The new identifier for the DBProxyEndpoint. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it can't end with a hyphen or contain two consecutive hyphens.

" + }, + "VpcSecurityGroupIds":{ + "shape":"StringList", + "documentation":"

The VPC security group IDs for the DB proxy endpoint. When the DB proxy endpoint uses a different VPC than the original proxy, you also specify a different set of security group IDs than for the original proxy.
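
A minimal sketch with a boto3 RDS client; the endpoint name, new name, and security group ID are placeholders:

    import boto3

    rds = boto3.client('rds')
    rds.modify_db_proxy_endpoint(
        DBProxyEndpointName='my-proxy-endpoint',
        NewDBProxyEndpointName='my-proxy-endpoint-v2',
        VpcSecurityGroupIds=['sg-0123456789abcdef0'],
    )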

" + } + } + }, + "ModifyDBProxyEndpointResponse":{ + "type":"structure", + "members":{ + "DBProxyEndpoint":{ + "shape":"DBProxyEndpoint", + "documentation":"

The DBProxyEndpoint object representing the new settings for the DB proxy endpoint.

" + } + } + }, "ModifyDBProxyRequest":{ "type":"structure", "required":["DBProxyName"], @@ -12650,13 +13090,22 @@ "UNREACHABLE", "CONNECTION_FAILED", "AUTH_FAILURE", - "PENDING_PROXY_CAPACITY" + "PENDING_PROXY_CAPACITY", + "INVALID_REPLICATION_STATE" ] }, "TargetList":{ "type":"list", "member":{"shape":"DBProxyTarget"} }, + "TargetRole":{ + "type":"string", + "enum":[ + "READ_WRITE", + "READ_ONLY", + "UNKNOWN" + ] + }, "TargetState":{ "type":"string", "enum":[ @@ -12704,7 +13153,19 @@ }, "IsMajorVersionUpgrade":{ "shape":"Boolean", - "documentation":"

A value that indicates whether a database engine is upgraded to a major version.

" + "documentation":"

A value that indicates whether upgrading to the target version requires upgrading the major version of the database engine.

" + }, + "SupportedEngineModes":{ + "shape":"EngineModeList", + "documentation":"

A list of the supported DB engine modes for the target engine version.

" + }, + "SupportsParallelQuery":{ + "shape":"BooleanOptional", + "documentation":"

A value that indicates whether you can use Aurora parallel query with the target engine version.

" + }, + "SupportsGlobalDatabases":{ + "shape":"BooleanOptional", + "documentation":"

A value that indicates whether you can use Aurora global databases with the target engine version.

" } }, "documentation":"

The version of the database engine that a DB instance can be upgraded to.

" diff --git a/botocore/data/redshift-data/2019-12-20/service-2.json b/botocore/data/redshift-data/2019-12-20/service-2.json index c782792f..7ca24483 100644 --- a/botocore/data/redshift-data/2019-12-20/service-2.json +++ b/botocore/data/redshift-data/2019-12-20/service-2.json @@ -66,7 +66,8 @@ "output":{"shape":"ExecuteStatementOutput"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ExecuteStatementException"} + {"shape":"ExecuteStatementException"}, + {"shape":"ActiveStatementsExceededException"} ], "documentation":"

Runs an SQL statement, which can be data manipulation language (DML) or data definition language (DDL). This statement must be a single SQL statement. Depending on the authorization method, use one of the following combinations of request parameters:
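
A minimal sketch of one such combination (temporary credentials via DbUser), assuming a boto3 redshift-data client and placeholder identifiers:

    import time
    import boto3

    rsd = boto3.client('redshift-data')
    stmt = rsd.execute_statement(
        ClusterIdentifier='my-cluster',
        Database='dev',
        DbUser='awsuser',
        Sql='SELECT 1',
    )
    # Poll until the statement reaches a terminal status, then fetch rows if any.
    desc = rsd.describe_statement(Id=stmt['Id'])
    while desc['Status'] not in ('FINISHED', 'FAILED', 'ABORTED'):
        time.sleep(1)
        desc = rsd.describe_statement(Id=stmt['Id'])
    if desc.get('HasResultSet'):
        rows = rsd.get_statement_result(Id=stmt['Id'])['Records']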

" }, @@ -143,6 +144,14 @@ } }, "shapes":{ + "ActiveStatementsExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The number of active statements exceeds the limit.

", + "exception":true + }, "Blob":{"type":"blob"}, "Boolean":{ "type":"boolean", @@ -287,6 +296,10 @@ "shape":"String", "documentation":"

The error message from the cluster if the SQL statement encountered an error while running.

" }, + "HasResultSet":{ + "shape":"Boolean", + "documentation":"

A value that indicates whether the statement has a result set. The result set can be empty.

" + }, "Id":{ "shape":"UUID", "documentation":"

The identifier of the SQL statement described. This value is a universally unique identifier (UUID) generated by Amazon Redshift Data API.

" @@ -327,15 +340,22 @@ }, "DescribeTableRequest":{ "type":"structure", - "required":["ClusterIdentifier"], + "required":[ + "ClusterIdentifier", + "Database" + ], "members":{ "ClusterIdentifier":{ "shape":"Location", "documentation":"

The cluster identifier. This parameter is required when authenticating using either AWS Secrets Manager or temporary credentials.

" }, + "ConnectedDatabase":{ + "shape":"String", + "documentation":"

A database name. The connected database is specified when you connect with your authentication credentials.

" + }, "Database":{ "shape":"String", - "documentation":"

The name of the database. This parameter is required when authenticating using temporary credentials.

" + "documentation":"

The name of the database that contains the tables to be described. If ConnectedDatabase is not specified, this is also the database to connect to with your authentication credentials.
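
A minimal sketch, assuming a boto3 redshift-data client; all identifiers are placeholders, and the Table parameter name is assumed here (see the DescribeTable request shape for the full parameter list):

    import boto3

    rsd = boto3.client('redshift-data')
    resp = rsd.describe_table(
        ClusterIdentifier='my-cluster',
        ConnectedDatabase='dev',    # database used for the connection
        Database='sales',           # database that contains the table to describe
        DbUser='awsuser',
        Table='orders',
    )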

" }, "DbUser":{ "shape":"String", @@ -494,7 +514,8 @@ "documentation":"

A value of the string data type.

" } }, - "documentation":"

A data value in a column.

" + "documentation":"

A data value in a column.

", + "union":true }, "FieldList":{ "type":"list", @@ -604,9 +625,13 @@ "shape":"Location", "documentation":"

The cluster identifier. This parameter is required when authenticating using either AWS Secrets Manager or temporary credentials.

" }, + "ConnectedDatabase":{ + "shape":"String", + "documentation":"

A database name. The connected database is specified when you connect with your authentication credentials.

" + }, "Database":{ "shape":"String", - "documentation":"

The name of the database. This parameter is required when authenticating using temporary credentials.

" + "documentation":"

The name of the database that contains the schemas to list. If ConnectedDatabase is not specified, this is also the database to connect to with your authentication credentials.

" }, "DbUser":{ "shape":"String", @@ -659,6 +684,10 @@ "shape":"String", "documentation":"

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.

" }, + "RoleLevel":{ + "shape":"Boolean", + "documentation":"

A value that filters which statements to return in the response. If true, all statements run by the caller's IAM role are returned. If false, only statements run by the caller's IAM role in the current IAM session are returned. The default is true.

" + }, "StatementName":{ "shape":"StatementNameString", "documentation":"

The name of the SQL statement specified as input to ExecuteStatement to identify the query. You can list multiple statements by providing a prefix that matches the beginning of the statement name. For example, to list myStatement1, myStatement2, myStatement3, and so on, then provide a value of myStatement. Data API does a case-sensitive match of SQL statement names to the prefix value you provide.
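
A minimal sketch of prefix matching combined with RoleLevel, assuming a boto3 redshift-data client (the response is assumed to carry a Statements list):

    import boto3

    rsd = boto3.client('redshift-data')
    resp = rsd.list_statements(
        StatementName='myStatement',   # prefix; matches myStatement1, myStatement2, ...
        RoleLevel=False,               # only statements from the current IAM session
    )
    for stmt in resp.get('Statements', []):
        print(stmt['Id'], stmt['Status'])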

" @@ -694,9 +723,13 @@ "shape":"Location", "documentation":"

The cluster identifier. This parameter is required when authenticating using either AWS Secrets Manager or temporary credentials.

" }, + "ConnectedDatabase":{ + "shape":"String", + "documentation":"

A database name. The connected database is specified when you connect with your authentication credentials.

" + }, "Database":{ "shape":"String", - "documentation":"

The name of the database. This parameter is required when authenticating using temporary credentials.

" + "documentation":"

The name of the database that contains the tables to list. If ConnectedDatabase is not specified, this is also the database to connect to with your authentication credentials.

" }, "DbUser":{ "shape":"String", @@ -739,7 +772,11 @@ }, "Location":{"type":"string"}, "Long":{"type":"long"}, - "PageSize":{"type":"integer"}, + "PageSize":{ + "type":"integer", + "max":1000, + "min":0 + }, "ResourceNotFoundException":{ "type":"structure", "required":[ @@ -816,13 +853,13 @@ "StatusString":{ "type":"string", "enum":[ - "ABORTED", - "ALL", - "FAILED", - "FINISHED", + "SUBMITTED", "PICKED", "STARTED", - "SUBMITTED" + "FINISHED", + "ABORTED", + "FAILED", + "ALL" ] }, "String":{"type":"string"}, @@ -863,5 +900,5 @@ }, "bool":{"type":"boolean"} }, - "documentation":"

You can use the Amazon Redshift Data API to run queries on Amazon Redshift tables. You can run individual SQL statements, which are committed if the statement succeeds.

" + "documentation":"

You can use the Amazon Redshift Data API to run queries on Amazon Redshift tables. You can run individual SQL statements, which are committed if the statement succeeds.

For more information about the Amazon Redshift Data API, see Using the Amazon Redshift Data API in the Amazon Redshift Cluster Management Guide.

" } diff --git a/botocore/data/redshift/2012-12-01/service-2.json b/botocore/data/redshift/2012-12-01/service-2.json index 4ee9baaa..52059f5a 100644 --- a/botocore/data/redshift/2012-12-01/service-2.json +++ b/botocore/data/redshift/2012-12-01/service-2.json @@ -2231,6 +2231,10 @@ "ClusterNamespaceArn":{ "shape":"String", "documentation":"

The namespace Amazon Resource Name (ARN) of the cluster.

" + }, + "TotalStorageCapacityInMegaBytes":{ + "shape":"LongOptional", + "documentation":"

The total storage capacity of the cluster in megabytes.

" } }, "documentation":"

Describes a cluster.

", @@ -2982,7 +2986,7 @@ }, "AutomatedSnapshotRetentionPeriod":{ "shape":"IntegerOptional", - "documentation":"

The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. Even if automated snapshots are disabled, you can still create manual snapshots when you want with CreateClusterSnapshot.

Default: 1

Constraints: Must be a value from 0 to 35.

" + "documentation":"

The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. Even if automated snapshots are disabled, you can still create manual snapshots when you want with CreateClusterSnapshot.

You can't disable automated snapshots for RA3 node types. Set the automated retention period from 1-35 days.

Default: 1

Constraints: Must be a value from 0 to 35.

" }, "ManualSnapshotRetentionPeriod":{ "shape":"IntegerOptional", @@ -5653,7 +5657,7 @@ }, "AutomatedSnapshotRetentionPeriod":{ "shape":"IntegerOptional", - "documentation":"

The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. Even if automated snapshots are disabled, you can still create manual snapshots when you want with CreateClusterSnapshot.

If you decrease the automated snapshot retention period from its current value, existing automated snapshots that fall outside of the new retention period will be immediately deleted.

Default: Uses existing setting.

Constraints: Must be a value from 0 to 35.

" + "documentation":"

The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. Even if automated snapshots are disabled, you can still create manual snapshots when you want with CreateClusterSnapshot.

If you decrease the automated snapshot retention period from its current value, existing automated snapshots that fall outside of the new retention period will be immediately deleted.

You can't disable automated snapshots for RA3 node types. Set the automated retention period from 1-35 days.

Default: Uses existing setting.

Constraints: Must be a value from 0 to 35.
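
A minimal sketch with a boto3 Redshift client and a placeholder cluster identifier:

    import boto3

    redshift = boto3.client('redshift')
    redshift.modify_cluster(
        ClusterIdentifier='my-cluster',
        AutomatedSnapshotRetentionPeriod=7,   # 1-35 for RA3 node types; 0 disables (other node types only)
    )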

" }, "ManualSnapshotRetentionPeriod":{ "shape":"IntegerOptional", @@ -5958,6 +5962,35 @@ } } }, + "NetworkInterface":{ + "type":"structure", + "members":{ + "NetworkInterfaceId":{ + "shape":"String", + "documentation":"

The network interface identifier.

" + }, + "SubnetId":{ + "shape":"String", + "documentation":"

The subnet identifier.

" + }, + "PrivateIpAddress":{ + "shape":"String", + "documentation":"

The IPv4 address of the network interface within the subnet.

" + }, + "AvailabilityZone":{ + "shape":"String", + "documentation":"

The Availability Zone.

" + } + }, + "documentation":"

Describes a network interface.

" + }, + "NetworkInterfaceList":{ + "type":"list", + "member":{ + "shape":"NetworkInterface", + "locationName":"NetworkInterface" + } + }, "NodeConfigurationOption":{ "type":"structure", "members":{ @@ -6124,7 +6157,7 @@ }, "ParameterValue":{ "shape":"String", - "documentation":"

The value of the parameter.

" + "documentation":"

The value of the parameter. If ParameterName is wlm_json_configuration, then the maximum size of ParameterValue is 8000 characters.

" }, "Description":{ "shape":"String", @@ -6760,7 +6793,7 @@ }, "AutomatedSnapshotRetentionPeriod":{ "shape":"IntegerOptional", - "documentation":"

The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. Even if automated snapshots are disabled, you can still create manual snapshots when you want with CreateClusterSnapshot.

Default: The value selected for the cluster from which the snapshot was taken.

Constraints: Must be a value from 0 to 35.

" + "documentation":"

The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. Even if automated snapshots are disabled, you can still create manual snapshots when you want with CreateClusterSnapshot.

You can't disable automated snapshots for RA3 node types. Set the automated retention period from 1-35 days.

Default: The value selected for the cluster from which the snapshot was taken.

Constraints: Must be a value from 0 to 35.

" }, "ManualSnapshotRetentionPeriod":{ "shape":"IntegerOptional", @@ -8280,6 +8313,14 @@ "VpcEndpointId":{ "shape":"String", "documentation":"

The connection endpoint ID for connecting an Amazon Redshift cluster through the proxy.

" + }, + "VpcId":{ + "shape":"String", + "documentation":"

The VPC identifier that the endpoint is associated with.

" + }, + "NetworkInterfaces":{ + "shape":"NetworkInterfaceList", + "documentation":"

One or more network interfaces of the endpoint, which is also known as an interface endpoint.

" } }, "documentation":"

The connection endpoint for connecting an Amazon Redshift cluster through the proxy.

" diff --git a/botocore/data/s3/2006-03-01/service-2.json b/botocore/data/s3/2006-03-01/service-2.json index c9db84ba..92367b69 100644 --- a/botocore/data/s3/2006-03-01/service-2.json +++ b/botocore/data/s3/2006-03-01/service-2.json @@ -26,7 +26,7 @@ {"shape":"NoSuchUpload"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadAbort.html", - "documentation":"

This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.

To verify that all parts have been removed, so you don't get charged for the part storage, you should call the ListParts operation and ensure that the parts list is empty.

For information about permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

The following operations are related to AbortMultipartUpload:

" + "documentation":"

This action aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.

To verify that all parts have been removed, so you don't get charged for the part storage, you should call the ListParts action and ensure that the parts list is empty.
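
A minimal sketch of that check, assuming a boto3 S3 client and placeholder bucket, key, and upload ID:

    import boto3

    s3 = boto3.client('s3')
    upload_id = 'EXAMPLE-UPLOAD-ID'
    s3.abort_multipart_upload(Bucket='my-bucket', Key='my-key', UploadId=upload_id)
    try:
        parts = s3.list_parts(Bucket='my-bucket', Key='my-key', UploadId=upload_id)
        remaining = parts.get('Parts', [])
    except s3.exceptions.NoSuchUpload:
        remaining = []   # the upload no longer exists, so no parts remain
    if remaining:
        # In-progress part uploads may have landed after the abort; abort again.
        s3.abort_multipart_upload(Bucket='my-bucket', Key='my-key', UploadId=upload_id)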

For information about permissions required to use the multipart upload, see Multipart Upload and Permissions.

The following operations are related to AbortMultipartUpload:

" }, "CompleteMultipartUpload":{ "name":"CompleteMultipartUpload", @@ -37,7 +37,7 @@ "input":{"shape":"CompleteMultipartUploadRequest"}, "output":{"shape":"CompleteMultipartUploadOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadComplete.html", - "documentation":"

Completes a multipart upload by assembling previously uploaded parts.

You first initiate the multipart upload and then upload all parts using the UploadPart operation. After successfully uploading all relevant parts of an upload, you call this operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the Complete Multipart Upload request, you must provide the parts list. You must ensure that the parts list is complete. This operation concatenates the parts that you provide in the list. For each part in the list, you must provide the part number and the ETag value, returned after that part was uploaded.

Processing of a Complete Multipart Upload request could take several minutes to complete. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. Because a request could fail after the initial 200 OK response has been sent, it is important that you check the response body to determine whether the request succeeded.

Note that if CompleteMultipartUpload fails, applications should be prepared to retry the failed requests. For more information, see Amazon S3 Error Best Practices.

For more information about multipart uploads, see Uploading Objects Using Multipart Upload.

For information about permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

CompleteMultipartUpload has the following special errors:

The following operations are related to CompleteMultipartUpload:

" + "documentation":"

Completes a multipart upload by assembling previously uploaded parts.

You first initiate the multipart upload and then upload all parts using the UploadPart operation. After successfully uploading all relevant parts of an upload, you call this action to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the Complete Multipart Upload request, you must provide the parts list. You must ensure that the parts list is complete. This action concatenates the parts that you provide in the list. For each part in the list, you must provide the part number and the ETag value, returned after that part was uploaded.
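
A minimal end-to-end sketch of assembling that parts list, assuming a boto3 S3 client and placeholder bucket/key (a single part is used for brevity):

    import boto3

    s3 = boto3.client('s3')
    mpu = s3.create_multipart_upload(Bucket='my-bucket', Key='big-object')
    part = s3.upload_part(
        Bucket='my-bucket', Key='big-object',
        UploadId=mpu['UploadId'], PartNumber=1, Body=b'part one data',
    )
    s3.complete_multipart_upload(
        Bucket='my-bucket', Key='big-object', UploadId=mpu['UploadId'],
        MultipartUpload={'Parts': [{'ETag': part['ETag'], 'PartNumber': 1}]},
    )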

Processing of a Complete Multipart Upload request could take several minutes to complete. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. Because a request could fail after the initial 200 OK response has been sent, it is important that you check the response body to determine whether the request succeeded.

Note that if CompleteMultipartUpload fails, applications should be prepared to retry the failed requests. For more information, see Amazon S3 Error Best Practices.

For more information about multipart uploads, see Uploading Objects Using Multipart Upload.

For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions.

CompleteMultipartUpload has the following special errors:

The following operations are related to CompleteMultipartUpload:

" }, "CopyObject":{ "name":"CopyObject", @@ -51,7 +51,7 @@ {"shape":"ObjectNotInActiveTierError"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectCOPY.html", - "documentation":"

Creates a copy of an object that is already stored in Amazon S3.

You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic operation using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy API. For more information, see Copy Object Using the REST Multipart Upload API.

All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.

A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy operation starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. This means that a 200 OK response can contain either a success or an error. Design your application to parse the contents of the response and handle it appropriately.

If the copy is successful, you receive a response with information about the copied object.

If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body.

The copy request charge is based on the storage class and Region that you specify for the destination object. For pricing information, see Amazon S3 pricing.

Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.

Metadata

When copying an object, you can preserve all metadata (default) or specify new metadata. However, the ACL is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.

To specify whether you want the object metadata copied from the source object or replaced with metadata provided in the request, you can optionally add the x-amz-metadata-directive header. When you grant permissions, you can use the s3:x-amz-metadata-directive condition key to enforce certain metadata behavior when objects are uploaded. For more information, see Specifying Conditions in a Policy in the Amazon S3 Developer Guide. For a complete list of Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for Amazon S3.

x-amz-copy-source-if Headers

To only copy an object under certain conditions, such as whether the Etag matches or whether the object was modified before or after a specified date, use the following request parameters:

If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the data:

If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed response code:

All headers with the x-amz- prefix, including x-amz-copy-source, must be signed.

Server-side encryption

When you perform a CopyObject operation, you can optionally use the appropriate encryption-related headers to encrypt the object using server-side encryption with AWS managed encryption keys (SSE-S3 or SSE-KMS) or a customer-provided encryption key. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. For more information about server-side encryption, see Using Server-Side Encryption.

If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service Developer Guide.

Access Control List (ACL)-Specific Request Headers

When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.

Storage Class Options

You can use the CopyObject operation to change the storage class of an object that is already stored in Amazon S3 using the StorageClass parameter. For more information, see Storage Classes in the Amazon S3 Service Developer Guide.

Versioning

By default, x-amz-copy-source identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the versionId subresource.

If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id response header in the response.

If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.

If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see RestoreObject.

The following operations are related to CopyObject:

For more information, see Copying Objects.

", + "documentation":"

Creates a copy of an object that is already stored in Amazon S3.

You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy API. For more information, see Copy Object Using the REST Multipart Upload API.

All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.

A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy action starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. This means that a 200 OK response can contain either a success or an error. Design your application to parse the contents of the response and handle it appropriately.

If the copy is successful, you receive a response with information about the copied object.

If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body.

The copy request charge is based on the storage class and Region that you specify for the destination object. For pricing information, see Amazon S3 pricing.

Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.

Metadata

When copying an object, you can preserve all metadata (default) or specify new metadata. However, the ACL is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.

To specify whether you want the object metadata copied from the source object or replaced with metadata provided in the request, you can optionally add the x-amz-metadata-directive header. When you grant permissions, you can use the s3:x-amz-metadata-directive condition key to enforce certain metadata behavior when objects are uploaded. For more information, see Specifying Conditions in a Policy in the Amazon S3 Developer Guide. For a complete list of Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for Amazon S3.

x-amz-copy-source-if Headers

To only copy an object under certain conditions, such as whether the Etag matches or whether the object was modified before or after a specified date, use the following request parameters:

If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the data:

If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed response code:

All headers with the x-amz- prefix, including x-amz-copy-source, must be signed.

Server-side encryption

When you perform a CopyObject operation, you can optionally use the appropriate encryption-related headers to encrypt the object using server-side encryption with AWS managed encryption keys (SSE-S3 or SSE-KMS) or a customer-provided encryption key. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. For more information about server-side encryption, see Using Server-Side Encryption.

If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.

Access Control List (ACL)-Specific Request Headers

When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.

Storage Class Options

You can use the CopyObject action to change the storage class of an object that is already stored in Amazon S3 using the StorageClass parameter. For more information, see Storage Classes in the Amazon S3 Service Developer Guide.
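
A minimal sketch of an in-place storage class change, assuming a boto3 S3 client and placeholder names:

    import boto3

    s3 = boto3.client('s3')
    s3.copy_object(
        Bucket='my-bucket',
        Key='reports/2020.csv',
        CopySource={'Bucket': 'my-bucket', 'Key': 'reports/2020.csv'},
        StorageClass='STANDARD_IA',
    )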

Versioning

By default, x-amz-copy-source identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the versionId subresource.

If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id response header in the response.

If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.

If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see RestoreObject.

The following operations are related to CopyObject:

For more information, see Copying Objects.

", "alias":"PutObjectCopy" }, "CreateBucket":{ @@ -79,7 +79,7 @@ "input":{"shape":"CreateMultipartUploadRequest"}, "output":{"shape":"CreateMultipartUploadOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadInitiate.html", - "documentation":"

This operation initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request.

For more information about multipart uploads, see Multipart Upload Overview.

If you have configured a lifecycle rule to abort incomplete multipart uploads, the upload must complete within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort operation and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.

For information about the permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (AWS Signature Version 4).

After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stop charging you for storing them only after you either complete or abort a multipart upload.

You can optionally request server-side encryption. For server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You can provide your own encryption key, or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or Amazon S3-managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the request to initiate the upload by using CreateMultipartUpload.

To perform a multipart upload with encryption using an AWS KMS CMK, the requester must have permission to the kms:Encrypt, kms:Decrypt, kms:ReEncrypt*, kms:GenerateDataKey*, and kms:DescribeKey actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload.

If your AWS Identity and Access Management (IAM) user or role is in the same AWS account as the AWS KMS CMK, then you must have these permissions on the key policy. If your IAM user or role belongs to a different account than the key, then you must have the permissions on both the key policy and your IAM user or role.

For more information, see Protecting Data Using Server-Side Encryption.

Access Permissions

When copying an object, you can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:

  • Specify a canned ACL with the x-amz-acl request header. For more information, see Canned ACL.

  • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

Server-Side- Encryption-Specific Request Headers

You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use AWS managed encryption keys or provide your own encryption key.

  • Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in AWS Key Management Service (AWS KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request.

    • x-amz-server-side-encryption

    • x-amz-server-side-encryption-aws-kms-key-id

    • x-amz-server-side-encryption-context

    If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data.

    All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

  • Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.

    • x-amz-server-side-encryption-customer-algorithm

    • x-amz-server-side-encryption-customer-key

    • x-amz-server-side-encryption-customer-key-MD5

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

Access-Control-List (ACL)-Specific Request Headers

You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the access control list (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:

  • Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.

  • Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly, use:

    • x-amz-grant-read

    • x-amz-grant-write

    • x-amz-grant-read-acp

    • x-amz-grant-write-acp

    • x-amz-grant-full-control

    You specify each grantee as a type=value pair, where the type is one of the following:

    • id – if the value specified is the canonical user ID of an AWS account

    • uri – if you are granting permissions to a predefined group

    • emailAddress – if the value specified is the email address of an AWS account

      Using email addresses to specify a grantee is only supported in the following AWS Regions:

      • US East (N. Virginia)

      • US West (N. California)

      • US West (Oregon)

      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

      • Asia Pacific (Tokyo)

      • Europe (Ireland)

      • South America (São Paulo)

      For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.

    For example, the following x-amz-grant-read header grants the AWS accounts identified by account IDs permissions to read object data and its metadata:

    x-amz-grant-read: id=\"11112222333\", id=\"444455556666\"

The following operations are related to CreateMultipartUpload:

", + "documentation":"

This action initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request.

For more information about multipart uploads, see Multipart Upload Overview.

If you have configured a lifecycle rule to abort incomplete multipart uploads, the upload must complete within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.

For information about the permissions required to use the multipart upload API, see Multipart Upload and Permissions.

For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (AWS Signature Version 4).

After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stop charging you for storing them only after you either complete or abort a multipart upload.

You can optionally request server-side encryption. For server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You can provide your own encryption key, or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or Amazon S3-managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the request to initiate the upload by using CreateMultipartUpload.
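
A minimal sketch of initiating an upload with SSE-KMS, assuming a boto3 S3 client; the bucket, key, and KMS key alias are placeholders (omit SSEKMSKeyId to use the AWS managed CMK):

    import boto3

    s3 = boto3.client('s3')
    mpu = s3.create_multipart_upload(
        Bucket='my-bucket',
        Key='encrypted-object',
        ServerSideEncryption='aws:kms',
        SSEKMSKeyId='alias/my-key',
    )
    upload_id = mpu['UploadId']   # pass this UploadId to each UploadPart request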

To perform a multipart upload with encryption using an AWS KMS CMK, the requester must have permission to the kms:Encrypt, kms:Decrypt, kms:ReEncrypt*, kms:GenerateDataKey*, and kms:DescribeKey actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload.

If your AWS Identity and Access Management (IAM) user or role is in the same AWS account as the AWS KMS CMK, then you must have these permissions on the key policy. If your IAM user or role belongs to a different account than the key, then you must have the permissions on both the key policy and your IAM user or role.

For more information, see Protecting Data Using Server-Side Encryption.

Access Permissions

When copying an object, you can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:

  • Specify a canned ACL with the x-amz-acl request header. For more information, see Canned ACL.

  • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

Server-Side-Encryption-Specific Request Headers

You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use AWS managed encryption keys or provide your own encryption key.

  • Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in AWS Key Management Service (AWS KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request.

    • x-amz-server-side-encryption

    • x-amz-server-side-encryption-aws-kms-key-id

    • x-amz-server-side-encryption-context

    If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data.

    All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

  • Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.

    • x-amz-server-side-encryption-customer-algorithm

    • x-amz-server-side-encryption-customer-key

    • x-amz-server-side-encryption-customer-key-MD5

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

Access-Control-List (ACL)-Specific Request Headers

You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the access control list (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:

  • Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.

  • Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly, use:

    • x-amz-grant-read

    • x-amz-grant-write

    • x-amz-grant-read-acp

    • x-amz-grant-write-acp

    • x-amz-grant-full-control

    You specify each grantee as a type=value pair, where the type is one of the following:

    • id – if the value specified is the canonical user ID of an AWS account

    • uri – if you are granting permissions to a predefined group

    • emailAddress – if the value specified is the email address of an AWS account

      Using email addresses to specify a grantee is only supported in the following AWS Regions:

      • US East (N. Virginia)

      • US West (N. California)

      • US West (Oregon)

      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

      • Asia Pacific (Tokyo)

      • Europe (Ireland)

      • South America (São Paulo)

      For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.

    For example, the following x-amz-grant-read header grants the AWS accounts identified by account IDs permissions to read object data and its metadata:

    x-amz-grant-read: id=\"11112222333\", id=\"444455556666\"

The following operations are related to CreateMultipartUpload:

", "alias":"InitiateMultipartUpload" }, "DeleteBucket":{ @@ -101,7 +101,7 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketAnalyticsConfigurationRequest"}, - "documentation":"

Deletes an analytics configuration for the bucket (specified by the analytics configuration ID).

To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.

The following operations are related to DeleteBucketAnalyticsConfiguration:

" + "documentation":"

Deletes an analytics configuration for the bucket (specified by the analytics configuration ID).

To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.

The following operations are related to DeleteBucketAnalyticsConfiguration:

" }, "DeleteBucketCors":{ "name":"DeleteBucketCors", @@ -112,7 +112,7 @@ }, "input":{"shape":"DeleteBucketCorsRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEcors.html", - "documentation":"

Deletes the cors configuration information set for the bucket.

To use this operation, you must have permission to perform the s3:PutBucketCORS action. The bucket owner has this permission by default and can grant this permission to others.

For information about cors, see Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.

Related Resources:

" + "documentation":"

Deletes the cors configuration information set for the bucket.

To use this operation, you must have permission to perform the s3:PutBucketCORS action. The bucket owner has this permission by default and can grant this permission to others.

For information about cors, see Enabling Cross-Origin Resource Sharing in the Amazon S3 User Guide.

Related Resources:

" }, "DeleteBucketEncryption":{ "name":"DeleteBucketEncryption", @@ -122,7 +122,7 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketEncryptionRequest"}, - "documentation":"

This implementation of the DELETE operation removes default encryption from the bucket. For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption in the Amazon Simple Storage Service Developer Guide.

To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Related Resources

" + "documentation":"

This implementation of the DELETE action removes default encryption from the bucket. For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption in the Amazon S3 User Guide.

To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon S3 User Guide.

Related Resources

" }, "DeleteBucketIntelligentTieringConfiguration":{ "name":"DeleteBucketIntelligentTieringConfiguration", @@ -142,7 +142,7 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketInventoryConfigurationRequest"}, - "documentation":"

Deletes an inventory configuration (identified by the inventory ID) from the bucket.

To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.

Operations related to DeleteBucketInventoryConfiguration include:

" + "documentation":"

Deletes an inventory configuration (identified by the inventory ID) from the bucket.

To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.

Operations related to DeleteBucketInventoryConfiguration include:

" }, "DeleteBucketLifecycle":{ "name":"DeleteBucketLifecycle", @@ -163,7 +163,7 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketMetricsConfigurationRequest"}, - "documentation":"

Deletes a metrics configuration for the Amazon CloudWatch request metrics (specified by the metrics configuration ID) from the bucket. Note that this doesn't include the daily storage metrics.

To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.

The following operations are related to DeleteBucketMetricsConfiguration:

" + "documentation":"

Deletes a metrics configuration for the Amazon CloudWatch request metrics (specified by the metrics configuration ID) from the bucket. Note that this doesn't include the daily storage metrics.

To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.

The following operations are related to DeleteBucketMetricsConfiguration:

" }, "DeleteBucketOwnershipControls":{ "name":"DeleteBucketOwnershipControls", @@ -184,7 +184,7 @@ }, "input":{"shape":"DeleteBucketPolicyRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEpolicy.html", - "documentation":"

This implementation of the DELETE operation uses the policy subresource to delete the policy of a specified bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the DeleteBucketPolicy permissions on the specified bucket and belong to the bucket owner's account to use this operation.

If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and UserPolicies.

The following operations are related to DeleteBucketPolicy

" + "documentation":"

This implementation of the DELETE action uses the policy subresource to delete the policy of a specified bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the DeleteBucketPolicy permissions on the specified bucket and belong to the bucket owner's account to use this operation.

If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and UserPolicies.

The following operations are related to DeleteBucketPolicy

" }, "DeleteBucketReplication":{ "name":"DeleteBucketReplication", @@ -194,7 +194,7 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketReplicationRequest"}, - "documentation":"

Deletes the replication configuration from the bucket.

To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration action. The bucket owner has these permissions by default and can grant it to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

It can take a while for the deletion of a replication configuration to fully propagate.

For information about replication configuration, see Replication in the Amazon S3 Developer Guide.

The following operations are related to DeleteBucketReplication:

" + "documentation":"

Deletes the replication configuration from the bucket.

To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration action. The bucket owner has these permissions by default and can grant it to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

It can take a while for the deletion of a replication configuration to fully propagate.

For information about replication configuration, see Replication in the Amazon S3 Developer Guide.

The following operations are related to DeleteBucketReplication:

" }, "DeleteBucketTagging":{ "name":"DeleteBucketTagging", @@ -216,7 +216,7 @@ }, "input":{"shape":"DeleteBucketWebsiteRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEwebsite.html", - "documentation":"

This operation removes the website configuration for a bucket. Amazon S3 returns a 200 OK response upon successfully deleting a website configuration on the specified bucket. You will get a 200 OK response if the website configuration you are trying to delete does not exist on the bucket. Amazon S3 returns a 404 response if the bucket specified in the request does not exist.

This DELETE operation requires the S3:DeleteBucketWebsite permission. By default, only the bucket owner can delete the website configuration attached to a bucket. However, bucket owners can grant other users permission to delete the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite permission.

For more information about hosting websites, see Hosting Websites on Amazon S3.

The following operations are related to DeleteBucketWebsite:

" + "documentation":"

This action removes the website configuration for a bucket. Amazon S3 returns a 200 OK response upon successfully deleting a website configuration on the specified bucket. You will get a 200 OK response if the website configuration you are trying to delete does not exist on the bucket. Amazon S3 returns a 404 response if the bucket specified in the request does not exist.

This DELETE action requires the S3:DeleteBucketWebsite permission. By default, only the bucket owner can delete the website configuration attached to a bucket. However, bucket owners can grant other users permission to delete the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite permission.

For more information about hosting websites, see Hosting Websites on Amazon S3.

The following operations are related to DeleteBucketWebsite:

" }, "DeleteObject":{ "name":"DeleteObject", @@ -228,7 +228,7 @@ "input":{"shape":"DeleteObjectRequest"}, "output":{"shape":"DeleteObjectOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectDELETE.html", - "documentation":"

Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn't a null version, Amazon S3 does not remove any objects.

To remove a specific version, you must be the bucket owner and you must use the version Id subresource. Using this subresource permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header, x-amz-delete-marker, to true.

If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the x-amz-mfa request header in the DELETE versionId request. Requests that include x-amz-mfa must use HTTPS.

For more information about MFA Delete, see Using MFA Delete. To see sample requests that use versioning, see Sample Request.

You can delete objects by explicitly calling the DELETE Object API or configure its lifecycle (PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration actions.

The following operation is related to DeleteObject:

" + "documentation":"

Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn't a null version, Amazon S3 does not remove any objects.

To remove a specific version, you must be the bucket owner and you must use the version Id subresource. Using this subresource permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header, x-amz-delete-marker, to true.

If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the x-amz-mfa request header in the DELETE versionId request. Requests that include x-amz-mfa must use HTTPS.

For more information about MFA Delete, see Using MFA Delete. To see sample requests that use versioning, see Sample Request.

You can delete objects by explicitly calling DELETE Object or configure its lifecycle (PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration actions.
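
For illustration, a minimal boto3 sketch of both forms of the request; the bucket, key, version ID, and MFA value are placeholders:

    import boto3

    s3 = boto3.client("s3")

    # Simple delete: inserts a delete marker (or removes the null version).
    s3.delete_object(Bucket="my-bucket", Key="photos/2006/February/sample.jpg")

    # Permanently delete one version; MFA carries the x-amz-mfa "serial code" value
    # required when the bucket has MFA Delete enabled.
    resp = s3.delete_object(
        Bucket="my-bucket",
        Key="photos/2006/February/sample.jpg",
        VersionId="example-version-id",                    # placeholder
        MFA="arn:aws:iam::111122223333:mfa/user 123456",   # placeholder serial + code
    )
    print(resp.get("DeleteMarker"), resp.get("VersionId"))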

The following action is related to DeleteObject:

" }, "DeleteObjectTagging":{ "name":"DeleteObjectTagging", @@ -250,7 +250,7 @@ "input":{"shape":"DeleteObjectsRequest"}, "output":{"shape":"DeleteObjectsOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/multiobjectdeleteapi.html", - "documentation":"

This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete requests, reducing per-request overhead.

The request contains a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that delete, success, or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted.

The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode the response includes only keys where the delete operation encountered an error. For a successful deletion, the operation does not return any information about the delete in the response body.

When performing this operation on an MFA Delete enabled bucket, that attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA Delete.

Finally, the Content-MD5 header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit.

The following operations are related to DeleteObjects:

", + "documentation":"

This action enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this action provides a suitable alternative to sending individual delete requests, reducing per-request overhead.

The request contains a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete action and returns the result of that delete, success, or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted.

The action supports two modes for the response: verbose and quiet. By default, the action uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode the response includes only keys where the delete action encountered an error. For a successful deletion, the action does not return any information about the delete in the response body.

When performing this action on an MFA Delete enabled bucket that attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA Delete.

Finally, the Content-MD5 header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit.
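
A minimal boto3 sketch of a quiet-mode Multi-Object Delete (the SDK computes the required Content-MD5 header for you); the bucket, keys, and version ID are placeholders:

    import boto3

    s3 = boto3.client("s3")

    # Up to 1000 keys per request; Quiet=True returns only the keys whose delete failed.
    resp = s3.delete_objects(
        Bucket="my-bucket",                                 # placeholder
        Delete={
            "Objects": [
                {"Key": "logs/2021/01/01.gz"},
                {"Key": "logs/2021/01/02.gz", "VersionId": "example-version-id"},
            ],
            "Quiet": True,
        },
    )
    for err in resp.get("Errors", []):
        print(err["Key"], err["Code"], err["Message"])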

The following operations are related to DeleteObjects:

", "alias":"DeleteMultipleObjects", "httpChecksumRequired":true }, @@ -262,7 +262,7 @@ "responseCode":204 }, "input":{"shape":"DeletePublicAccessBlockRequest"}, - "documentation":"

Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

The following operations are related to DeletePublicAccessBlock:

" + "documentation":"

Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

The following operations are related to DeletePublicAccessBlock:

" }, "GetBucketAccelerateConfiguration":{ "name":"GetBucketAccelerateConfiguration", @@ -272,7 +272,7 @@ }, "input":{"shape":"GetBucketAccelerateConfigurationRequest"}, "output":{"shape":"GetBucketAccelerateConfigurationOutput"}, - "documentation":"

This implementation of the GET operation uses the accelerate subresource to return the Transfer Acceleration state of a bucket, which is either Enabled or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to and from Amazon S3.

To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

You set the Transfer Acceleration state of an existing bucket to Enabled or Suspended by using the PutBucketAccelerateConfiguration operation.

A GET accelerate request does not return a state value for a bucket that has no transfer acceleration state. A bucket has no Transfer Acceleration state if a state has never been set on the bucket.

For more information about transfer acceleration, see Transfer Acceleration in the Amazon Simple Storage Service Developer Guide.

Related Resources

" + "documentation":"

This implementation of the GET action uses the accelerate subresource to return the Transfer Acceleration state of a bucket, which is either Enabled or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to and from Amazon S3.

To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon S3 User Guide.

You set the Transfer Acceleration state of an existing bucket to Enabled or Suspended by using the PutBucketAccelerateConfiguration operation.

A GET accelerate request does not return a state value for a bucket that has no transfer acceleration state. A bucket has no Transfer Acceleration state if a state has never been set on the bucket.

For more information about transfer acceleration, see Transfer Acceleration in the Amazon S3 User Guide.
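
For illustration, a boto3 sketch that reads the Transfer Acceleration state and allows for the case where no state has ever been set; the bucket name is a placeholder:

    import boto3

    s3 = boto3.client("s3")

    resp = s3.get_bucket_accelerate_configuration(Bucket="my-bucket")  # placeholder
    # "Status" is absent if Transfer Acceleration has never been configured on the bucket.
    print(resp.get("Status", "no acceleration state set"))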

Related Resources

" }, "GetBucketAcl":{ "name":"GetBucketAcl", @@ -283,7 +283,7 @@ "input":{"shape":"GetBucketAclRequest"}, "output":{"shape":"GetBucketAclOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETacl.html", - "documentation":"

This implementation of the GET operation uses the acl subresource to return the access control list (ACL) of a bucket. To use GET to return the ACL of the bucket, you must have READ_ACP access to the bucket. If READ_ACP permission is granted to the anonymous user, you can return the ACL of the bucket without using an authorization header.

Related Resources

" + "documentation":"

This implementation of the GET action uses the acl subresource to return the access control list (ACL) of a bucket. To use GET to return the ACL of the bucket, you must have READ_ACP access to the bucket. If READ_ACP permission is granted to the anonymous user, you can return the ACL of the bucket without using an authorization header.

Related Resources

" }, "GetBucketAnalyticsConfiguration":{ "name":"GetBucketAnalyticsConfiguration", @@ -293,7 +293,7 @@ }, "input":{"shape":"GetBucketAnalyticsConfigurationRequest"}, "output":{"shape":"GetBucketAnalyticsConfigurationOutput"}, - "documentation":"

This implementation of the GET operation returns an analytics configuration (identified by the analytics configuration ID) from the bucket.

To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis in the Amazon Simple Storage Service Developer Guide.

Related Resources

" + "documentation":"

This implementation of the GET action returns an analytics configuration (identified by the analytics configuration ID) from the bucket.

To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.

For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis in the Amazon S3 User Guide.

Related Resources

" }, "GetBucketCors":{ "name":"GetBucketCors", @@ -314,7 +314,7 @@ }, "input":{"shape":"GetBucketEncryptionRequest"}, "output":{"shape":"GetBucketEncryptionOutput"}, - "documentation":"

Returns the default encryption configuration for an Amazon S3 bucket. For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption.

To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

The following operations are related to GetBucketEncryption:

" + "documentation":"

Returns the default encryption configuration for an Amazon S3 bucket. If the bucket does not have a default encryption configuration, GetBucketEncryption returns ServerSideEncryptionConfigurationNotFoundError.

For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption.

To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
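
A boto3 sketch of reading the default encryption configuration and handling the ServerSideEncryptionConfigurationNotFoundError case; the bucket name is a placeholder:

    import boto3
    from botocore.exceptions import ClientError

    s3 = boto3.client("s3")

    try:
        resp = s3.get_bucket_encryption(Bucket="my-bucket")   # placeholder
        print(resp["ServerSideEncryptionConfiguration"]["Rules"])
    except ClientError as err:
        # Raised when the bucket has no default encryption configuration.
        if err.response["Error"]["Code"] == "ServerSideEncryptionConfigurationNotFoundError":
            print("no default encryption configured")
        else:
            raise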

The following operations are related to GetBucketEncryption:

" }, "GetBucketIntelligentTieringConfiguration":{ "name":"GetBucketIntelligentTieringConfiguration", @@ -334,7 +334,7 @@ }, "input":{"shape":"GetBucketInventoryConfigurationRequest"}, "output":{"shape":"GetBucketInventoryConfigurationOutput"}, - "documentation":"

Returns an inventory configuration (identified by the inventory configuration ID) from the bucket.

To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.

The following operations are related to GetBucketInventoryConfiguration:

" + "documentation":"

Returns an inventory configuration (identified by the inventory configuration ID) from the bucket.

To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.

The following operations are related to GetBucketInventoryConfiguration:

" }, "GetBucketLifecycle":{ "name":"GetBucketLifecycle", @@ -345,7 +345,7 @@ "input":{"shape":"GetBucketLifecycleRequest"}, "output":{"shape":"GetBucketLifecycleOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETlifecycle.html", - "documentation":"

For an updated version of this API, see GetBucketLifecycleConfiguration. If you configured a bucket lifecycle using the filter element, you should see the updated version of this topic. This topic is provided for backward compatibility.

Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.

To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

GetBucketLifecycle has the following special error:

The following operations are related to GetBucketLifecycle:

", + "documentation":"

For an updated version of this API, see GetBucketLifecycleConfiguration. If you configured a bucket lifecycle using the filter element, you should see the updated version of this topic. This topic is provided for backward compatibility.

Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.

To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

GetBucketLifecycle has the following special error:

The following operations are related to GetBucketLifecycle:

", "deprecated":true }, "GetBucketLifecycleConfiguration":{ @@ -356,7 +356,7 @@ }, "input":{"shape":"GetBucketLifecycleConfigurationRequest"}, "output":{"shape":"GetBucketLifecycleConfigurationOutput"}, - "documentation":"

Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The response describes the new filter element that you can use to specify a filter to select a subset of objects to which the rule applies. If you are using a previous version of the lifecycle configuration, it still works. For the earlier API description, see GetBucketLifecycle.

Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.

To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration action. The bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

GetBucketLifecycleConfiguration has the following special error:

The following operations are related to GetBucketLifecycleConfiguration:

" + "documentation":"

Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The response describes the new filter element that you can use to specify a filter to select a subset of objects to which the rule applies. If you are using a previous version of the lifecycle configuration, it still works. For the earlier action, see GetBucketLifecycle.

Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.

To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration action. The bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
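
As a rough illustration, a boto3 sketch that reads the lifecycle configuration and inspects each rule's filter element; the bucket name is a placeholder:

    import boto3

    s3 = boto3.client("s3")

    resp = s3.get_bucket_lifecycle_configuration(Bucket="my-bucket")  # placeholder
    for rule in resp["Rules"]:
        # Filter selects the objects the rule applies to: a key Prefix, one or
        # more Tags, or an And combination of both.
        print(rule.get("ID"), rule["Status"], rule.get("Filter"))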

GetBucketLifecycleConfiguration has the following special error:

The following operations are related to GetBucketLifecycleConfiguration:

" }, "GetBucketLocation":{ "name":"GetBucketLocation", @@ -388,7 +388,7 @@ }, "input":{"shape":"GetBucketMetricsConfigurationRequest"}, "output":{"shape":"GetBucketMetricsConfigurationOutput"}, - "documentation":"

Gets a metrics configuration (specified by the metrics configuration ID) from the bucket. Note that this doesn't include the daily storage metrics.

To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.

The following operations are related to GetBucketMetricsConfiguration:

" + "documentation":"

Gets a metrics configuration (specified by the metrics configuration ID) from the bucket. Note that this doesn't include the daily storage metrics.

To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.

The following operations are related to GetBucketMetricsConfiguration:

" }, "GetBucketNotification":{ "name":"GetBucketNotification", @@ -410,7 +410,7 @@ }, "input":{"shape":"GetBucketNotificationConfigurationRequest"}, "output":{"shape":"NotificationConfiguration"}, - "documentation":"

Returns the notification configuration of a bucket.

If notifications are not enabled on the bucket, the operation returns an empty NotificationConfiguration element.

By default, you must be the bucket owner to read the notification configuration of a bucket. However, the bucket owner can use a bucket policy to grant permission to other users to read this configuration with the s3:GetBucketNotification permission.

For more information about setting and reading the notification configuration on a bucket, see Setting Up Notification of Bucket Events. For more information about bucket policies, see Using Bucket Policies.

The following operation is related to GetBucketNotification:

" + "documentation":"

Returns the notification configuration of a bucket.

If notifications are not enabled on the bucket, the action returns an empty NotificationConfiguration element.

By default, you must be the bucket owner to read the notification configuration of a bucket. However, the bucket owner can use a bucket policy to grant permission to other users to read this configuration with the s3:GetBucketNotification permission.

For more information about setting and reading the notification configuration on a bucket, see Setting Up Notification of Bucket Events. For more information about bucket policies, see Using Bucket Policies.

The following action is related to GetBucketNotification:

" }, "GetBucketOwnershipControls":{ "name":"GetBucketOwnershipControls", @@ -431,7 +431,7 @@ "input":{"shape":"GetBucketPolicyRequest"}, "output":{"shape":"GetBucketPolicyOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETpolicy.html", - "documentation":"

Returns the policy of a specified bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the GetBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.

If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

The following operation is related to GetBucketPolicy:

" + "documentation":"

Returns the policy of a specified bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the GetBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.

If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.
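
A boto3 sketch; the policy comes back as a JSON string, so it is decoded before use (the bucket name is a placeholder):

    import json

    import boto3

    s3 = boto3.client("s3")

    resp = s3.get_bucket_policy(Bucket="my-bucket")   # placeholder
    policy = json.loads(resp["Policy"])               # the policy is returned as a JSON string
    for statement in policy.get("Statement", []):
        print(statement.get("Sid"), statement.get("Effect"))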

The following action is related to GetBucketPolicy:

" }, "GetBucketPolicyStatus":{ "name":"GetBucketPolicyStatus", @@ -451,7 +451,7 @@ }, "input":{"shape":"GetBucketReplicationRequest"}, "output":{"shape":"GetBucketReplicationOutput"}, - "documentation":"

Returns the replication configuration of a bucket.

It can take a while to propagate the put or delete a replication configuration to all Amazon S3 systems. Therefore, a get request soon after put or delete can return a wrong result.

For information about replication configuration, see Replication in the Amazon Simple Storage Service Developer Guide.

This operation requires permissions for the s3:GetReplicationConfiguration action. For more information about permissions, see Using Bucket Policies and User Policies.

If you include the Filter element in a replication configuration, you must also include the DeleteMarkerReplication and Priority elements. The response also returns those elements.

For information about GetBucketReplication errors, see List of replication-related error codes

The following operations are related to GetBucketReplication:

" + "documentation":"

Returns the replication configuration of a bucket.

It can take a while for a put or delete of a replication configuration to propagate to all Amazon S3 systems. Therefore, a get request soon after a put or delete can return an outdated result.

For information about replication configuration, see Replication in the Amazon S3 User Guide.

This action requires permissions for the s3:GetReplicationConfiguration action. For more information about permissions, see Using Bucket Policies and User Policies.

If you include the Filter element in a replication configuration, you must also include the DeleteMarkerReplication and Priority elements. The response also returns those elements.

For information about GetBucketReplication errors, see List of replication-related error codes

The following operations are related to GetBucketReplication:

" }, "GetBucketRequestPayment":{ "name":"GetBucketRequestPayment", @@ -495,7 +495,7 @@ "input":{"shape":"GetBucketWebsiteRequest"}, "output":{"shape":"GetBucketWebsiteOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETwebsite.html", - "documentation":"

Returns the website configuration for a bucket. To host website on Amazon S3, you can configure a bucket as website by adding a website configuration. For more information about hosting websites, see Hosting Websites on Amazon S3.

This GET operation requires the S3:GetBucketWebsite permission. By default, only the bucket owner can read the bucket website configuration. However, bucket owners can allow other users to read the website configuration by writing a bucket policy granting them the S3:GetBucketWebsite permission.

The following operations are related to DeleteBucketWebsite:

" + "documentation":"

Returns the website configuration for a bucket. To host a website on Amazon S3, you can configure a bucket as a website by adding a website configuration. For more information about hosting websites, see Hosting Websites on Amazon S3.

This GET action requires the S3:GetBucketWebsite permission. By default, only the bucket owner can read the bucket website configuration. However, bucket owners can allow other users to read the website configuration by writing a bucket policy granting them the S3:GetBucketWebsite permission.

The following operations are related to GetBucketWebsite:

" }, "GetObject":{ "name":"GetObject", @@ -510,7 +510,7 @@ {"shape":"InvalidObjectState"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html", - "documentation":"

Retrieves objects from Amazon S3. To use GET, you must have READ access to the object. If you grant READ access to the anonymous user, you can return the object without using an authorization header.

An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg.

To get an object from such a logical hierarchy, specify the full key name for the object in the GET operation. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg, specify the resource as /photos/2006/February/sample.jpg. For a path-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, specify the resource as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host Header Bucket Specification.

To distribute large files to many people, you can save bandwidth costs by using BitTorrent. For more information, see Amazon S3 Torrent. For more information about returning the ACL of an object, see GetObjectAcl.

If the object you are retrieving is stored in the S3 Glacier or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this operation returns an InvalidObjectStateError error. For information about restoring archived objects, see Restoring Archived Objects.

Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.

If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers:

For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging action), the response also returns the x-amz-tagging-count header that provides the count of number of tags associated with the object. You can use GetObjectTagging to retrieve the tag set associated with an object.

Permissions

You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

Versioning

By default, the GET operation returns the current version of an object. To return a different version, use the versionId subresource.

If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.

For more information about versioning, see PutBucketVersioning.

Overriding Response Header Values

There are times when you want to override certain response header values in a GET response. For example, you might override the Content-Disposition response header value in your GET request.

You can override values for a set of response headers using the following query parameters. These response header values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the GET response are Content-Type, Content-Language, Expires, Cache-Control, Content-Disposition, and Content-Encoding. To override these header values in the GET response, you use the following request parameters.

You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. They cannot be used with an unsigned (anonymous) request.

Additional Considerations about Request Headers

If both of the If-Match and If-Unmodified-Since headers are present in the request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since condition evaluates to false; then, S3 returns 200 OK and the data requested.

If both of the If-None-Match and If-Modified-Since headers are present in the request as follows: If-None-Match condition evaluates to false, and; If-Modified-Since condition evaluates to true; then, S3 returns 304 Not Modified response code.

For more information about conditional requests, see RFC 7232.

The following operations are related to GetObject:

" + "documentation":"

Retrieves objects from Amazon S3. To use GET, you must have READ access to the object. If you grant READ access to the anonymous user, you can return the object without using an authorization header.

An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg.

To get an object from such a logical hierarchy, specify the full key name for the object in the GET operation. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg, specify the resource as /photos/2006/February/sample.jpg. For a path-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, specify the resource as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host Header Bucket Specification.

To distribute large files to many people, you can save bandwidth costs by using BitTorrent. For more information, see Amazon S3 Torrent. For more information about returning the ACL of an object, see GetObjectAcl.

If the object you are retrieving is stored in the S3 Glacier or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this action returns an InvalidObjectStateError error. For information about restoring archived objects, see Restoring Archived Objects.

Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.

If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers:

For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).
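
For illustration, a boto3 sketch of a GET for an SSE-C object using the same customer-provided key headers listed earlier for CreateMultipartUpload; the bucket, key, and key file path are placeholders:

    import boto3

    s3 = boto3.client("s3")

    # The same 256-bit key used when the object was stored must be supplied on the GET.
    with open("/path/to/sse-c.key", "rb") as f:        # placeholder key file
        sse_c_key = f.read()

    obj = s3.get_object(
        Bucket="my-bucket",                            # placeholder
        Key="photos/2006/February/sample.jpg",
        SSECustomerAlgorithm="AES256",                 # x-amz-server-side-encryption-customer-algorithm
        SSECustomerKey=sse_c_key,                      # x-amz-server-side-encryption-customer-key
    )
    body = obj["Body"].read()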

Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging action), the response also returns the x-amz-tagging-count header that provides the count of tags associated with the object. You can use GetObjectTagging to retrieve the tag set associated with an object.

Permissions

You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

Versioning

By default, the GET action returns the current version of an object. To return a different version, use the versionId subresource.

If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.

For more information about versioning, see PutBucketVersioning.

Overriding Response Header Values

There are times when you want to override certain response header values in a GET response. For example, you might override the Content-Disposition response header value in your GET request.

You can override values for a set of response headers using the following query parameters. These response header values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the GET response are Content-Type, Content-Language, Expires, Cache-Control, Content-Disposition, and Content-Encoding. To override these header values in the GET response, you use the following request parameters.

You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. They cannot be used with an unsigned (anonymous) request.
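
A boto3 sketch of overriding response headers on a signed GET and on a presigned URL; the bucket, key, and filename are placeholders:

    import boto3

    s3 = boto3.client("s3")

    # Override Content-Type and Content-Disposition on this response only; the
    # stored object metadata is unchanged.
    obj = s3.get_object(
        Bucket="my-bucket",                                          # placeholder
        Key="reports/quarterly.csv",                                 # placeholder
        ResponseContentType="text/csv",                              # response-content-type
        ResponseContentDisposition='attachment; filename="q1.csv"',  # response-content-disposition
    )

    # The same overrides work on a presigned URL, which satisfies the signing requirement.
    url = s3.generate_presigned_url(
        "get_object",
        Params={
            "Bucket": "my-bucket",
            "Key": "reports/quarterly.csv",
            "ResponseContentDisposition": 'attachment; filename="q1.csv"',
        },
        ExpiresIn=300,
    )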

Additional Considerations about Request Headers

If both the If-Match and If-Unmodified-Since headers are present in the request, and the If-Match condition evaluates to true while the If-Unmodified-Since condition evaluates to false, then S3 returns 200 OK and the requested data.

If both the If-None-Match and If-Modified-Since headers are present in the request, and the If-None-Match condition evaluates to false while the If-Modified-Since condition evaluates to true, then S3 returns a 304 Not Modified response code.

For more information about conditional requests, see RFC 7232.

The following operations are related to GetObject:

" }, "GetObjectAcl":{ "name":"GetObjectAcl", @@ -564,7 +564,7 @@ }, "input":{"shape":"GetObjectTaggingRequest"}, "output":{"shape":"GetObjectTaggingOutput"}, - "documentation":"

Returns the tag-set of an object. You send the GET request against the tagging subresource associated with the object.

To use this operation, you must have permission to perform the s3:GetObjectTagging action. By default, the GET operation returns information about current version of an object. For a versioned bucket, you can have multiple versions of an object in your bucket. To retrieve tags of any other version, use the versionId query parameter. You also need permission for the s3:GetObjectVersionTagging action.

By default, the bucket owner has this permission and can grant this permission to others.

For information about the Amazon S3 object tagging feature, see Object Tagging.

The following operation is related to GetObjectTagging:

" + "documentation":"

Returns the tag-set of an object. You send the GET request against the tagging subresource associated with the object.

To use this operation, you must have permission to perform the s3:GetObjectTagging action. By default, the GET action returns information about the current version of an object. For a versioned bucket, you can have multiple versions of an object in your bucket. To retrieve tags of any other version, use the versionId query parameter. You also need permission for the s3:GetObjectVersionTagging action.

By default, the bucket owner has this permission and can grant this permission to others.

For information about the Amazon S3 object tagging feature, see Object Tagging.
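
A boto3 sketch; the bucket, key, and version ID are placeholders, and omitting VersionId returns the tags of the current version:

    import boto3

    s3 = boto3.client("s3")

    resp = s3.get_object_tagging(
        Bucket="my-bucket",                       # placeholder
        Key="photos/2006/February/sample.jpg",    # placeholder
        VersionId="example-version-id",           # omit to get the current version's tags
    )
    for tag in resp["TagSet"]:
        print(tag["Key"], "=", tag["Value"])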

The following action is related to GetObjectTagging:

" }, "GetObjectTorrent":{ "name":"GetObjectTorrent", @@ -575,7 +575,7 @@ "input":{"shape":"GetObjectTorrentRequest"}, "output":{"shape":"GetObjectTorrentOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGETtorrent.html", - "documentation":"

Returns torrent files from a bucket. BitTorrent can save you bandwidth when you're distributing large files. For more information about BitTorrent, see Using BitTorrent with Amazon S3.

You can get torrent only for objects that are less than 5 GB in size, and that are not encrypted using server-side encryption with a customer-provided encryption key.

To use GET, you must have READ access to the object.

This action is not supported by Amazon S3 on Outposts.

The following operation is related to GetObjectTorrent:

" + "documentation":"

Returns torrent files from a bucket. BitTorrent can save you bandwidth when you're distributing large files. For more information about BitTorrent, see Using BitTorrent with Amazon S3.

You can get torrent only for objects that are less than 5 GB in size, and that are not encrypted using server-side encryption with a customer-provided encryption key.

To use GET, you must have READ access to the object.

This action is not supported by Amazon S3 on Outposts.

The following action is related to GetObjectTorrent:

" }, "GetPublicAccessBlock":{ "name":"GetPublicAccessBlock", @@ -598,7 +598,7 @@ {"shape":"NoSuchBucket"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketHEAD.html", - "documentation":"

This operation is useful to determine if a bucket exists and you have permission to access it. The operation returns a 200 OK if the bucket exists and you have permission to access it. Otherwise, the operation might return responses such as 404 Not Found and 403 Forbidden.

To use this operation, you must have permissions to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

" + "documentation":"

This action is useful to determine if a bucket exists and you have permission to access it. The action returns a 200 OK if the bucket exists and you have permission to access it.

If the bucket does not exist or you do not have permission to access it, the HEAD request returns a generic 404 Not Found or 403 Forbidden code. A message body is not included, so you cannot determine the exception beyond these error codes.

To use this operation, you must have permissions to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
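
For illustration, a boto3 sketch that distinguishes the 404 and 403 cases by status code, since the HEAD response carries no message body; the bucket name is a placeholder:

    import boto3
    from botocore.exceptions import ClientError

    s3 = boto3.client("s3")

    def bucket_accessible(name):
        """Return True if the bucket exists and the caller may access it."""
        try:
            s3.head_bucket(Bucket=name)
            return True
        except ClientError as err:
            # HEAD returns no body, so only the status code distinguishes the cases.
            status = err.response["ResponseMetadata"]["HTTPStatusCode"]
            if status in (403, 404):
                return False
            raise

    print(bucket_accessible("my-bucket"))   # placeholder bucket name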

" }, "HeadObject":{ "name":"HeadObject", @@ -612,7 +612,7 @@ {"shape":"NoSuchKey"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectHEAD.html", - "documentation":"

The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.

A HEAD request has the same options as a GET operation on an object. The response is identical to the GET response except that there is no response body.

If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers:

For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.

Request headers are limited to 8 KB in size. For more information, see Common Request Headers.

Consider the following when using request headers:

For more information about conditional requests, see RFC 7232.

Permissions

You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

The following operation is related to HeadObject:

" + "documentation":"

The HEAD action retrieves metadata from an object without returning the object itself. This action is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.

A HEAD request has the same options as a GET action on an object. The response is identical to the GET response except that there is no response body. Because of this, if the HEAD request generates an error, it returns a generic 404 Not Found or 403 Forbidden code. It is not possible to retrieve the exact exception beyond these error codes.

If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers:

For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

  • Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.

  • The last modified property in this case is the creation date of the object.

Request headers are limited to 8 KB in size. For more information, see Common Request Headers.

Consider the following when using request headers:

For more information about conditional requests, see RFC 7232.

Permissions

You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.
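
For illustration only (names and region are placeholder assumptions), a metadata-only HEAD request through a botocore client might look like the following:

import botocore.session

s3 = botocore.session.get_session().create_client('s3', region_name='us-east-1')

# Retrieves metadata only; there is no response body.
# Objects stored with SSE-C additionally require the SSECustomerAlgorithm,
# SSECustomerKey, and SSECustomerKeyMD5 parameters.
head = s3.head_object(Bucket='example-bucket', Key='example-key')
print(head['ContentLength'], head['LastModified'], head.get('Metadata', {}))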

The following action is related to HeadObject:

" }, "ListBucketAnalyticsConfigurations":{ "name":"ListBucketAnalyticsConfigurations", @@ -622,7 +622,7 @@ }, "input":{"shape":"ListBucketAnalyticsConfigurationsRequest"}, "output":{"shape":"ListBucketAnalyticsConfigurationsOutput"}, - "documentation":"

Lists the analytics configurations for the bucket. You can have up to 1,000 analytics configurations per bucket.

This operation supports list pagination and does not return more than 100 configurations at a time. You should always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there will be a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.

To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.

The following operations are related to ListBucketAnalyticsConfigurations:

" + "documentation":"

Lists the analytics configurations for the bucket. You can have up to 1,000 analytics configurations per bucket.

This action supports list pagination and does not return more than 100 configurations at a time. You should always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there will be a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.
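
A sketch of the continuation-token pattern just described, using a botocore client (bucket name and region are placeholders):

import botocore.session

s3 = botocore.session.get_session().create_client('s3', region_name='us-east-1')

kwargs = {'Bucket': 'example-bucket'}
while True:
    page = s3.list_bucket_analytics_configurations(**kwargs)
    for config in page.get('AnalyticsConfigurationList', []):
        print(config['Id'])
    if not page.get('IsTruncated'):
        break
    # Pass the token back as continuation-token to GET the next page.
    kwargs['ContinuationToken'] = page['NextContinuationToken']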

To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.

The following operations are related to ListBucketAnalyticsConfigurations:

" }, "ListBucketIntelligentTieringConfigurations":{ "name":"ListBucketIntelligentTieringConfigurations", @@ -642,7 +642,7 @@ }, "input":{"shape":"ListBucketInventoryConfigurationsRequest"}, "output":{"shape":"ListBucketInventoryConfigurationsOutput"}, - "documentation":"

Returns a list of inventory configurations for the bucket. You can have up to 1,000 analytics configurations per bucket.

This operation supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there is a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.

To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about the Amazon S3 inventory feature, see Amazon S3 Inventory

The following operations are related to ListBucketInventoryConfigurations:

" + "documentation":"

Returns a list of inventory configurations for the bucket. You can have up to 1,000 inventory configurations per bucket.

This action supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there is a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.

To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.

The following operations are related to ListBucketInventoryConfigurations:

" }, "ListBucketMetricsConfigurations":{ "name":"ListBucketMetricsConfigurations", @@ -652,7 +652,7 @@ }, "input":{"shape":"ListBucketMetricsConfigurationsRequest"}, "output":{"shape":"ListBucketMetricsConfigurationsOutput"}, - "documentation":"

Lists the metrics configurations for the bucket. The metrics configurations are only for the request metrics of the bucket and do not provide information on daily storage metrics. You can have up to 1,000 configurations per bucket.

This operation supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there is a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.

To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For more information about metrics configurations and CloudWatch request metrics, see Monitoring Metrics with Amazon CloudWatch.

The following operations are related to ListBucketMetricsConfigurations:

" + "documentation":"

Lists the metrics configurations for the bucket. The metrics configurations are only for the request metrics of the bucket and do not provide information on daily storage metrics. You can have up to 1,000 configurations per bucket.

This action supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there is a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.

To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For more information about metrics configurations and CloudWatch request metrics, see Monitoring Metrics with Amazon CloudWatch.

The following operations are related to ListBucketMetricsConfigurations:

" }, "ListBuckets":{ "name":"ListBuckets", @@ -674,7 +674,7 @@ "input":{"shape":"ListMultipartUploadsRequest"}, "output":{"shape":"ListMultipartUploadsOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadListMPUpload.html", - "documentation":"

This operation lists in-progress multipart uploads. An in-progress multipart upload is a multipart upload that has been initiated using the Initiate Multipart Upload request, but has not yet been completed or aborted.

This operation returns at most 1,000 multipart uploads in the response. 1,000 multipart uploads is the maximum number of uploads a response can include, which is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads parameter in the response. If additional multipart uploads satisfy the list criteria, the response will contain an IsTruncated element with the value true. To list the additional multipart uploads, use the key-marker and upload-id-marker request parameters.

In the response, the uploads are sorted by key. If your application has initiated more than one multipart upload using the same object key, then uploads in the response are first sorted by key. Additionally, uploads are sorted in ascending order within each key by the upload initiation time.

For more information on multipart uploads, see Uploading Objects Using Multipart Upload.

For information on permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

The following operations are related to ListMultipartUploads:

" + "documentation":"

This action lists in-progress multipart uploads. An in-progress multipart upload is a multipart upload that has been initiated using the Initiate Multipart Upload request, but has not yet been completed or aborted.

This action returns at most 1,000 multipart uploads in the response. 1,000 multipart uploads is the maximum number of uploads a response can include, which is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads parameter in the request. If additional multipart uploads satisfy the list criteria, the response will contain an IsTruncated element with the value true. To list the additional multipart uploads, use the key-marker and upload-id-marker request parameters.

In the response, the uploads are sorted by key. If your application has initiated more than one multipart upload using the same object key, then uploads in the response are first sorted by key. Additionally, uploads are sorted in ascending order within each key by the upload initiation time.
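
A sketch of listing in-progress uploads and following the pagination markers described above (all names are placeholder assumptions):

import botocore.session

s3 = botocore.session.get_session().create_client('s3', region_name='us-east-1')

kwargs = {'Bucket': 'example-bucket', 'MaxUploads': 100}
while True:
    page = s3.list_multipart_uploads(**kwargs)
    for upload in page.get('Uploads', []):
        print(upload['Key'], upload['UploadId'], upload['Initiated'])
    if not page.get('IsTruncated'):
        break
    kwargs['KeyMarker'] = page['NextKeyMarker']
    kwargs['UploadIdMarker'] = page['NextUploadIdMarker']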

For more information on multipart uploads, see Uploading Objects Using Multipart Upload.

For information on permissions required to use the multipart upload API, see Multipart Upload and Permissions.

The following operations are related to ListMultipartUploads:

" }, "ListObjectVersions":{ "name":"ListObjectVersions", @@ -700,7 +700,7 @@ {"shape":"NoSuchBucket"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html", - "documentation":"

Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Be sure to design your application to parse the contents of the response and handle it appropriately.

This API has been revised. We recommend that you use the newer version, ListObjectsV2, when developing applications. For backward compatibility, Amazon S3 continues to support ListObjects.

The following operations are related to ListObjects:

", + "documentation":"

Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Be sure to design your application to parse the contents of the response and handle it appropriately.

This action has been revised. We recommend that you use the newer version, ListObjectsV2, when developing applications. For backward compatibility, Amazon S3 continues to support ListObjects.

The following operations are related to ListObjects:

", "alias":"GetBucket" }, "ListObjectsV2":{ @@ -714,7 +714,7 @@ "errors":[ {"shape":"NoSuchBucket"} ], - "documentation":"

Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately.

To use this operation, you must have READ access to the bucket.

To use this operation in an AWS Identity and Access Management (IAM) policy, you must have permissions to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

This section describes the latest revision of the API. We recommend that you use this revised API for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API, ListObjects.

To get a list of your buckets, see ListBuckets.

The following operations are related to ListObjectsV2:

" + "documentation":"

Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. Objects are returned sorted in ascending order of their key names.
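
As a sketch, the built-in botocore paginator handles the continuation token for this operation; the bucket name, prefix, and region are placeholder assumptions:

import botocore.session

s3 = botocore.session.get_session().create_client('s3', region_name='us-east-1')

paginator = s3.get_paginator('list_objects_v2')
for page in paginator.paginate(Bucket='example-bucket', Prefix='photos/'):
    for obj in page.get('Contents', []):
        print(obj['Key'], obj['Size'])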

To use this operation, you must have READ access to the bucket.

To use this action in an AWS Identity and Access Management (IAM) policy, you must have permissions to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

This section describes the latest revision of this action. We recommend that you use this revised API for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API, ListObjects.

To get a list of your buckets, see ListBuckets.

The following operations are related to ListObjectsV2:

" }, "ListParts":{ "name":"ListParts", @@ -725,7 +725,7 @@ "input":{"shape":"ListPartsRequest"}, "output":{"shape":"ListPartsOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadListParts.html", - "documentation":"

Lists the parts that have been uploaded for a specific multipart upload. This operation must include the upload ID, which you obtain by sending the initiate multipart upload request (see CreateMultipartUpload). This request returns a maximum of 1,000 uploaded parts. The default number of parts returned is 1,000 parts. You can restrict the number of parts returned by specifying the max-parts request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an IsTruncated field with the value of true, and a NextPartNumberMarker element. In subsequent ListParts requests you can include the part-number-marker query string parameter and set its value to the NextPartNumberMarker field value from the previous response.

For more information on multipart uploads, see Uploading Objects Using Multipart Upload.

For information on permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

The following operations are related to ListParts:

" + "documentation":"

Lists the parts that have been uploaded for a specific multipart upload. This operation must include the upload ID, which you obtain by sending the initiate multipart upload request (see CreateMultipartUpload). This request returns a maximum of 1,000 uploaded parts. The default number of parts returned is 1,000 parts. You can restrict the number of parts returned by specifying the max-parts request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an IsTruncated field with the value of true, and a NextPartNumberMarker element. In subsequent ListParts requests you can include the part-number-marker query string parameter and set its value to the NextPartNumberMarker field value from the previous response.
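
A sketch of the part-number-marker pagination just described; the upload ID, names, and region are placeholders:

import botocore.session

s3 = botocore.session.get_session().create_client('s3', region_name='us-east-1')

kwargs = {'Bucket': 'example-bucket', 'Key': 'example-key',
          'UploadId': 'EXAMPLE-UPLOAD-ID', 'MaxParts': 1000}
while True:
    page = s3.list_parts(**kwargs)
    for part in page.get('Parts', []):
        print(part['PartNumber'], part['Size'], part['ETag'])
    if not page.get('IsTruncated'):
        break
    kwargs['PartNumberMarker'] = page['NextPartNumberMarker']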

For more information on multipart uploads, see Uploading Objects Using Multipart Upload.

For information on permissions required to use the multipart upload API, see Multipart Upload and Permissions.

The following operations are related to ListParts:

" }, "PutBucketAccelerateConfiguration":{ "name":"PutBucketAccelerateConfiguration", @@ -734,7 +734,7 @@ "requestUri":"/{Bucket}?accelerate" }, "input":{"shape":"PutBucketAccelerateConfigurationRequest"}, - "documentation":"

Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to Amazon S3.

To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

The Transfer Acceleration state of a bucket can be set to one of the following two values:

The GetBucketAccelerateConfiguration operation returns the transfer acceleration state of a bucket.

After setting the Transfer Acceleration state of a bucket to Enabled, it might take up to thirty minutes before the data transfer rates to the bucket increase.

The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").

For more information about transfer acceleration, see Transfer Acceleration.

The following operations are related to PutBucketAccelerateConfiguration:

" + "documentation":"

Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to Amazon S3.

To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

The Transfer Acceleration state of a bucket can be set to one of the following two values:

The GetBucketAccelerateConfiguration action returns the transfer acceleration state of a bucket.
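
A minimal sketch (placeholder bucket and region) of setting the acceleration state and then reading it back:

import botocore.session

s3 = botocore.session.get_session().create_client('s3', region_name='us-east-1')

s3.put_bucket_accelerate_configuration(
    Bucket='example-bucket',
    AccelerateConfiguration={'Status': 'Enabled'},  # or 'Suspended'
)
print(s3.get_bucket_accelerate_configuration(Bucket='example-bucket').get('Status'))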

After setting the Transfer Acceleration state of a bucket to Enabled, it might take up to thirty minutes before the data transfer rates to the bucket increase.

The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").

For more information about transfer acceleration, see Transfer Acceleration.

The following operations are related to PutBucketAccelerateConfiguration:

" }, "PutBucketAcl":{ "name":"PutBucketAcl", @@ -754,7 +754,7 @@ "requestUri":"/{Bucket}?analytics" }, "input":{"shape":"PutBucketAnalyticsConfigurationRequest"}, - "documentation":"

Sets an analytics configuration for the bucket (specified by the analytics configuration ID). You can have up to 1,000 analytics configurations per bucket.

You can choose to have storage class analysis export analysis reports sent to a comma-separated values (CSV) flat file. See the DataExport request element. Reports are updated daily and are based on the object filters that you configure. When selecting data export, you specify a destination bucket and an optional destination prefix where the file is written. You can export the data to a destination bucket in a different account. However, the destination bucket must be in the same Region as the bucket that you are making the PUT analytics configuration to. For more information, see Amazon S3 Analytics – Storage Class Analysis.

You must create a bucket policy on the destination bucket where the exported file is written to grant permissions to Amazon S3 to write objects to the bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.

To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

Special Errors

Related Resources

" + "documentation":"

Sets an analytics configuration for the bucket (specified by the analytics configuration ID). You can have up to 1,000 analytics configurations per bucket.

You can choose to have storage class analysis export analysis reports sent to a comma-separated values (CSV) flat file. See the DataExport request element. Reports are updated daily and are based on the object filters that you configure. When selecting data export, you specify a destination bucket and an optional destination prefix where the file is written. You can export the data to a destination bucket in a different account. However, the destination bucket must be in the same Region as the bucket that you are making the PUT analytics configuration to. For more information, see Amazon S3 Analytics – Storage Class Analysis.

You must create a bucket policy on the destination bucket where the exported file is written to grant permissions to Amazon S3 to write objects to the bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.

To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

Special Errors

Related Resources

" }, "PutBucketCors":{ "name":"PutBucketCors", @@ -764,7 +764,7 @@ }, "input":{"shape":"PutBucketCorsRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTcors.html", - "documentation":"

Sets the cors configuration for your bucket. If the configuration exists, Amazon S3 replaces it.

To use this operation, you must be allowed to perform the s3:PutBucketCORS action. By default, the bucket owner has this permission and can grant it to others.

You set this configuration on a bucket so that the bucket can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com by using the browser's XMLHttpRequest capability.

To enable cross-origin resource sharing (CORS) on a bucket, you add the cors subresource to the bucket. The cors subresource is an XML document in which you configure rules that identify origins and the HTTP methods that can be executed on your bucket. The document is limited to 64 KB in size.

When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a bucket, it evaluates the cors configuration on the bucket and uses the first CORSRule rule that matches the incoming browser request to enable a cross-origin request. For a rule to match, the following conditions must be met:

For more information about CORS, go to Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.

Related Resources

", + "documentation":"

Sets the cors configuration for your bucket. If the configuration exists, Amazon S3 replaces it.

To use this operation, you must be allowed to perform the s3:PutBucketCORS action. By default, the bucket owner has this permission and can grant it to others.

You set this configuration on a bucket so that the bucket can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com by using the browser's XMLHttpRequest capability.

To enable cross-origin resource sharing (CORS) on a bucket, you add the cors subresource to the bucket. The cors subresource is an XML document in which you configure rules that identify origins and the HTTP methods that can be executed on your bucket. The document is limited to 64 KB in size.
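
A hedged sketch of a single-rule cors configuration matching the example origin above; all values are placeholder assumptions:

import botocore.session

s3 = botocore.session.get_session().create_client('s3', region_name='us-east-1')

s3.put_bucket_cors(
    Bucket='example-bucket',
    CORSConfiguration={
        'CORSRules': [{
            'AllowedOrigins': ['http://www.example.com'],
            'AllowedMethods': ['GET', 'PUT'],
            'AllowedHeaders': ['*'],
            'MaxAgeSeconds': 3000,
        }]
    },
)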

When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a bucket, it evaluates the cors configuration on the bucket and uses the first CORSRule rule that matches the incoming browser request to enable a cross-origin request. For a rule to match, the following conditions must be met:

For more information about CORS, go to Enabling Cross-Origin Resource Sharing in the Amazon S3 User Guide.

Related Resources

", "httpChecksumRequired":true }, "PutBucketEncryption":{ @@ -774,7 +774,7 @@ "requestUri":"/{Bucket}?encryption" }, "input":{"shape":"PutBucketEncryptionRequest"}, - "documentation":"

This operation uses the encryption subresource to configure default encryption and Amazon S3 Bucket Key for an existing bucket.

Default encryption for a bucket can use server-side encryption with Amazon S3-managed keys (SSE-S3) or AWS KMS customer master keys (SSE-KMS). If you specify default encryption using SSE-KMS, you can also configure Amazon S3 Bucket Key. For information about default encryption, see Amazon S3 default bucket encryption in the Amazon Simple Storage Service Developer Guide. For more information about S3 Bucket Keys, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service Developer Guide.

This operation requires AWS Signature Version 4. For more information, see Authenticating Requests (AWS Signature Version 4).

To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Related Resources

", + "documentation":"

This action uses the encryption subresource to configure default encryption and Amazon S3 Bucket Key for an existing bucket.

Default encryption for a bucket can use server-side encryption with Amazon S3-managed keys (SSE-S3) or AWS KMS customer master keys (SSE-KMS). If you specify default encryption using SSE-KMS, you can also configure Amazon S3 Bucket Key. For information about default encryption, see Amazon S3 default bucket encryption in the Amazon S3 User Guide. For more information about S3 Bucket Keys, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.
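
A sketch of configuring SSE-KMS default encryption with an S3 Bucket Key; the key ARN, bucket name, and region are placeholder assumptions:

import botocore.session

s3 = botocore.session.get_session().create_client('s3', region_name='us-east-1')

s3.put_bucket_encryption(
    Bucket='example-bucket',
    ServerSideEncryptionConfiguration={
        'Rules': [{
            'ApplyServerSideEncryptionByDefault': {
                'SSEAlgorithm': 'aws:kms',
                'KMSMasterKeyID': 'arn:aws:kms:us-east-1:111122223333:key/EXAMPLE',
            },
            'BucketKeyEnabled': True,
        }]
    },
)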

This action requires AWS Signature Version 4. For more information, see Authenticating Requests (AWS Signature Version 4).

To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.

Related Resources

", "httpChecksumRequired":true }, "PutBucketIntelligentTieringConfiguration":{ @@ -784,7 +784,7 @@ "requestUri":"/{Bucket}?intelligent-tiering" }, "input":{"shape":"PutBucketIntelligentTieringConfigurationRequest"}, - "documentation":"

Puts a S3 Intelligent-Tiering configuration to the specified bucket.

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.

The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.

If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

Operations related to PutBucketIntelligentTieringConfiguration include:

" + "documentation":"

Puts an S3 Intelligent-Tiering configuration to the specified bucket. You can have up to 1,000 S3 Intelligent-Tiering configurations per bucket.

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.

The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.

If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

Operations related to PutBucketIntelligentTieringConfiguration include:

You only need S3 Intelligent-Tiering enabled on a bucket if you want to automatically move objects stored in the S3 Intelligent-Tiering storage class to the Archive Access or Deep Archive Access tier.
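
For illustration (placeholder names and region), a configuration that moves Intelligent-Tiering objects to the Archive Access tier after 90 days without access might be put like this:

import botocore.session

s3 = botocore.session.get_session().create_client('s3', region_name='us-east-1')

s3.put_bucket_intelligent_tiering_configuration(
    Bucket='example-bucket',
    Id='example-config',
    IntelligentTieringConfiguration={
        'Id': 'example-config',
        'Status': 'Enabled',
        'Tierings': [{'Days': 90, 'AccessTier': 'ARCHIVE_ACCESS'}],
    },
)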

Special Errors

" }, "PutBucketInventoryConfiguration":{ "name":"PutBucketInventoryConfiguration", @@ -793,7 +793,7 @@ "requestUri":"/{Bucket}?inventory" }, "input":{"shape":"PutBucketInventoryConfigurationRequest"}, - "documentation":"

This implementation of the PUT operation adds an inventory configuration (identified by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations per bucket.

Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly basis, and the results are published to a flat file. The bucket that is inventoried is called the source bucket, and the bucket where the inventory flat file is stored is called the destination bucket. The destination bucket must be in the same AWS Region as the source bucket.

When you configure an inventory for a source bucket, you specify the destination bucket where you want the inventory to be stored, and whether to generate the inventory daily or weekly. You can also configure what object metadata to include and whether to inventory all object versions or only current versions. For more information, see Amazon S3 Inventory in the Amazon Simple Storage Service Developer Guide.

You must create a bucket policy on the destination bucket to grant permissions to Amazon S3 to write objects to the bucket in the defined location. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.

To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Special Errors

Related Resources

" + "documentation":"

This implementation of the PUT action adds an inventory configuration (identified by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations per bucket.

Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly basis, and the results are published to a flat file. The bucket that is inventoried is called the source bucket, and the bucket where the inventory flat file is stored is called the destination bucket. The destination bucket must be in the same AWS Region as the source bucket.

When you configure an inventory for a source bucket, you specify the destination bucket where you want the inventory to be stored, and whether to generate the inventory daily or weekly. You can also configure what object metadata to include and whether to inventory all object versions or only current versions. For more information, see Amazon S3 Inventory in the Amazon S3 User Guide.
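
A sketch of adding a daily CSV inventory of current object versions; the source and destination bucket names and region are placeholder assumptions:

import botocore.session

s3 = botocore.session.get_session().create_client('s3', region_name='us-east-1')

s3.put_bucket_inventory_configuration(
    Bucket='example-source-bucket',
    Id='example-inventory',
    InventoryConfiguration={
        'Id': 'example-inventory',
        'IsEnabled': True,
        'IncludedObjectVersions': 'Current',
        'Schedule': {'Frequency': 'Daily'},
        'Destination': {
            'S3BucketDestination': {
                'Bucket': 'arn:aws:s3:::example-destination-bucket',
                'Format': 'CSV',
            }
        },
    },
)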

You must create a bucket policy on the destination bucket to grant permissions to Amazon S3 to write objects to the bucket in the defined location. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.

To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.

Special Errors

Related Resources

" }, "PutBucketLifecycle":{ "name":"PutBucketLifecycle", @@ -803,7 +803,7 @@ }, "input":{"shape":"PutBucketLifecycleRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html", - "documentation":"

For an updated version of this API, see PutBucketLifecycleConfiguration. This version has been deprecated. Existing lifecycle configurations will work. For new lifecycle configurations, use the updated API.

Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management in the Amazon Simple Storage Service Developer Guide.

By default, all Amazon S3 resources, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration) are private. Only the resource owner, the AWS account that created the resource, can access it. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, users must get the s3:PutLifecycleConfiguration permission.

You can also explicitly deny permissions. Explicit denial also supersedes any other permissions. If you want to prevent users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:

For more information about permissions, see Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

For more examples of transitioning objects to storage classes such as STANDARD_IA or ONEZONE_IA, see Examples of Lifecycle Configuration.

Related Resources

", + "documentation":"

For an updated version of this API, see PutBucketLifecycleConfiguration. This version has been deprecated. Existing lifecycle configurations will work. For new lifecycle configurations, use the updated API.

Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management in the Amazon S3 User Guide.

By default, all Amazon S3 resources, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration) are private. Only the resource owner, the AWS account that created the resource, can access it. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, users must get the s3:PutLifecycleConfiguration permission.

You can also explicitly deny permissions. Explicit denial also supersedes any other permissions. If you want to prevent users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:

For more information about permissions, see Managing Access Permissions to your Amazon S3 Resources in the Amazon S3 User Guide.

For more examples of transitioning objects to storage classes such as STANDARD_IA or ONEZONE_IA, see Examples of Lifecycle Configuration.

Related Resources

", "deprecated":true, "httpChecksumRequired":true }, @@ -814,7 +814,7 @@ "requestUri":"/{Bucket}?lifecycle" }, "input":{"shape":"PutBucketLifecycleConfigurationRequest"}, - "documentation":"

Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Managing Access Permissions to Your Amazon S3 Resources.

Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility. For the related API description, see PutBucketLifecycle.

Rules

You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. Each rule consists of the following:

For more information, see Object Lifecycle Management and Lifecycle Configuration Elements.

Permissions

By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the AWS account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must get the s3:PutLifecycleConfiguration permission.

You can also explicitly deny permissions. Explicit deny also supersedes any other permissions. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:

For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.

The following are related to PutBucketLifecycleConfiguration:

", + "documentation":"

Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Managing Access Permissions to Your Amazon S3 Resources.

Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility. For the related API description, see PutBucketLifecycle.

Rules

You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. Each rule consists of the following:

For more information, see Object Lifecycle Management and Lifecycle Configuration Elements.
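
A sketch of a single filtered rule that transitions objects to STANDARD_IA and later expires them; the bucket name, prefix, and region are placeholder assumptions:

import botocore.session

s3 = botocore.session.get_session().create_client('s3', region_name='us-east-1')

s3.put_bucket_lifecycle_configuration(
    Bucket='example-bucket',
    LifecycleConfiguration={
        'Rules': [{
            'ID': 'archive-logs',
            'Status': 'Enabled',
            'Filter': {'Prefix': 'logs/'},
            'Transitions': [{'Days': 30, 'StorageClass': 'STANDARD_IA'}],
            'Expiration': {'Days': 365},
        }]
    },
)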

Permissions

By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the AWS account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must get the s3:PutLifecycleConfiguration permission.

You can also explicitly deny permissions. Explicit deny also supersedes any other permissions. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:

For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.

The following are related to PutBucketLifecycleConfiguration:

", "httpChecksumRequired":true }, "PutBucketLogging":{ @@ -835,7 +835,7 @@ "requestUri":"/{Bucket}?metrics" }, "input":{"shape":"PutBucketMetricsConfigurationRequest"}, - "documentation":"

Sets a metrics configuration (specified by the metrics configuration ID) for the bucket. You can have up to 1,000 metrics configurations per bucket. If you're updating an existing metrics configuration, note that this is a full replacement of the existing metrics configuration. If you don't include the elements you want to keep, they are erased.

To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.

The following operations are related to PutBucketMetricsConfiguration:

GetBucketLifecycle has the following special error:

" + "documentation":"

Sets a metrics configuration (specified by the metrics configuration ID) for the bucket. You can have up to 1,000 metrics configurations per bucket. If you're updating an existing metrics configuration, note that this is a full replacement of the existing metrics configuration. If you don't include the elements you want to keep, they are erased.

To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.

The following operations are related to PutBucketMetricsConfiguration:

GetBucketLifecycle has the following special error:

" }, "PutBucketNotification":{ "name":"PutBucketNotification", @@ -856,7 +856,7 @@ "requestUri":"/{Bucket}?notification" }, "input":{"shape":"PutBucketNotificationConfigurationRequest"}, - "documentation":"

Enables notifications of specified events for a bucket. For more information about event notifications, see Configuring Event Notifications.

Using this API, you can replace an existing notification configuration. The configuration is an XML file that defines the event types that you want Amazon S3 to publish and the destination where you want Amazon S3 to publish an event notification when it detects an event of the specified type.

By default, your bucket has no event notifications configured. That is, the notification configuration will be an empty NotificationConfiguration.

<NotificationConfiguration>

</NotificationConfiguration>

This operation replaces the existing notification configuration with the configuration you include in the request body.

After Amazon S3 receives this request, it first verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner has permission to publish to it by sending a test notification. In the case of AWS Lambda destinations, Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more information, see Configuring Notifications for Amazon S3 Events.

You can disable notifications by adding the empty NotificationConfiguration element.

By default, only the bucket owner can configure notifications on a bucket. However, bucket owners can use a bucket policy to grant permission to other users to set this configuration with s3:PutBucketNotification permission.

The PUT notification is an atomic operation. For example, suppose your notification configuration includes SNS topic, SQS queue, and Lambda function configurations. When you send a PUT request with this configuration, Amazon S3 sends test messages to your SNS topic. If the message fails, the entire PUT operation will fail, and Amazon S3 will not add the configuration to your bucket.

Responses

If the configuration in the request body includes only one TopicConfiguration specifying only the s3:ReducedRedundancyLostObject event type, the response will also include the x-amz-sns-test-message-id header containing the message ID of the test notification sent to the topic.

The following operation is related to PutBucketNotificationConfiguration:

" + "documentation":"

Enables notifications of specified events for a bucket. For more information about event notifications, see Configuring Event Notifications.

Using this API, you can replace an existing notification configuration. The configuration is an XML file that defines the event types that you want Amazon S3 to publish and the destination where you want Amazon S3 to publish an event notification when it detects an event of the specified type.

By default, your bucket has no event notifications configured. That is, the notification configuration will be an empty NotificationConfiguration.

<NotificationConfiguration>

</NotificationConfiguration>

This action replaces the existing notification configuration with the configuration you include in the request body.

After Amazon S3 receives this request, it first verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner has permission to publish to it by sending a test notification. In the case of AWS Lambda destinations, Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more information, see Configuring Notifications for Amazon S3 Events.

You can disable notifications by adding the empty NotificationConfiguration element.
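
A sketch of both cases, publishing one event type to an SNS topic and disabling notifications with an empty configuration; the ARN, bucket name, and region are placeholder assumptions:

import botocore.session

s3 = botocore.session.get_session().create_client('s3', region_name='us-east-1')

# Publish object-created events to an SNS topic that already grants Amazon S3
# permission to publish to it.
s3.put_bucket_notification_configuration(
    Bucket='example-bucket',
    NotificationConfiguration={
        'TopicConfigurations': [{
            'TopicArn': 'arn:aws:sns:us-east-1:111122223333:example-topic',
            'Events': ['s3:ObjectCreated:*'],
        }]
    },
)

# An empty NotificationConfiguration disables notifications.
s3.put_bucket_notification_configuration(
    Bucket='example-bucket', NotificationConfiguration={},
)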

By default, only the bucket owner can configure notifications on a bucket. However, bucket owners can use a bucket policy to grant permission to other users to set this configuration with s3:PutBucketNotification permission.

The PUT notification is an atomic operation. For example, suppose your notification configuration includes SNS topic, SQS queue, and Lambda function configurations. When you send a PUT request with this configuration, Amazon S3 sends test messages to your SNS topic. If the message fails, the entire PUT action will fail, and Amazon S3 will not add the configuration to your bucket.

Responses

If the configuration in the request body includes only one TopicConfiguration specifying only the s3:ReducedRedundancyLostObject event type, the response will also include the x-amz-sns-test-message-id header containing the message ID of the test notification sent to the topic.

The following action is related to PutBucketNotificationConfiguration:

" }, "PutBucketOwnershipControls":{ "name":"PutBucketOwnershipControls", @@ -886,7 +886,7 @@ "requestUri":"/{Bucket}?replication" }, "input":{"shape":"PutBucketReplicationRequest"}, - "documentation":"

Creates a replication configuration or replaces an existing one. For more information, see Replication in the Amazon S3 Developer Guide.

To perform this operation, the user or role performing the operation must have the iam:PassRole permission.

Specify the replication configuration in the request body. In the replication configuration, you provide the name of the destination bucket or buckets where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your behalf, and other relevant information.

A replication configuration must include at least one rule, and can contain a maximum of 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in the source bucket. To choose additional subsets of objects to replicate, add a rule for each subset.

To specify a subset of the objects in the source bucket to apply a replication rule to, add the Filter element as a child of the Rule element. You can filter objects based on an object key prefix, one or more object tags, or both. When you add the Filter element in the configuration, you must also add the following elements: DeleteMarkerReplication, Status, and Priority.

If you are using an earlier version of the replication configuration, Amazon S3 handles replication of delete markers differently. For more information, see Backward Compatibility.

For information about enabling versioning on a bucket, see Using Versioning.

By default, a resource owner, in this case the AWS account that created the bucket, can perform this operation. The resource owner can also grant others permissions to perform the operation. For more information about permissions, see Specifying Permissions in a Policy and Managing Access Permissions to Your Amazon S3 Resources.

Handling Replication of Encrypted Objects

By default, Amazon S3 doesn't replicate objects that are stored at rest using server-side encryption with CMKs stored in AWS KMS. To replicate AWS KMS-encrypted objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about replication configuration, see Replicating Objects Created with SSE Using CMKs stored in AWS KMS.

For information on PutBucketReplication errors, see List of replication-related error codes

The following operations are related to PutBucketReplication:

", + "documentation":"

Creates a replication configuration or replaces an existing one. For more information, see Replication in the Amazon S3 Developer Guide.

To perform this operation, the user or role performing the action must have the iam:PassRole permission.

Specify the replication configuration in the request body. In the replication configuration, you provide the name of the destination bucket or buckets where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your behalf, and other relevant information.

A replication configuration must include at least one rule, and can contain a maximum of 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in the source bucket. To choose additional subsets of objects to replicate, add a rule for each subset.

To specify a subset of the objects in the source bucket to apply a replication rule to, add the Filter element as a child of the Rule element. You can filter objects based on an object key prefix, one or more object tags, or both. When you add the Filter element in the configuration, you must also add the following elements: DeleteMarkerReplication, Status, and Priority.
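
A sketch of a minimal configuration with one filtered rule, including the DeleteMarkerReplication, Status, and Priority elements noted above; the role ARN and bucket names are placeholder assumptions, and versioning is assumed to be enabled on the source bucket:

import botocore.session

s3 = botocore.session.get_session().create_client('s3', region_name='us-east-1')

s3.put_bucket_replication(
    Bucket='example-source-bucket',
    ReplicationConfiguration={
        'Role': 'arn:aws:iam::111122223333:role/example-replication-role',
        'Rules': [{
            'ID': 'replicate-documents',
            'Priority': 1,
            'Status': 'Enabled',
            'Filter': {'Prefix': 'documents/'},
            'DeleteMarkerReplication': {'Status': 'Disabled'},
            'Destination': {'Bucket': 'arn:aws:s3:::example-destination-bucket'},
        }]
    },
)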

If you are using an earlier version of the replication configuration, Amazon S3 handles replication of delete markers differently. For more information, see Backward Compatibility.

For information about enabling versioning on a bucket, see Using Versioning.

By default, a resource owner, in this case the AWS account that created the bucket, can perform this operation. The resource owner can also grant others permissions to perform the operation. For more information about permissions, see Specifying Permissions in a Policy and Managing Access Permissions to Your Amazon S3 Resources.

Handling Replication of Encrypted Objects

By default, Amazon S3 doesn't replicate objects that are stored at rest using server-side encryption with CMKs stored in AWS KMS. To replicate AWS KMS-encrypted objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about replication configuration, see Replicating Objects Created with SSE Using CMKs stored in AWS KMS.

For information on PutBucketReplication errors, see List of replication-related error codes

The following operations are related to PutBucketReplication:

", "httpChecksumRequired":true }, "PutBucketRequestPayment":{ @@ -908,7 +908,7 @@ }, "input":{"shape":"PutBucketTaggingRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTtagging.html", - "documentation":"

Sets the tags for a bucket.

Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging.

Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old value. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.

To use this operation, you must have permissions to perform the s3:PutBucketTagging action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

PutBucketTagging has the following special errors:

The following operations are related to PutBucketTagging:

", + "documentation":"

Sets the tags for a bucket.

Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging.

Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old value. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.

To use this operation, you must have permissions to perform the s3:PutBucketTagging action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

PutBucketTagging has the following special errors:

The following operations are related to PutBucketTagging:
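
As a rough boto3 sketch of the request described above (bucket name and tag values are placeholders), note that the supplied TagSet replaces any tags already on the bucket:

    import boto3

    s3 = boto3.client("s3")
    # The TagSet sent here becomes the bucket's complete set of tags.
    s3.put_bucket_tagging(
        Bucket="example-bucket",
        Tagging={"TagSet": [{"Key": "CostCenter", "Value": "analytics"}]},
    )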

", "httpChecksumRequired":true }, "PutBucketVersioning":{ @@ -930,7 +930,7 @@ }, "input":{"shape":"PutBucketWebsiteRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTwebsite.html", - "documentation":"

Sets the configuration of the website that is specified in the website subresource. To configure a bucket as a website, you can add this subresource on the bucket with website configuration information such as the file name of the index document and any redirect rules. For more information, see Hosting Websites on Amazon S3.

This PUT operation requires the S3:PutBucketWebsite permission. By default, only the bucket owner can configure the website attached to a bucket; however, bucket owners can allow other users to set the website configuration by writing a bucket policy that grants them the S3:PutBucketWebsite permission.

To redirect all website requests sent to the bucket's website endpoint, you add a website configuration with the following elements. Because all requests are sent to another website, you don't need to provide an index document name for the bucket.

If you want granular control over redirects, you can use the following elements to add routing rules that describe conditions for redirecting requests and information about the redirect destination. In this case, the website configuration must provide an index document for the bucket, because some requests might not be redirected.

Amazon S3 has a limitation of 50 routing rules per website configuration. If you require more than 50 routing rules, you can use object redirect. For more information, see Configuring an Object Redirect in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Sets the configuration of the website that is specified in the website subresource. To configure a bucket as a website, you can add this subresource on the bucket with website configuration information such as the file name of the index document and any redirect rules. For more information, see Hosting Websites on Amazon S3.

This PUT action requires the S3:PutBucketWebsite permission. By default, only the bucket owner can configure the website attached to a bucket; however, bucket owners can allow other users to set the website configuration by writing a bucket policy that grants them the S3:PutBucketWebsite permission.

To redirect all website requests sent to the bucket's website endpoint, you add a website configuration with the following elements. Because all requests are sent to another website, you don't need to provide an index document name for the bucket.

If you want granular control over redirects, you can use the following elements to add routing rules that describe conditions for redirecting requests and information about the redirect destination. In this case, the website configuration must provide an index document for the bucket, because some requests might not be redirected.

Amazon S3 has a limitation of 50 routing rules per website configuration. If you require more than 50 routing rules, you can use object redirect. For more information, see Configuring an Object Redirect in the Amazon S3 User Guide.
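A hedged boto3 sketch of a website configuration with an index document, an error document, and one routing rule (well under the 50-rule limit noted above); the bucket name and key prefixes are placeholders:

    import boto3

    s3 = boto3.client("s3")
    s3.put_bucket_website(
        Bucket="example-bucket",
        WebsiteConfiguration={
            "IndexDocument": {"Suffix": "index.html"},
            "ErrorDocument": {"Key": "error.html"},
            "RoutingRules": [
                {
                    # Redirect requests under docs/ to documents/ instead.
                    "Condition": {"KeyPrefixEquals": "docs/"},
                    "Redirect": {"ReplaceKeyPrefixWith": "documents/"},
                }
            ],
        },
    )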

", "httpChecksumRequired":true }, "PutObject":{ @@ -942,7 +942,7 @@ "input":{"shape":"PutObjectRequest"}, "output":{"shape":"PutObjectOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUT.html", - "documentation":"

Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it.

Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket.

Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. Amazon S3 does not provide object locking; if you need this, make sure to build it into your application layer or use versioning instead.

To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.

The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon Simple Storage Service Developer Guide.

Server-side Encryption

You can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. You have the option to provide your own encryption key or use AWS managed encryption keys (SSE-S3 or SSE-KMS). For more information, see Using Server-Side Encryption.

If you request server-side encryption using AWS Key Management Service (SSE-KMS), you can enable an S3 Bucket Key at the object-level. For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service Developer Guide.

Access Control List (ACL)-Specific Request Headers

You can use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.

Storage Class Options

By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 Service Developer Guide.

Versioning

If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects.

For more information about versioning, see Adding Objects to Versioning Enabled Buckets. For information about returning the versioning state of a bucket, see GetBucketVersioning.

Related Resources

" + "documentation":"

Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it.

Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket.

Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. Amazon S3 does not provide object locking; if you need this, make sure to build it into your application layer or use versioning instead.

To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.

The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon Simple Storage Service Developer Guide.

Server-side Encryption

You can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. You have the option to provide your own encryption key or use AWS managed encryption keys (SSE-S3 or SSE-KMS). For more information, see Using Server-Side Encryption.

If you request server-side encryption using AWS Key Management Service (SSE-KMS), you can enable an S3 Bucket Key at the object-level. For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.

Access Control List (ACL)-Specific Request Headers

You can use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.

Storage Class Options

By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 Service Developer Guide.

Versioning

If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects.

For more information about versioning, see Adding Objects to Versioning Enabled Buckets. For information about returning the versioning state of a bucket, see GetBucketVersioning.

Related Resources
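
The following boto3 sketch (bucket, key, and body are placeholders) illustrates the Content-MD5 integrity check and an SSE-KMS upload with an S3 Bucket Key requested at the object level, as described above:

    import base64
    import hashlib
    import boto3

    s3 = boto3.client("s3")
    body = b"hello world"
    # Content-MD5 lets Amazon S3 verify the payload it received.
    content_md5 = base64.b64encode(hashlib.md5(body).digest()).decode("ascii")
    s3.put_object(
        Bucket="example-bucket",
        Key="greetings/hello.txt",
        Body=body,
        ContentMD5=content_md5,
        ServerSideEncryption="aws:kms",   # SSE-KMS
        BucketKeyEnabled=True,            # request an S3 Bucket Key for this object
    )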

" }, "PutObjectAcl":{ "name":"PutObjectAcl", @@ -956,7 +956,7 @@ {"shape":"NoSuchKey"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUTacl.html", - "documentation":"

Uses the acl subresource to set the access control list (ACL) permissions for a new or existing object in an S3 bucket. You must have WRITE_ACP permission to set the ACL of an object. For more information, see What permissions can I grant? in the Amazon Simple Storage Service Developer Guide.

This action is not supported by Amazon S3 on Outposts.

Depending on your application needs, you can choose to set the ACL on an object using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, you can continue to use that approach. For more information, see Access Control List (ACL) Overview in the Amazon S3 Developer Guide.

Access Permissions

You can set access permissions using one of the following methods:

You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

Grantee Values

You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

Versioning

The ACL of an object is set at the object version level. By default, PUT sets the ACL of the current version of an object. To set the ACL of a different version, use the versionId subresource.

Related Resources

", + "documentation":"

Uses the acl subresource to set the access control list (ACL) permissions for a new or existing object in an S3 bucket. You must have WRITE_ACP permission to set the ACL of an object. For more information, see What permissions can I grant? in the Amazon S3 User Guide.

This action is not supported by Amazon S3 on Outposts.

Depending on your application needs, you can choose to set the ACL on an object using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, you can continue to use that approach. For more information, see Access Control List (ACL) Overview in the Amazon S3 Developer Guide.

Access Permissions

You can set access permissions using one of the following methods:

You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

Grantee Values

You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

Versioning

The ACL of an object is set at the object version level. By default, PUT sets the ACL of the current version of an object. To set the ACL of a different version, use the versionId subresource.

Related Resources
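
A minimal boto3 sketch using a canned ACL rather than explicit grants (the two cannot be combined); the bucket, key, and chosen canned ACL are placeholders, and VersionId could be added to target a non-current version:

    import boto3

    s3 = boto3.client("s3")
    s3.put_object_acl(
        Bucket="example-bucket",
        Key="reports/q1.pdf",
        ACL="bucket-owner-full-control",   # canned ACL; Grant* parameters are the explicit alternative
    )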

", "httpChecksumRequired":true }, "PutObjectLegalHold":{ @@ -1000,7 +1000,7 @@ }, "input":{"shape":"PutObjectTaggingRequest"}, "output":{"shape":"PutObjectTaggingOutput"}, - "documentation":"

Sets the supplied tag-set to an object that already exists in a bucket.

A tag is a key-value pair. You can associate tags with an object by sending a PUT request against the tagging subresource that is associated with the object. You can retrieve tags by sending a GET request. For more information, see GetObjectTagging.

For tagging-related restrictions on characters and encodings, see Tag Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 per object.

To use this operation, you must have permission to perform the s3:PutObjectTagging action. By default, the bucket owner has this permission and can grant this permission to others.

To put tags of any other version, use the versionId query parameter. You also need permission for the s3:PutObjectVersionTagging action.

For information about the Amazon S3 object tagging feature, see Object Tagging.

Special Errors

Related Resources

", + "documentation":"

Sets the supplied tag-set to an object that already exists in a bucket.

A tag is a key-value pair. You can associate tags with an object by sending a PUT request against the tagging subresource that is associated with the object. You can retrieve tags by sending a GET request. For more information, see GetObjectTagging.

For tagging-related restrictions on characters and encodings, see Tag Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 per object.

To use this operation, you must have permission to perform the s3:PutObjectTagging action. By default, the bucket owner has this permission and can grant this permission to others.

To put tags of any other version, use the versionId query parameter. You also need permission for the s3:PutObjectVersionTagging action.

For information about the Amazon S3 object tagging feature, see Object Tagging.

Special Errors

Related Resources
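
A short boto3 sketch of the tagging-subresource PUT described above; the bucket, key, and tags are placeholders, and passing VersionId would tag a non-current version instead (which also requires s3:PutObjectVersionTagging):

    import boto3

    s3 = boto3.client("s3")
    s3.put_object_tagging(
        Bucket="example-bucket",
        Key="reports/q1.pdf",
        Tagging={"TagSet": [{"Key": "project", "Value": "alpha"}]},
    )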

", "httpChecksumRequired":true }, "PutPublicAccessBlock":{ @@ -1025,7 +1025,7 @@ {"shape":"ObjectAlreadyInActiveTierError"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectRestore.html", - "documentation":"

Restores an archived copy of an object back into Amazon S3

This action is not supported by Amazon S3 on Outposts.

This action performs the following types of requests:

To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Querying Archives with Select Requests

You use a select type of request to perform SQL queries on archived objects. The archived objects that are being queried by the select request must be formatted as uncompressed comma-separated values (CSV) files. You can run queries and custom analytics on your archived data without having to restore your data to a hotter Amazon S3 tier. For an overview about select requests, see Querying Archived Objects in the Amazon Simple Storage Service Developer Guide.

When making a select request, do the following:

For more information about using SQL with S3 Glacier Select restore, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service Developer Guide.

When making a select request, you can also do the following:

The following are additional important facts about the select feature:

Restoring objects

Objects that you archive to the S3 Glacier or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers are not accessible in real time. For objects in Archive Access or Deep Archive Access tiers you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier. For objects in S3 Glacier or S3 Glacier Deep Archive storage classes you must first initiate a restore request, and then wait until a temporary copy of the object is available. To access an archived object, you must restore the object for the duration (number of days) that you specify.

To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.

When restoring an archived object (or using a select request), you can specify one of the following data access tier options in the Tier element of the request body:

For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon Simple Storage Service Developer Guide.

You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon Simple Storage Service Developer Guide.

To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon Simple Storage Service Developer Guide.

After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request; there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.

If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in the Amazon Simple Storage Service Developer Guide.

Responses

A successful operation returns either the 200 OK or 202 Accepted status code.

Special Errors

Related Resources

", + "documentation":"

Restores an archived copy of an object back into Amazon S3

This action is not supported by Amazon S3 on Outposts.

This action performs the following types of requests:

To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.

Querying Archives with Select Requests

You use a select type of request to perform SQL queries on archived objects. The archived objects that are being queried by the select request must be formatted as uncompressed comma-separated values (CSV) files. You can run queries and custom analytics on your archived data without having to restore your data to a hotter Amazon S3 tier. For an overview about select requests, see Querying Archived Objects in the Amazon S3 User Guide.

When making a select request, do the following:

For more information about using SQL with S3 Glacier Select restore, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon S3 User Guide.

When making a select request, you can also do the following:

The following are additional important facts about the select feature:

Restoring objects

Objects that you archive to the S3 Glacier or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers are not accessible in real time. For objects in Archive Access or Deep Archive Access tiers you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier. For objects in S3 Glacier or S3 Glacier Deep Archive storage classes you must first initiate a restore request, and then wait until a temporary copy of the object is available. To access an archived object, you must restore the object for the duration (number of days) that you specify.

To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.

When restoring an archived object (or using a select request), you can specify one of the following data access tier options in the Tier element of the request body:

For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon S3 User Guide.

You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon S3 User Guide.

To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon S3 User Guide.

After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request; there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.

If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in the Amazon S3 User Guide.

Responses

A successful action returns either the 200 OK or 202 Accepted status code.

Special Errors

Related Resources
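
A hedged boto3 sketch of a plain restore request (no select), asking for a 10-day temporary copy at the Standard retrieval tier; the bucket and key are placeholders:

    import boto3

    s3 = boto3.client("s3")
    s3.restore_object(
        Bucket="example-bucket",
        Key="archive/logs-2019.tar.gz",
        RestoreRequest={
            "Days": 10,
            "GlacierJobParameters": {"Tier": "Standard"},
        },
    )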

", "alias":"PostObjectRestore" }, "SelectObjectContent":{ @@ -1040,7 +1040,7 @@ "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} }, "output":{"shape":"SelectObjectContentOutput"}, - "documentation":"

This operation filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.

This action is not supported by Amazon S3 on Outposts.

For more information about Amazon S3 Select, see Selecting Content from Objects in the Amazon Simple Storage Service Developer Guide.

For more information about using SQL with Amazon S3 Select, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service Developer Guide.

Permissions

You must have s3:GetObject permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon Simple Storage Service Developer Guide.

Object Data Formats

You can use Amazon S3 Select to query objects that have the following format properties:

Working with the Response Body

Because the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding header with chunked as its value in the response. For more information, see Appendix: SelectObjectContent Response.

GetObject Support

The SelectObjectContent operation does not support the following GetObject functionality. For more information, see GetObject.

Special Errors

For a list of special errors for this operation, see List of SELECT Object Content Error Codes

Related Resources

" + "documentation":"

This action filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.

This action is not supported by Amazon S3 on Outposts.

For more information about Amazon S3 Select, see Selecting Content from Objects in the Amazon S3 User Guide.

For more information about using SQL with Amazon S3 Select, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon S3 User Guide.

Permissions

You must have s3:GetObject permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon S3 User Guide.

Object Data Formats

You can use Amazon S3 Select to query objects that have the following format properties:

Working with the Response Body

Because the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding header with chunked as its value in the response. For more information, see Appendix: SelectObjectContent Response.

GetObject Support

The SelectObjectContent action does not support the following GetObject functionality. For more information, see GetObject.

Special Errors

For a list of special errors for this operation, see List of SELECT Object Content Error Codes

Related Resources
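
A minimal boto3 sketch of a CSV select, assuming a placeholder object whose first row is a header; the Payload in the response is the chunked event stream described above:

    import boto3

    s3 = boto3.client("s3")
    resp = s3.select_object_content(
        Bucket="example-bucket",
        Key="data/records.csv",
        ExpressionType="SQL",
        Expression="SELECT s.name FROM S3Object s WHERE s.city = 'Seattle'",
        InputSerialization={"CSV": {"FileHeaderInfo": "USE"}},
        OutputSerialization={"CSV": {}},
    )
    # Records events carry the matching rows as bytes.
    for event in resp["Payload"]:
        if "Records" in event:
            print(event["Records"]["Payload"].decode("utf-8"), end="")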

" }, "UploadPart":{ "name":"UploadPart", @@ -1051,7 +1051,7 @@ "input":{"shape":"UploadPartRequest"}, "output":{"shape":"UploadPartOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPart.html", - "documentation":"

Uploads a part in a multipart upload.

In this operation, you provide part data in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the UploadPartCopy operation.

You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier, that you must include in your upload part request.

Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten. Each part must be at least 5 MB in size, except the last part. There is no size limit on the last part of your multipart upload.

To ensure that data is not corrupted when traversing the network, specify the Content-MD5 header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error.

If the upload request is signed with Signature Version 4, then AWS S3 uses the x-amz-content-sha256 header as a checksum instead of Content-MD5. For more information see Authenticating Requests: Using the Authorization Header (AWS Signature Version 4).

Note: After you initiate a multipart upload and upload one or more parts, you must either complete or abort the multipart upload to stop getting charged for storage of the uploaded parts. Only after you either complete or abort the multipart upload does Amazon S3 free up the parts storage and stop charging you for it.

For more information on multipart uploads, go to Multipart Upload Overview in the Amazon Simple Storage Service Developer Guide.

For information on the permissions required to use the multipart upload API, go to Multipart Upload API and Permissions in the Amazon Simple Storage Service Developer Guide.

You can optionally request server-side encryption where Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it for you when you access it. You have the option of providing your own encryption key, or you can use the AWS managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in the request must match the headers you used in the request to initiate the upload by using CreateMultipartUpload. For more information, go to Using Server-Side Encryption in the Amazon Simple Storage Service Developer Guide.

Server-side encryption is supported by the S3 Multipart Upload actions. Unless you are using a customer-provided encryption key, you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see CreateMultipartUpload.

If you requested server-side encryption using a customer-provided encryption key in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following headers.

Special Errors

Related Resources

" + "documentation":"

Uploads a part in a multipart upload.

In this operation, you provide part data in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the UploadPartCopy operation.

You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier, that you must include in your upload part request.

Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten. Each part must be at least 5 MB in size, except the last part. There is no size limit on the last part of your multipart upload.

To ensure that data is not corrupted when traversing the network, specify the Content-MD5 header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error.

If the upload request is signed with Signature Version 4, then AWS S3 uses the x-amz-content-sha256 header as a checksum instead of Content-MD5. For more information see Authenticating Requests: Using the Authorization Header (AWS Signature Version 4).

Note: After you initiate a multipart upload and upload one or more parts, you must either complete or abort the multipart upload to stop getting charged for storage of the uploaded parts. Only after you either complete or abort the multipart upload does Amazon S3 free up the parts storage and stop charging you for it.

For more information on multipart uploads, go to Multipart Upload Overview in the Amazon S3 User Guide.

For information on the permissions required to use the multipart upload API, go to Multipart Upload and Permissions in the Amazon S3 User Guide.

You can optionally request server-side encryption where Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it for you when you access it. You have the option of providing your own encryption key, or you can use the AWS managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in the request must match the headers you used in the request to initiate the upload by using CreateMultipartUpload. For more information, go to Using Server-Side Encryption in the Amazon S3 User Guide.

Server-side encryption is supported by the S3 Multipart Upload actions. Unless you are using a customer-provided encryption key, you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see CreateMultipartUpload.

If you requested server-side encryption using a customer-provided encryption key in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following headers.

Special Errors

Related Resources
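
A rough boto3 sketch of the initiate / upload part / complete sequence described above, with a single 5 MB placeholder part; the bucket and key are illustrative:

    import boto3

    s3 = boto3.client("s3")
    mpu = s3.create_multipart_upload(Bucket="example-bucket", Key="backups/large.bin")
    part = s3.upload_part(
        Bucket="example-bucket",
        Key="backups/large.bin",
        UploadId=mpu["UploadId"],
        PartNumber=1,
        Body=b"\0" * (5 * 1024 * 1024),  # every part except the last must be at least 5 MB
    )
    s3.complete_multipart_upload(
        Bucket="example-bucket",
        Key="backups/large.bin",
        UploadId=mpu["UploadId"],
        MultipartUpload={"Parts": [{"ETag": part["ETag"], "PartNumber": 1}]},
    )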

" }, "UploadPartCopy":{ "name":"UploadPartCopy", @@ -1062,7 +1062,20 @@ "input":{"shape":"UploadPartCopyRequest"}, "output":{"shape":"UploadPartCopyOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html", - "documentation":"

Uploads a part by copying data from an existing object as data source. You specify the data source by adding the request header x-amz-copy-source in your request and a byte range by adding the request header x-amz-copy-source-range in your request.

The minimum allowable part size for a multipart upload is 5 MB. For more information about multipart upload limits, go to Quick Facts in the Amazon Simple Storage Service Developer Guide.

Instead of using an existing object as part data, you might use the UploadPart operation and provide data in your request.

You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns a unique identifier, the upload ID, that you must include in your upload part request.

For more information about using the UploadPartCopy operation, see the following:

Note the following additional considerations about the request headers x-amz-copy-source-if-match, x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and x-amz-copy-source-if-modified-since:

Versioning

If your bucket has versioning enabled, you could have multiple versions of the same object. By default, x-amz-copy-source identifies the current version of the object to copy. If the current version is a delete marker and you don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404 error, because the object does not exist. If you specify versionId in the x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns an HTTP 400 error, because you are not allowed to specify a delete marker as a version for the x-amz-copy-source.

You can optionally specify a specific version of the source object to copy by adding the versionId subresource as shown in the following example:

x-amz-copy-source: /bucket/object?versionId=version id

Special Errors

Related Resources

" + "documentation":"

Uploads a part by copying data from an existing object as data source. You specify the data source by adding the request header x-amz-copy-source in your request and a byte range by adding the request header x-amz-copy-source-range in your request.

The minimum allowable part size for a multipart upload is 5 MB. For more information about multipart upload limits, go to Quick Facts in the Amazon S3 User Guide.

Instead of using an existing object as part data, you might use the UploadPart action and provide data in your request.

You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns a unique identifier, the upload ID, that you must include in your upload part request.

For more information about using the UploadPartCopy operation, see the following:

Note the following additional considerations about the request headers x-amz-copy-source-if-match, x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and x-amz-copy-source-if-modified-since:

Versioning

If your bucket has versioning enabled, you could have multiple versions of the same object. By default, x-amz-copy-source identifies the current version of the object to copy. If the current version is a delete marker and you don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404 error, because the object does not exist. If you specify versionId in the x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns an HTTP 400 error, because you are not allowed to specify a delete marker as a version for the x-amz-copy-source.

You can optionally specify a specific version of the source object to copy by adding the versionId subresource as shown in the following example:

x-amz-copy-source: /bucket/object?versionId=version id

Special Errors

Related Resources
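
A boto3 sketch of copying the first 5 MB of an existing object as one part of an in-progress upload; the bucket names, key, and upload ID are placeholders (the upload ID would come from CreateMultipartUpload):

    import boto3

    s3 = boto3.client("s3")
    s3.upload_part_copy(
        Bucket="example-destination-bucket",
        Key="backups/large.bin",
        UploadId="example-upload-id",        # from CreateMultipartUpload
        PartNumber=1,
        CopySource={"Bucket": "example-source-bucket", "Key": "backups/large.bin"},
        CopySourceRange="bytes=0-5242879",   # maps to x-amz-copy-source-range
    )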

" + }, + "WriteGetObjectResponse":{ + "name":"WriteGetObjectResponse", + "http":{ + "method":"POST", + "requestUri":"/WriteGetObjectResponse" + }, + "input":{"shape":"WriteGetObjectResponseRequest"}, + "documentation":"

Passes transformed objects to a GetObject operation when using Object Lambda Access Points. For information about Object Lambda Access Points, see Transforming objects with Object Lambda Access Points in the Amazon S3 User Guide.

This operation supports metadata that can be returned by GetObject, in addition to RequestRoute, RequestToken, StatusCode, ErrorCode, and ErrorMessage. The GetObject response metadata is supported so that the WriteGetObjectResponse caller, typically an AWS Lambda function, can provide the same metadata when it internally invokes GetObject. When WriteGetObjectResponse is called by a customer-owned Lambda function, the metadata returned to the end user GetObject call might differ from what Amazon S3 would normally return.
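
A hedged sketch of an Object Lambda handler that fetches the original object and writes back a trivially transformed copy; the event field names (getObjectContext with inputS3Url, outputRoute, and outputToken) are assumed from the S3 Object Lambda event shape, and the transformation is a placeholder:

    import urllib.request
    import boto3

    s3 = boto3.client("s3")

    def handler(event, context):
        ctx = event["getObjectContext"]
        # Fetch the original object through the presigned URL supplied in the event.
        original = urllib.request.urlopen(ctx["inputS3Url"]).read()
        s3.write_get_object_response(
            RequestRoute=ctx["outputRoute"],
            RequestToken=ctx["outputToken"],
            Body=original.upper(),   # placeholder transformation
            StatusCode=200,
        )
        return {"statusCode": 200}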

", + "authtype":"v4-unsigned-body", + "endpoint":{ + "hostPrefix":"{RequestRoute}." + } } }, "shapes":{ @@ -1097,7 +1110,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name to which the upload was taking place.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name to which the upload was taking place.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -1120,7 +1133,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -1435,6 +1448,10 @@ "AllowedOrigins" ], "members":{ + "ID":{ + "shape":"ID", + "documentation":"

Unique identifier for the rule. The value cannot be longer than 255 characters.

" + }, "AllowedHeaders":{ "shape":"AllowedHeaders", "documentation":"

Headers that are specified in the Access-Control-Request-Headers header. These headers are allowed in a preflight OPTIONS request. In response to any preflight OPTIONS request, Amazon S3 returns any requested headers that are allowed.

", @@ -1580,7 +1597,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

The name of the bucket that contains the newly created object.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

The name of the bucket that contains the newly created object.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

" }, "Key":{ "shape":"ObjectKey", @@ -1666,7 +1683,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -1720,7 +1737,7 @@ }, "KeyPrefixEquals":{ "shape":"KeyPrefixEquals", - "documentation":"

The object key name prefix when the redirect is applied. For example, to redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. To redirect requests for all pages with the prefix docs/, the key prefix will be /docs, which identifies all objects in the docs/ folder. Required when the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals is not specified. If both conditions are specified, both must be true for the redirect to be applied.

" + "documentation":"

The object key name prefix when the redirect is applied. For example, to redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. To redirect requests for all pages with the prefix docs/, the key prefix will be /docs, which identifies all objects in the docs/ folder. Required when the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals is not specified. If both conditions are specified, both must be true for the redirect to be applied.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

" } }, "documentation":"

A container for describing a condition that must be met for the specified redirect to apply. For example, 1. If request is for pages in the /docs folder, redirect to the /documents folder. 2. If request results in HTTP error 4xx, redirect request to another host where you might process the error.

" @@ -1825,7 +1842,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

The name of the destination bucket.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The name of the destination bucket.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -1861,7 +1878,7 @@ }, "CopySource":{ "shape":"CopySource", - "documentation":"

Specifies the source object for the copy operation. You specify the value in one of two formats, depending on whether you want to access the source object through an access point:

To copy a specific version of an object, append ?versionId=<version-id> to the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). If you don't specify a version ID, Amazon S3 copies the latest version of the source object.

", + "documentation":"

Specifies the source object for the copy operation. You specify the value in one of two formats, depending on whether you want to access the source object through an access point:

To copy a specific version of an object, append ?versionId=<version-id> to the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). If you don't specify a version ID, Amazon S3 copies the latest version of the source object.

", "location":"header", "locationName":"x-amz-copy-source" }, @@ -1993,7 +2010,7 @@ }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

Specifying this header with a COPY operation doesn’t affect bucket-level settings for S3 Bucket Key.

", + "documentation":"

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

Specifying this header with a COPY action doesn’t affect bucket-level settings for S3 Bucket Key.

", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, @@ -2046,13 +2063,13 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected destination bucket owner. If the destination bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected destination bucket owner. If the destination bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" }, "ExpectedSourceBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected source bucket owner. If the source bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected source bucket owner. If the source bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-source-expected-bucket-owner" } @@ -2063,11 +2080,11 @@ "members":{ "ETag":{ "shape":"ETag", - "documentation":"

Returns the ETag of the new object. The ETag reflects only changes to the contents of an object, not its metadata. The source and destination ETag is identical for a successfully copied object.

" + "documentation":"

Returns the ETag of the new object. The ETag reflects only changes to the contents of an object, not its metadata. The source and destination ETag is identical for a successfully copied non-multipart object.

" }, "LastModified":{ "shape":"LastModified", - "documentation":"

Returns the date that the object was last modified.

" + "documentation":"

Creation date of the object.

" } }, "documentation":"

Container for all response elements.

" @@ -2201,7 +2218,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

The name of the bucket to which the multipart upload was initiated.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The name of the bucket to which the multipart upload was initiated.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "locationName":"Bucket" }, "Key":{ @@ -2270,7 +2287,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

The name of the bucket to which to initiate the upload

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The name of the bucket to which to initiate the upload.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -2384,7 +2401,7 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

Specifies the ID of the symmetric customer managed AWS KMS CMK to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. For information about configuring using any of the officially supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 Developer Guide.

", + "documentation":"

Specifies the ID of the symmetric customer managed AWS KMS CMK to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. For information about configuring using any of the officially supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 Developer Guide.

", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -2396,7 +2413,7 @@ }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

Specifying this header with an object operation doesn’t affect bucket-level settings for S3 Bucket Key.

", + "documentation":"

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

Specifying this header with an object action doesn’t affect bucket-level settings for S3 Bucket Key.
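As a sketch only (bucket, key, and KMS key ID are hypothetical), these headers correspond to parameters such as the following on a CreateMultipartUpload call:

    import botocore.session

    s3 = botocore.session.get_session().create_client("s3", region_name="us-east-1")

    # Start a multipart upload encrypted with SSE-KMS, letting an S3 Bucket Key
    # reduce the number of requests made to AWS KMS for this object.
    resp = s3.create_multipart_upload(
        Bucket="example-bucket",                             # hypothetical
        Key="large-object.bin",
        ServerSideEncryption="aws:kms",
        SSEKMSKeyId="1234abcd-12ab-34cd-56ef-1234567890ab",  # hypothetical CMK ID
        BucketKeyEnabled=True,
    )
    upload_id = resp["UploadId"]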

", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, @@ -2431,7 +2448,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -2499,7 +2516,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -2517,7 +2534,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -2535,7 +2552,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -2583,7 +2600,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -2601,7 +2618,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -2628,7 +2645,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -2646,7 +2663,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -2664,7 +2681,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -2682,7 +2699,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -2700,7 +2717,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -2718,7 +2735,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -2736,7 +2753,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -2823,7 +2840,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name of the bucket containing the object.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name of the bucket containing the object.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -2858,7 +2875,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -2884,13 +2901,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name containing the objects from which to remove the tags.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name containing the objects from which to remove the tags.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, "Key":{ "shape":"ObjectKey", - "documentation":"

Name of the object key.

", + "documentation":"

The key that identifies the object in the bucket from which to remove all tags.

", "location":"uri", "locationName":"Key" }, @@ -2902,7 +2919,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -2922,7 +2939,7 @@ }, "Errors":{ "shape":"Errors", - "documentation":"

Container for a failed delete operation that describes the object that Amazon S3 attempted to delete and the error it encountered.

", + "documentation":"

Container for a failed delete action that describes the object that Amazon S3 attempted to delete and the error it encountered.
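For illustration only (bucket and keys are hypothetical), the Errors list returned by DeleteObjects can be inspected like this:

    import botocore.session

    s3 = botocore.session.get_session().create_client("s3", region_name="us-east-1")

    resp = s3.delete_objects(
        Bucket="example-bucket",  # hypothetical
        Delete={
            "Objects": [{"Key": "old/a.log"}, {"Key": "old/b.log"}],
            "Quiet": False,  # also report the keys that were deleted successfully
        },
    )

    # Each entry describes an object Amazon S3 attempted to delete and the error it hit.
    for err in resp.get("Errors", []):
        print(err["Key"], err["Code"], err["Message"])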

", "locationName":"Error" } } @@ -2936,7 +2953,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name containing the objects to delete.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name containing the objects to delete.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -2965,7 +2982,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -2984,7 +3001,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -3113,7 +3130,7 @@ }, "Code":{ "shape":"Code", - "documentation":"

The error code is a string that uniquely identifies an error condition. It is meant to be read and understood by programs that detect and handle errors by type.

Amazon S3 error codes

" + "documentation":"

The error code is a string that uniquely identifies an error condition. It is meant to be read and understood by programs that detect and handle errors by type.

Amazon S3 error codes

" }, "Message":{ "shape":"Message", @@ -3122,17 +3139,19 @@ }, "documentation":"

Container for all error elements.

" }, + "ErrorCode":{"type":"string"}, "ErrorDocument":{ "type":"structure", "required":["Key"], "members":{ "Key":{ "shape":"ObjectKey", - "documentation":"

The object key name to use when a 4XX class error occurs.

" + "documentation":"

The object key name to use when a 4XX class error occurs.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

" } }, "documentation":"

The error information.

" }, + "ErrorMessage":{"type":"string"}, "Errors":{ "type":"list", "member":{"shape":"Error"}, @@ -3175,7 +3194,7 @@ "documentation":"

" } }, - "documentation":"

Optional configuration to replicate existing source bucket objects. For more information, see Replicating Existing Objects in the Amazon S3 Developer Guide.

" + "documentation":"

Optional configuration to replicate existing source bucket objects. For more information, see Replicating Existing Objects in the Amazon S3 Developer Guide.

" }, "ExistingObjectReplicationStatus":{ "type":"string", @@ -3264,7 +3283,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -3296,7 +3315,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -3333,7 +3352,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -3361,7 +3380,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -3386,7 +3405,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -3454,7 +3473,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -3482,7 +3501,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -3510,7 +3529,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -3537,7 +3556,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -3561,7 +3580,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -3598,7 +3617,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -3616,7 +3635,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -3644,7 +3663,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -3672,7 +3691,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -3700,7 +3719,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -3725,7 +3744,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -3752,7 +3771,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -3780,7 +3799,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -3812,7 +3831,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -3851,7 +3870,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -3885,7 +3904,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name that contains the object for which to get the ACL information.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name that contains the object for which to get the ACL information.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -3908,7 +3927,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -3933,7 +3952,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name containing the object whose Legal Hold status you want to retrieve.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name containing the object whose Legal Hold status you want to retrieve.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -3956,7 +3975,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -3978,13 +3997,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket whose Object Lock configuration you want to retrieve.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket whose Object Lock configuration you want to retrieve.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -4018,13 +4037,13 @@ }, "Restore":{ "shape":"Restore", - "documentation":"

Provides information about object restoration operation and expiration time of the restored object copy.

", + "documentation":"

Provides information about object restoration action and expiration time of the restored object copy.

", "location":"header", "locationName":"x-amz-restore" }, "LastModified":{ "shape":"LastModified", - "documentation":"

Last modified date of the object

", + "documentation":"

Creation date of the object.

", "location":"header", "locationName":"Last-Modified" }, @@ -4195,7 +4214,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name containing the object.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name containing the object.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
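Illustrative sketch only (the access point ARN and key are made up): recent botocore/boto3 releases accept an access point ARN where the bucket name goes and route the request to the access point hostname:

    import botocore.session

    s3 = botocore.session.get_session().create_client("s3", region_name="us-west-2")

    # A made-up access point ARN supplied in place of the bucket name.
    access_point_arn = "arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"

    resp = s3.get_object(Bucket=access_point_arn, Key="data/object.json")
    print(resp["ContentLength"])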

", "location":"uri", "locationName":"Bucket" }, @@ -4279,13 +4298,13 @@ }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", - "documentation":"

Specifies the algorithm to use to when encrypting the object (for example, AES256).

", + "documentation":"

Specifies the algorithm to use when decrypting the object (for example, AES256).

", "location":"header", "locationName":"x-amz-server-side-encryption-customer-algorithm" }, "SSECustomerKey":{ "shape":"SSECustomerKey", - "documentation":"

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.

", + "documentation":"

Specifies the customer-provided encryption key for Amazon S3 used to encrypt the data. This value is used to decrypt the object when recovering it and must match the one used when storing the data. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.
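A minimal sketch, assuming a hypothetical bucket, key, and 32-byte customer key; botocore adds the corresponding key-MD5 header automatically:

    import botocore.session

    s3 = botocore.session.get_session().create_client("s3", region_name="us-east-1")

    customer_key = b"0" * 32  # hypothetical 256-bit customer-provided key

    # The same key supplied when the object was stored must be supplied again to read it.
    resp = s3.get_object(
        Bucket="example-bucket",      # hypothetical
        Key="secret.txt",
        SSECustomerAlgorithm="AES256",
        SSECustomerKey=customer_key,  # the key MD5 header is computed automatically
    )
    body = resp["Body"].read()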

", "location":"header", "locationName":"x-amz-server-side-encryption-customer-key" }, @@ -4308,12 +4327,13 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } } }, + "GetObjectResponseStatusCode":{"type":"integer"}, "GetObjectRetentionOutput":{ "type":"structure", "members":{ @@ -4333,7 +4353,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name containing the object whose retention settings you want to retrieve.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name containing the object whose retention settings you want to retrieve.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -4356,7 +4376,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -4387,7 +4407,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name containing the object for which to get the tagging information.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name containing the object for which to get the tagging information.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -4405,9 +4425,14 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" } } }, @@ -4453,7 +4478,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -4481,7 +4506,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -4563,13 +4588,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -4610,7 +4635,7 @@ }, "LastModified":{ "shape":"LastModified", - "documentation":"

Last modified date of the object

", + "documentation":"

Creation date of the object.

", "location":"header", "locationName":"Last-Modified" }, @@ -4768,7 +4793,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The name of the bucket containing the object.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The name of the bucket containing the object.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -4845,7 +4870,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -4865,7 +4890,7 @@ "members":{ "Suffix":{ "shape":"Suffix", - "documentation":"

A suffix that is appended to a request that is for a directory on the website endpoint (for example,if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html) The suffix must not be empty and must not include a slash character.

" + "documentation":"

A suffix that is appended to a request that is for a directory on the website endpoint (for example, if the suffix is index.html and you make a request to samplebucket/images/, the data that is returned will be for the object with the key name images/index.html). The suffix must not be empty and must not include a slash character.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

" } }, "documentation":"

Container for the Suffix element.

" @@ -4969,7 +4994,7 @@ "members":{ "Prefix":{ "shape":"Prefix", - "documentation":"

An object key name prefix that identifies the subset of objects to which the rule applies.

" + "documentation":"

An object key name prefix that identifies the subset of objects to which the rule applies.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

" }, "Tag":{"shape":"Tag"}, "And":{ @@ -5275,7 +5300,7 @@ }, "Prefix":{ "shape":"Prefix", - "documentation":"

Prefix identifying one or more objects to which the rule applies. This is No longer used; use Filter instead.

", + "documentation":"

Prefix identifying one or more objects to which the rule applies. This is no longer used; use Filter instead.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

", "deprecated":true }, "Filter":{"shape":"LifecycleRuleFilter"}, @@ -5319,7 +5344,7 @@ "members":{ "Prefix":{ "shape":"Prefix", - "documentation":"

Prefix identifying one or more objects to which the rule applies.

" + "documentation":"

Prefix identifying one or more objects to which the rule applies.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

" }, "Tag":{ "shape":"Tag", @@ -5374,7 +5399,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -5460,7 +5485,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -5506,7 +5531,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -5585,7 +5610,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The name of the bucket to which the multipart upload was initiated.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The name of the bucket to which the multipart upload was initiated.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -5626,7 +5651,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -5720,7 +5745,7 @@ }, "MaxKeys":{ "shape":"MaxKeys", - "documentation":"

Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The response might contain fewer keys but will never contain more. If additional keys satisfy the search criteria, but were not returned because max-keys was exceeded, the response contains <isTruncated>true</isTruncated>. To return the additional keys, see key-marker and version-id-marker.

", + "documentation":"

Sets the maximum number of keys returned in the response. By default the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more. If additional keys satisfy the search criteria, but were not returned because max-keys was exceeded, the response contains <isTruncated>true</isTruncated>. To return the additional keys, see key-marker and version-id-marker.

", "location":"querystring", "locationName":"max-keys" }, @@ -5738,7 +5763,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -5781,7 +5806,7 @@ }, "CommonPrefixes":{ "shape":"CommonPrefixList", - "documentation":"

All of the keys rolled up in a common prefix count as a single return when calculating the number of returns.

A response can contain CommonPrefixes only if you specify a delimiter.

CommonPrefixes contains all (if there are any) keys between Prefix and the next occurrence of the string specified by the delimiter.

CommonPrefixes lists keys that act like subdirectories in the directory specified by Prefix.

For example, if the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, the common prefix is notes/summer/. All of the keys that roll up into a common prefix count as a single return when calculating the number of returns.

" + "documentation":"

All of the keys (up to 1,000) rolled up in a common prefix count as a single return when calculating the number of returns.

A response can contain CommonPrefixes only if you specify a delimiter.

CommonPrefixes contains all (if there are any) keys between Prefix and the next occurrence of the string specified by the delimiter.

CommonPrefixes lists keys that act like subdirectories in the directory specified by Prefix.

For example, if the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, the common prefix is notes/summer/. All of the keys that roll up into a common prefix count as a single return when calculating the number of returns.

" }, "EncodingType":{ "shape":"EncodingType", @@ -5795,7 +5820,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The name of the bucket containing the objects.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The name of the bucket containing the objects.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -5818,7 +5843,7 @@ }, "MaxKeys":{ "shape":"MaxKeys", - "documentation":"

Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The response might contain fewer keys but will never contain more.

", + "documentation":"

Sets the maximum number of keys returned in the response. By default the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more.

", "location":"querystring", "locationName":"max-keys" }, @@ -5836,7 +5861,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -5855,7 +5880,7 @@ }, "Name":{ "shape":"BucketName", - "documentation":"

The bucket name.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

The bucket name.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

" }, "Prefix":{ "shape":"Prefix", @@ -5867,11 +5892,11 @@ }, "MaxKeys":{ "shape":"MaxKeys", - "documentation":"

Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The response might contain fewer keys but will never contain more.

" + "documentation":"

Sets the maximum number of keys returned in the response. By default the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more.

" }, "CommonPrefixes":{ "shape":"CommonPrefixList", - "documentation":"

All of the keys rolled up into a common prefix count as a single return when calculating the number of returns.

A response can contain CommonPrefixes only if you specify a delimiter.

CommonPrefixes contains all (if there are any) keys between Prefix and the next occurrence of the string specified by a delimiter.

CommonPrefixes lists keys that act like subdirectories in the directory specified by Prefix.

For example, if the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, the common prefix is notes/summer/. All of the keys that roll up into a common prefix count as a single return when calculating the number of returns.

" + "documentation":"

All of the keys (up to 1,000) rolled up into a common prefix count as a single return when calculating the number of returns.

A response can contain CommonPrefixes only if you specify a delimiter.

CommonPrefixes contains all (if there are any) keys between Prefix and the next occurrence of the string specified by a delimiter.

CommonPrefixes lists keys that act like subdirectories in the directory specified by Prefix.

For example, if the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, the common prefix is notes/summer/. All of the keys that roll up into a common prefix count as a single return when calculating the number of returns.

" }, "EncodingType":{ "shape":"EncodingType", @@ -5879,7 +5904,7 @@ }, "KeyCount":{ "shape":"KeyCount", - "documentation":"

KeyCount is the number of keys returned with this request. KeyCount will always be less than equals to MaxKeys field. Say you ask for 50 keys, your result will include less than equals 50 keys

" + "documentation":"

KeyCount is the number of keys returned with this request. KeyCount will always be less than or equal to the MaxKeys field. For example, if you ask for 50 keys, your result will include 50 keys or fewer.

" }, "ContinuationToken":{ "shape":"Token", @@ -5901,7 +5926,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

Bucket name to list.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Bucket name to list.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -5919,7 +5944,7 @@ }, "MaxKeys":{ "shape":"MaxKeys", - "documentation":"

Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The response might contain fewer keys but will never contain more.

", + "documentation":"

Sets the maximum number of keys returned in the response. By default the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more.

", "location":"querystring", "locationName":"max-keys" }, @@ -5955,7 +5980,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -6038,7 +6063,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The name of the bucket to which the parts are being uploaded.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The name of the bucket to which the parts are being uploaded.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -6073,7 +6098,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -6384,7 +6409,7 @@ }, "LastModified":{ "shape":"LastModified", - "documentation":"

The date the Object was Last Modified

" + "documentation":"

Creation date of the object.

" }, "ETag":{ "shape":"ETag", @@ -6409,7 +6434,7 @@ "type":"structure", "members":{ }, - "documentation":"

This operation is not allowed against this storage tier.

", + "documentation":"

This action is not allowed against this storage tier.

", "exception":true }, "ObjectCannedACL":{ @@ -6430,7 +6455,7 @@ "members":{ "Key":{ "shape":"ObjectKey", - "documentation":"

Key name of the object to delete.

" + "documentation":"

Key name of the object.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

" }, "VersionId":{ "shape":"ObjectVersionId", @@ -6536,7 +6561,7 @@ "type":"structure", "members":{ }, - "documentation":"

The source object of the COPY operation is not in the active tier and is only stored in Amazon S3 Glacier.

", + "documentation":"

The source object of the COPY action is not in the active tier and is only stored in Amazon S3 Glacier.

", "exception":true }, "ObjectOwnership":{ @@ -6803,7 +6828,7 @@ "locationName":"RestrictPublicBuckets" } }, - "documentation":"

The PublicAccessBlock configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. For more information about when Amazon S3 considers a bucket or object public, see The Meaning of \"Public\" in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

The PublicAccessBlock configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. For more information about when Amazon S3 considers a bucket or object public, see The Meaning of \"Public\" in the Amazon S3 User Guide.

" }, "PutBucketAccelerateConfigurationRequest":{ "type":"structure", @@ -6826,7 +6851,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -6893,7 +6918,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -6928,7 +6953,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -6950,7 +6975,7 @@ }, "CORSConfiguration":{ "shape":"CORSConfiguration", - "documentation":"

Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing in the Amazon S3 User Guide.

", "locationName":"CORSConfiguration", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} }, @@ -6962,7 +6987,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -6995,7 +7020,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -7059,7 +7084,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -7084,7 +7109,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -7115,7 +7140,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -7149,7 +7174,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -7184,7 +7209,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -7211,7 +7236,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -7245,7 +7270,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -7273,7 +7298,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" }, @@ -7317,7 +7342,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -7356,7 +7381,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -7378,7 +7403,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

>The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", + "documentation":"

The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", "location":"header", "locationName":"Content-MD5" }, @@ -7390,7 +7415,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -7424,7 +7449,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -7464,7 +7489,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -7498,7 +7523,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -7536,7 +7561,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name that contains the object to which you want to attach the ACL.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name that contains the object to which you want to attach the ACL.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -7578,7 +7603,7 @@ }, "Key":{ "shape":"ObjectKey", - "documentation":"

Key for which the PUT operation was initiated.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Key for which the PUT action was initiated.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Key" }, @@ -7595,7 +7620,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -7621,7 +7646,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name containing the object that you want to place a Legal Hold on.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name containing the object that you want to place a Legal Hold on.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -7656,7 +7681,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -7708,7 +7733,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -7799,7 +7824,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name to which the PUT operation was initiated.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name to which the PUT action was initiated.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -7877,7 +7902,7 @@ }, "Key":{ "shape":"ObjectKey", - "documentation":"

Object key for which the PUT operation was initiated.

", + "documentation":"

Object key for which the PUT action was initiated.

", "location":"uri", "locationName":"Key" }, @@ -7937,7 +7962,7 @@ }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

Specifying this header with a PUT operation doesn’t affect bucket-level settings for S3 Bucket Key.

", + "documentation":"

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

Specifying this header with a PUT action doesn’t affect bucket-level settings for S3 Bucket Key.

", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, @@ -7972,7 +7997,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -7998,7 +8023,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name that contains the object you want to apply this Object Retention configuration to.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name that contains the object you want to apply this Object Retention configuration to.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -8027,7 +8052,7 @@ }, "BypassGovernanceRetention":{ "shape":"BypassGovernanceRetention", - "documentation":"

Indicates whether this operation should bypass Governance-mode restrictions.

", + "documentation":"

Indicates whether this action should bypass Governance-mode restrictions.

", "location":"header", "locationName":"x-amz-bypass-governance-retention" }, @@ -8039,7 +8064,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -8067,7 +8092,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name containing the object.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name containing the object.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -8097,9 +8122,14 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" } }, "payload":"Tagging" @@ -8131,7 +8161,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -8171,7 +8201,7 @@ }, "Events":{ "shape":"EventList", - "documentation":"

A collection of bucket events for which to send notifications

", + "documentation":"

A collection of bucket events for which to send notifications.

", "locationName":"Event" }, "Queue":{ @@ -8227,11 +8257,11 @@ }, "ReplaceKeyPrefixWith":{ "shape":"ReplaceKeyPrefixWith", - "documentation":"

The object key prefix to use in the redirect request. For example, to redirect requests for all pages with prefix docs/ (objects in the docs/ folder) to documents/, you can set a condition block with KeyPrefixEquals set to docs/ and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required if one of the siblings is present. Can be present only if ReplaceKeyWith is not provided.

" + "documentation":"

The object key prefix to use in the redirect request. For example, to redirect requests for all pages with prefix docs/ (objects in the docs/ folder) to documents/, you can set a condition block with KeyPrefixEquals set to docs/ and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required if one of the siblings is present. Can be present only if ReplaceKeyWith is not provided.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

" }, "ReplaceKeyWith":{ "shape":"ReplaceKeyWith", - "documentation":"

The specific object key to use in the redirect request. For example, redirect request to error.html. Not required if one of the siblings is present. Can be present only if ReplaceKeyPrefixWith is not provided.

" + "documentation":"

The specific object key to use in the redirect request. For example, redirect request to error.html. Not required if one of the siblings is present. Can be present only if ReplaceKeyPrefixWith is not provided.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

" } }, "documentation":"

Specifies how requests are redirected. In the event of an error, you can specify a different error code to return.

" @@ -8308,7 +8338,7 @@ }, "Prefix":{ "shape":"Prefix", - "documentation":"

An object key name prefix that identifies the object or objects to which the rule applies. The maximum prefix length is 1,024 characters. To include all objects in a bucket, specify an empty string.

", + "documentation":"

An object key name prefix that identifies the object or objects to which the rule applies. The maximum prefix length is 1,024 characters. To include all objects in a bucket, specify an empty string.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

", "deprecated":true }, "Filter":{"shape":"ReplicationRuleFilter"}, @@ -8346,14 +8376,14 @@ "locationName":"Tag" } }, - "documentation":"

A container for specifying rule filters. The filters determine the subset of objects to which the rule applies. This element is required only if you specify more than one filter.

For example:

" + "documentation":"

A container for specifying rule filters. The filters determine the subset of objects to which the rule applies. This element is required only if you specify more than one filter.

For example:

" }, "ReplicationRuleFilter":{ "type":"structure", "members":{ "Prefix":{ "shape":"Prefix", - "documentation":"

An object key name prefix that identifies the subset of objects to which the rule applies.

" + "documentation":"

An object key name prefix that identifies the subset of objects to which the rule applies.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

" }, "Tag":{ "shape":"Tag", @@ -8453,6 +8483,8 @@ }, "documentation":"

Container for specifying if periodic QueryProgress messages should be sent.

" }, + "RequestRoute":{"type":"string"}, + "RequestToken":{"type":"string"}, "ResponseCacheControl":{"type":"string"}, "ResponseContentDisposition":{"type":"string"}, "ResponseContentEncoding":{"type":"string"}, @@ -8488,13 +8520,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name containing the object to restore.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name containing the object to restore.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, "Key":{ "shape":"ObjectKey", - "documentation":"

Object key for which the operation was initiated.

", + "documentation":"

Object key for which the action was initiated.

", "location":"uri", "locationName":"Key" }, @@ -8516,7 +8548,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -8576,7 +8608,7 @@ "documentation":"

Container for redirect information. You can redirect requests to another host, to another page, or with another protocol. In the event of an error, you can specify a different error code to return.

" } }, - "documentation":"

Specifies the redirect behavior and when a redirect is applied. For more information about routing rules, see Configuring advanced conditional redirects in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

Specifies the redirect behavior and when a redirect is applied. For more information about routing rules, see Configuring advanced conditional redirects in the Amazon S3 User Guide.

" }, "RoutingRules":{ "type":"list", @@ -8602,7 +8634,7 @@ }, "Prefix":{ "shape":"Prefix", - "documentation":"

Object key prefix that identifies one or more objects to which this rule applies.

" + "documentation":"

Object key prefix that identifies one or more objects to which this rule applies.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

" }, "Status":{ "shape":"ExpirationStatus", @@ -8610,13 +8642,13 @@ }, "Transition":{ "shape":"Transition", - "documentation":"

Specifies when an object transitions to a specified storage class. For more information about Amazon S3 lifecycle configuration rules, see Transitioning Objects Using Amazon S3 Lifecycle in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

Specifies when an object transitions to a specified storage class. For more information about Amazon S3 lifecycle configuration rules, see Transitioning Objects Using Amazon S3 Lifecycle in the Amazon S3 User Guide.

" }, "NoncurrentVersionTransition":{"shape":"NoncurrentVersionTransition"}, "NoncurrentVersionExpiration":{"shape":"NoncurrentVersionExpiration"}, "AbortIncompleteMultipartUpload":{"shape":"AbortIncompleteMultipartUpload"} }, - "documentation":"

Specifies lifecycle rules for an Amazon S3 bucket. For more information, see Put Bucket Lifecycle Configuration in the Amazon Simple Storage Service API Reference. For examples, see Put Bucket Lifecycle Configuration Examples

" + "documentation":"

Specifies lifecycle rules for an Amazon S3 bucket. For more information, see Put Bucket Lifecycle Configuration in the Amazon Simple Storage Service API Reference. For examples, see Put Bucket Lifecycle Configuration Examples.

" }, "Rules":{ "type":"list", @@ -8823,7 +8855,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -8901,7 +8933,7 @@ }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

Specifies whether Amazon S3 should use an S3 Bucket Key with server-side encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects are not affected. Setting the BucketKeyEnabled element to true causes Amazon S3 to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled.

For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

Specifies whether Amazon S3 should use an S3 Bucket Key with server-side encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects are not affected. Setting the BucketKeyEnabled element to true causes Amazon S3 to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled.

For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.

" } }, "documentation":"

Specifies the default server-side encryption configuration.

" @@ -9139,7 +9171,7 @@ }, "Events":{ "shape":"EventList", - "documentation":"

The Amazon S3 bucket event about which to send notifications. For more information, see Supported Event Types in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The Amazon S3 bucket event about which to send notifications. For more information, see Supported Event Types in the Amazon S3 User Guide.

", "locationName":"Event" }, "Filter":{"shape":"NotificationConfigurationFilter"} @@ -9188,7 +9220,7 @@ "documentation":"

The storage class to which you want the object to transition.

" } }, - "documentation":"

Specifies when an object transitions to a specified storage class. For more information about Amazon S3 lifecycle configuration rules, see Transitioning Objects Using Amazon S3 Lifecycle in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

Specifies when an object transitions to a specified storage class. For more information about Amazon S3 lifecycle configuration rules, see Transitioning Objects Using Amazon S3 Lifecycle in the Amazon S3 User Guide.

" }, "TransitionList":{ "type":"list", @@ -9278,13 +9310,13 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, "CopySource":{ "shape":"CopySource", - "documentation":"

Specifies the source object for the copy operation. You specify the value in one of two formats, depending on whether you want to access the source object through an access point:

To copy a specific version of an object, append ?versionId=<version-id> to the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). If you don't specify a version ID, Amazon S3 copies the latest version of the source object.

", + "documentation":"

Specifies the source object for the copy operation. You specify the value in one of two formats, depending on whether you want to access the source object through an access point:

To copy a specific version of an object, append ?versionId=<version-id> to the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). If you don't specify a version ID, Amazon S3 copies the latest version of the source object.

", "location":"header", "locationName":"x-amz-copy-source" }, @@ -9379,13 +9411,13 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected destination bucket owner. If the destination bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected destination bucket owner. If the destination bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" }, "ExpectedSourceBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected source bucket owner. If the source bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected source bucket owner. If the source bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-source-expected-bucket-owner" } @@ -9453,7 +9485,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

The name of the bucket to which the multipart upload was initiated.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The name of the bucket to which the multipart upload was initiated.

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "location":"uri", "locationName":"Bucket" }, @@ -9512,7 +9544,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", - "documentation":"

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -9566,6 +9598,231 @@ "documentation":"

Specifies website configuration parameters for an Amazon S3 bucket.

" }, "WebsiteRedirectLocation":{"type":"string"}, + "WriteGetObjectResponseRequest":{ + "type":"structure", + "required":[ + "RequestRoute", + "RequestToken" + ], + "members":{ + "RequestRoute":{ + "shape":"RequestRoute", + "documentation":"

The route prefix to the generated HTTP URL.

", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-request-route" + }, + "RequestToken":{ + "shape":"RequestToken", + "documentation":"

A single-use encrypted token that maps a WriteGetObjectResponse call to the end user's GetObject request.

", + "location":"header", + "locationName":"x-amz-request-token" + }, + "Body":{ + "shape":"Body", + "documentation":"

The object data.

", + "streaming":true + }, + "StatusCode":{ + "shape":"GetObjectResponseStatusCode", + "documentation":"

The integer status code for an HTTP response of a corresponding GetObject request.

Status Codes

", + "location":"header", + "locationName":"x-amz-fwd-status" + }, + "ErrorCode":{ + "shape":"ErrorCode", + "documentation":"

A string that uniquely identifies an error condition. Returned in the <Code> tag of the error XML response for a corresponding GetObject call. Cannot be used with a successful StatusCode header or when the transformed object is provided in the body.

", + "location":"header", + "locationName":"x-amz-fwd-error-code" + }, + "ErrorMessage":{ + "shape":"ErrorMessage", + "documentation":"

Contains a generic description of the error condition. Returned in the <Message> tag of the error XML response for a corresponding GetObject call. Cannot be used with a successful StatusCode header or when the transformed object is provided in the body.

", + "location":"header", + "locationName":"x-amz-fwd-error-message" + }, + "AcceptRanges":{ + "shape":"AcceptRanges", + "documentation":"

Indicates that a range of bytes was specified.

", + "location":"header", + "locationName":"x-amz-fwd-header-accept-ranges" + }, + "CacheControl":{ + "shape":"CacheControl", + "documentation":"

Specifies caching behavior along the request/reply chain.

", + "location":"header", + "locationName":"x-amz-fwd-header-Cache-Control" + }, + "ContentDisposition":{ + "shape":"ContentDisposition", + "documentation":"

Specifies presentational information for the object.

", + "location":"header", + "locationName":"x-amz-fwd-header-Content-Disposition" + }, + "ContentEncoding":{ + "shape":"ContentEncoding", + "documentation":"

Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.

", + "location":"header", + "locationName":"x-amz-fwd-header-Content-Encoding" + }, + "ContentLanguage":{ + "shape":"ContentLanguage", + "documentation":"

The language the content is in.

", + "location":"header", + "locationName":"x-amz-fwd-header-Content-Language" + }, + "ContentLength":{ + "shape":"ContentLength", + "documentation":"

The size of the content body in bytes.

", + "location":"header", + "locationName":"Content-Length" + }, + "ContentRange":{ + "shape":"ContentRange", + "documentation":"

The portion of the object returned in the response.

", + "location":"header", + "locationName":"x-amz-fwd-header-Content-Range" + }, + "ContentType":{ + "shape":"ContentType", + "documentation":"

A standard MIME type describing the format of the object data.

", + "location":"header", + "locationName":"x-amz-fwd-header-Content-Type" + }, + "DeleteMarker":{ + "shape":"DeleteMarker", + "documentation":"

Specifies whether an object stored in Amazon S3 is (true) or is not (false) a delete marker.

", + "location":"header", + "locationName":"x-amz-fwd-header-x-amz-delete-marker" + }, + "ETag":{ + "shape":"ETag", + "documentation":"

An opaque identifier assigned by a web server to a specific version of a resource found at a URL.

", + "location":"header", + "locationName":"x-amz-fwd-header-ETag" + }, + "Expires":{ + "shape":"Expires", + "documentation":"

The date and time at which the object is no longer cacheable.

", + "location":"header", + "locationName":"x-amz-fwd-header-Expires" + }, + "Expiration":{ + "shape":"Expiration", + "documentation":"

If the object stored in Amazon S3 has expiration configured (see PUT Bucket lifecycle), this header includes expiry-date and rule-id key-value pairs that provide object expiration information. The value of the rule-id is URL-encoded.

", + "location":"header", + "locationName":"x-amz-fwd-header-x-amz-expiration" + }, + "LastModified":{ + "shape":"LastModified", + "documentation":"

The date and time that the object was last modified.

", + "location":"header", + "locationName":"x-amz-fwd-header-Last-Modified" + }, + "MissingMeta":{ + "shape":"MissingMeta", + "documentation":"

Set to the number of metadata entries not returned in x-amz-meta headers. This can happen if you create metadata using an API like SOAP that supports more flexible metadata than the REST API. For example, using SOAP, you can create metadata whose values are not legal HTTP headers.

", + "location":"header", + "locationName":"x-amz-fwd-header-x-amz-missing-meta" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

A map of metadata to store with the object in S3.

", + "location":"headers", + "locationName":"x-amz-meta-" + }, + "ObjectLockMode":{ + "shape":"ObjectLockMode", + "documentation":"

Indicates whether an object stored in Amazon S3 has Object Lock enabled. For more information about S3 Object Lock, see Object Lock.

", + "location":"header", + "locationName":"x-amz-fwd-header-x-amz-object-lock-mode" + }, + "ObjectLockLegalHoldStatus":{ + "shape":"ObjectLockLegalHoldStatus", + "documentation":"

Indicates whether an object stored in Amazon S3 has an active legal hold.

", + "location":"header", + "locationName":"x-amz-fwd-header-x-amz-object-lock-legal-hold" + }, + "ObjectLockRetainUntilDate":{ + "shape":"ObjectLockRetainUntilDate", + "documentation":"

The date and time when Object Lock is configured to expire.

", + "location":"header", + "locationName":"x-amz-fwd-header-x-amz-object-lock-retain-until-date" + }, + "PartsCount":{ + "shape":"PartsCount", + "documentation":"

The count of parts this object has.

", + "location":"header", + "locationName":"x-amz-fwd-header-x-amz-mp-parts-count" + }, + "ReplicationStatus":{ + "shape":"ReplicationStatus", + "documentation":"

Indicates whether the request involves a bucket that is either a source or a destination in a replication rule. For more information about S3 Replication, see Replication.

", + "location":"header", + "locationName":"x-amz-fwd-header-x-amz-replication-status" + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-fwd-header-x-amz-request-charged" + }, + "Restore":{ + "shape":"Restore", + "documentation":"

Provides information about the object restoration operation and the expiration time of the restored object copy.

", + "location":"header", + "locationName":"x-amz-fwd-header-x-amz-restore" + }, + "ServerSideEncryption":{ + "shape":"ServerSideEncryption", + "documentation":"

The server-side encryption algorithm used when storing the requested object in Amazon S3 (for example, AES256, aws:kms).

", + "location":"header", + "locationName":"x-amz-fwd-header-x-amz-server-side-encryption" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "documentation":"

The encryption algorithm used if server-side encryption with a customer-provided encryption key was specified for the object stored in Amazon S3.

", + "location":"header", + "locationName":"x-amz-fwd-header-x-amz-server-side-encryption-customer-algorithm" + }, + "SSEKMSKeyId":{ + "shape":"SSEKMSKeyId", + "documentation":"

If present, specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object stored in Amazon S3.

", + "location":"header", + "locationName":"x-amz-fwd-header-x-amz-server-side-encryption-aws-kms-key-id" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "documentation":"

128-bit MD5 digest of customer-provided encryption key used in Amazon S3 to encrypt data stored in S3. For more information, see Protecting data using server-side encryption with customer-provided encryption keys (SSE-C).

", + "location":"header", + "locationName":"x-amz-fwd-header-x-amz-server-side-encryption-customer-key-MD5" + }, + "StorageClass":{ + "shape":"StorageClass", + "documentation":"

The class of storage used to store the object in Amazon S3.

", + "location":"header", + "locationName":"x-amz-fwd-header-x-amz-storage-class" + }, + "TagCount":{ + "shape":"TagCount", + "documentation":"

The number of tags, if any, on the object.

", + "location":"header", + "locationName":"x-amz-fwd-header-x-amz-tagging-count" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "documentation":"

An ID used to reference a specific version of the object.

", + "location":"header", + "locationName":"x-amz-fwd-header-x-amz-version-id" + }, + "BucketKeyEnabled":{ + "shape":"BucketKeyEnabled", + "documentation":"

Indicates whether the object stored in Amazon S3 uses an S3 bucket key for server-side encryption with AWS KMS (SSE-KMS).

", + "location":"header", + "locationName":"x-amz-fwd-header-x-amz-server-side-encryption-bucket-key-enabled" + } + }, + "payload":"Body" + }, "Years":{"type":"integer"} }, "documentation":"

" diff --git a/botocore/data/s3control/2018-08-20/paginators-1.json b/botocore/data/s3control/2018-08-20/paginators-1.json index ea142457..873eb23b 100644 --- a/botocore/data/s3control/2018-08-20/paginators-1.json +++ b/botocore/data/s3control/2018-08-20/paginators-1.json @@ -1,3 +1,10 @@ { - "pagination": {} + "pagination": { + "ListAccessPointsForObjectLambda": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ObjectLambdaAccessPointList" + } + } } diff --git a/botocore/data/s3control/2018-08-20/service-2.json b/botocore/data/s3control/2018-08-20/service-2.json index ea443762..59b68bec 100644 --- a/botocore/data/s3control/2018-08-20/service-2.json +++ b/botocore/data/s3control/2018-08-20/service-2.json @@ -23,7 +23,24 @@ "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, "output":{"shape":"CreateAccessPointResult"}, - "documentation":"

Creates an access point and associates it with the specified bucket. For more information, see Managing Data Access with Amazon S3 Access Points in the Amazon Simple Storage Service Developer Guide.

Using this action with Amazon S3 on Outposts

This action:

For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide .

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to CreateAccessPoint:

", + "documentation":"

Creates an access point and associates it with the specified bucket. For more information, see Managing Data Access with Amazon S3 Access Points in the Amazon Simple Storage Service User Guide.

S3 on Outposts only supports VPC-style Access Points.

For more information, see Accessing Amazon S3 on Outposts using virtual private cloud (VPC) only Access Points in the Amazon Simple Storage Service User Guide.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to CreateAccessPoint:

", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "CreateAccessPointForObjectLambda":{ + "name":"CreateAccessPointForObjectLambda", + "http":{ + "method":"PUT", + "requestUri":"/v20180820/accesspointforobjectlambda/{name}" + }, + "input":{ + "shape":"CreateAccessPointForObjectLambdaRequest", + "locationName":"CreateAccessPointForObjectLambdaRequest", + "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} + }, + "output":{"shape":"CreateAccessPointForObjectLambdaResult"}, + "documentation":"

Creates an Object Lambda Access Point. For more information, see Transforming objects with Object Lambda Access Points in the Amazon Simple Storage Service User Guide.

The following actions are related to CreateAccessPointForObjectLambda:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -40,7 +57,7 @@ {"shape":"BucketAlreadyExists"}, {"shape":"BucketAlreadyOwnedByYou"} ], - "documentation":"

This API operation creates an Amazon S3 on Outposts bucket. To create an S3 bucket, see Create Bucket in the Amazon Simple Storage Service API.

Creates a new Outposts bucket. By creating the bucket, you become the bucket owner. To create an Outposts bucket, you must have S3 on Outposts. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.

Not every string is an acceptable bucket name. For information on bucket naming restrictions, see Working with Amazon S3 Buckets.

S3 on Outposts buckets do not support

For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and x-amz-outpost-id in your API request, see the Examples section.

The following actions are related to CreateBucket for Amazon S3 on Outposts:

", + "documentation":"

This action creates an Amazon S3 on Outposts bucket. To create an S3 bucket, see Create Bucket in the Amazon Simple Storage Service API.

Creates a new Outposts bucket. By creating the bucket, you become the bucket owner. To create an Outposts bucket, you must have S3 on Outposts. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service User Guide.

Not every string is an acceptable bucket name. For information on bucket naming restrictions, see Working with Amazon S3 Buckets.

S3 on Outposts buckets support:

For a complete list of restrictions and Amazon S3 feature limitations on S3 on Outposts, see Amazon S3 on Outposts Restrictions and Limitations.

For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and x-amz-outpost-id in your API request, see the Examples section.

The following actions are related to CreateBucket for Amazon S3 on Outposts:

", "httpChecksumRequired":true }, "CreateJob":{ @@ -61,7 +78,7 @@ {"shape":"IdempotencyException"}, {"shape":"InternalServiceException"} ], - "documentation":"

You can use S3 Batch Operations to perform large-scale batch operations on Amazon S3 objects. Batch Operations can run a single operation on lists of Amazon S3 objects that you specify. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

This operation creates a S3 Batch Operations job.

Related actions include:

", + "documentation":"

You can use S3 Batch Operations to perform large-scale batch actions on Amazon S3 objects. Batch Operations can run a single action on lists of Amazon S3 objects that you specify. For more information, see S3 Batch Operations in the Amazon Simple Storage Service User Guide.

This action creates an S3 Batch Operations job.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -78,6 +95,18 @@ "hostPrefix":"{AccountId}." } }, + "DeleteAccessPointForObjectLambda":{ + "name":"DeleteAccessPointForObjectLambda", + "http":{ + "method":"DELETE", + "requestUri":"/v20180820/accesspointforobjectlambda/{name}" + }, + "input":{"shape":"DeleteAccessPointForObjectLambdaRequest"}, + "documentation":"

Deletes the specified Object Lambda Access Point.

The following actions are related to DeleteAccessPointForObjectLambda:

", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, "DeleteAccessPointPolicy":{ "name":"DeleteAccessPointPolicy", "http":{ @@ -90,6 +119,18 @@ "hostPrefix":"{AccountId}." } }, + "DeleteAccessPointPolicyForObjectLambda":{ + "name":"DeleteAccessPointPolicyForObjectLambda", + "http":{ + "method":"DELETE", + "requestUri":"/v20180820/accesspointforobjectlambda/{name}/policy" + }, + "input":{"shape":"DeleteAccessPointPolicyForObjectLambdaRequest"}, + "documentation":"

Removes the resource policy for an Object Lambda Access Point.

The following actions are related to DeleteAccessPointPolicyForObjectLambda:

", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, "DeleteBucket":{ "name":"DeleteBucket", "http":{ @@ -97,7 +138,7 @@ "requestUri":"/v20180820/bucket/{name}" }, "input":{"shape":"DeleteBucketRequest"}, - "documentation":"

This API operation deletes an Amazon S3 on Outposts bucket. To delete an S3 bucket, see DeleteBucket in the Amazon Simple Storage Service API.

Deletes the Amazon S3 on Outposts bucket. All objects (including all object versions and delete markers) in the bucket must be deleted before the bucket itself can be deleted. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

Related Resources

", + "documentation":"

This action deletes an Amazon S3 on Outposts bucket. To delete an S3 bucket, see DeleteBucket in the Amazon Simple Storage Service API.

Deletes the Amazon S3 on Outposts bucket. All objects (including all object versions and delete markers) in the bucket must be deleted before the bucket itself can be deleted. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service User Guide.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

Related Resources

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -109,7 +150,7 @@ "requestUri":"/v20180820/bucket/{name}/lifecycleconfiguration" }, "input":{"shape":"DeleteBucketLifecycleConfigurationRequest"}, - "documentation":"

This API action deletes an Amazon S3 on Outposts bucket's lifecycle configuration. To delete an S3 bucket's lifecycle configuration, see DeleteBucketLifecycle in the Amazon Simple Storage Service API.

Deletes the lifecycle configuration from the specified Outposts bucket. Amazon S3 on Outposts removes all the lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 on Outposts no longer automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.

To use this operation, you must have permission to perform the s3-outposts:DeleteLifecycleConfiguration action. By default, the bucket owner has this permission and the Outposts bucket owner can grant this permission to others.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

For more information about object expiration, see Elements to Describe Lifecycle Actions.

Related actions include:

", + "documentation":"

This action deletes an Amazon S3 on Outposts bucket's lifecycle configuration. To delete an S3 bucket's lifecycle configuration, see DeleteBucketLifecycle in the Amazon Simple Storage Service API.

Deletes the lifecycle configuration from the specified Outposts bucket. Amazon S3 on Outposts removes all the lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 on Outposts no longer automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service User Guide.

To use this action, you must have permission to perform the s3-outposts:DeleteLifecycleConfiguration action. By default, the bucket owner has this permission and the Outposts bucket owner can grant this permission to others.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

For more information about object expiration, see Elements to Describe Lifecycle Actions.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -121,7 +162,7 @@ "requestUri":"/v20180820/bucket/{name}/policy" }, "input":{"shape":"DeleteBucketPolicyRequest"}, - "documentation":"

This API operation deletes an Amazon S3 on Outposts bucket policy. To delete an S3 bucket policy, see DeleteBucketPolicy in the Amazon Simple Storage Service API.

This implementation of the DELETE operation uses the policy subresource to delete the policy of a specified Amazon S3 on Outposts bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the s3-outposts:DeleteBucketPolicy permissions on the specified Outposts bucket and belong to the bucket owner's account to use this operation. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.

If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to DeleteBucketPolicy:

", + "documentation":"

This action deletes an Amazon S3 on Outposts bucket policy. To delete an S3 bucket policy, see DeleteBucketPolicy in the Amazon Simple Storage Service API.

This implementation of the DELETE action uses the policy subresource to delete the policy of a specified Amazon S3 on Outposts bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the s3-outposts:DeleteBucketPolicy permissions on the specified Outposts bucket and belong to the bucket owner's account to use this action. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service User Guide.

If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this action, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to DeleteBucketPolicy:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -134,7 +175,7 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketTaggingRequest"}, - "documentation":"

This operation deletes an Amazon S3 on Outposts bucket's tags. To delete an S3 bucket tags, see DeleteBucketTagging in the Amazon Simple Storage Service API.

Deletes the tags from the Outposts bucket. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.

To use this operation, you must have permission to perform the PutBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to DeleteBucketTagging:

", + "documentation":"

This action deletes an Amazon S3 on Outposts bucket's tags. To delete an S3 bucket tags, see DeleteBucketTagging in the Amazon Simple Storage Service API.

Deletes the tags from the Outposts bucket. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service User Guide.

To use this action, you must have permission to perform the PutBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to DeleteBucketTagging:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -152,7 +193,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"NotFoundException"} ], - "documentation":"

Removes the entire tag set from the specified S3 Batch Operations job. To use this operation, you must have permission to perform the s3:DeleteJobTagging action. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service Developer Guide.

Related actions include:

", + "documentation":"

Removes the entire tag set from the specified S3 Batch Operations job. To use this operation, you must have permission to perform the s3:DeleteJobTagging action. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service User Guide.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -176,7 +217,7 @@ "requestUri":"/v20180820/storagelens/{storagelensid}" }, "input":{"shape":"DeleteStorageLensConfigurationRequest"}, - "documentation":"

Deletes the Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:DeleteStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Deletes the Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

To use this action, you must have permission to perform the s3:DeleteStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -189,7 +230,7 @@ }, "input":{"shape":"DeleteStorageLensConfigurationTaggingRequest"}, "output":{"shape":"DeleteStorageLensConfigurationTaggingResult"}, - "documentation":"

Deletes the Amazon S3 Storage Lens configuration tags. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:DeleteStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Deletes the Amazon S3 Storage Lens configuration tags. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

To use this action, you must have permission to perform the s3:DeleteStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -208,7 +249,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves the configuration parameters and status for a Batch Operations job. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

Related actions include:

", + "documentation":"

Retrieves the configuration parameters and status for a Batch Operations job. For more information, see S3 Batch Operations in the Amazon Simple Storage Service User Guide.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -226,6 +267,32 @@ "hostPrefix":"{AccountId}." } }, + "GetAccessPointConfigurationForObjectLambda":{ + "name":"GetAccessPointConfigurationForObjectLambda", + "http":{ + "method":"GET", + "requestUri":"/v20180820/accesspointforobjectlambda/{name}/configuration" + }, + "input":{"shape":"GetAccessPointConfigurationForObjectLambdaRequest"}, + "output":{"shape":"GetAccessPointConfigurationForObjectLambdaResult"}, + "documentation":"

Returns configuration for an Object Lambda Access Point.

The following actions are related to GetAccessPointConfigurationForObjectLambda:

", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "GetAccessPointForObjectLambda":{ + "name":"GetAccessPointForObjectLambda", + "http":{ + "method":"GET", + "requestUri":"/v20180820/accesspointforobjectlambda/{name}" + }, + "input":{"shape":"GetAccessPointForObjectLambdaRequest"}, + "output":{"shape":"GetAccessPointForObjectLambdaResult"}, + "documentation":"

Returns configuration information about the specified Object Lambda Access Point.

The following actions are related to GetAccessPointForObjectLambda:

", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, "GetAccessPointPolicy":{ "name":"GetAccessPointPolicy", "http":{ @@ -239,6 +306,19 @@ "hostPrefix":"{AccountId}." } }, + "GetAccessPointPolicyForObjectLambda":{ + "name":"GetAccessPointPolicyForObjectLambda", + "http":{ + "method":"GET", + "requestUri":"/v20180820/accesspointforobjectlambda/{name}/policy" + }, + "input":{"shape":"GetAccessPointPolicyForObjectLambdaRequest"}, + "output":{"shape":"GetAccessPointPolicyForObjectLambdaResult"}, + "documentation":"

Returns the resource policy for an Object Lambda Access Point.

The following actions are related to GetAccessPointPolicyForObjectLambda:

", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, "GetAccessPointPolicyStatus":{ "name":"GetAccessPointPolicyStatus", "http":{ @@ -247,7 +327,20 @@ }, "input":{"shape":"GetAccessPointPolicyStatusRequest"}, "output":{"shape":"GetAccessPointPolicyStatusResult"}, - "documentation":"

Indicates whether the specified access point currently has a policy that allows public access. For more information about public access through access points, see Managing Data Access with Amazon S3 Access Points in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Indicates whether the specified access point currently has a policy that allows public access. For more information about public access through access points, see Managing Data Access with Amazon S3 Access Points in the Amazon Simple Storage Service Developer Guide.

", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "GetAccessPointPolicyStatusForObjectLambda":{ + "name":"GetAccessPointPolicyStatusForObjectLambda", + "http":{ + "method":"GET", + "requestUri":"/v20180820/accesspointforobjectlambda/{name}/policyStatus" + }, + "input":{"shape":"GetAccessPointPolicyStatusForObjectLambdaRequest"}, + "output":{"shape":"GetAccessPointPolicyStatusForObjectLambdaResult"}, + "documentation":"

Returns the status of the resource policy associated with an Object Lambda Access Point.

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -260,7 +353,7 @@ }, "input":{"shape":"GetBucketRequest"}, "output":{"shape":"GetBucketResult"}, - "documentation":"

Gets an Amazon S3 on Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the s3-outposts:GetBucket permissions on the specified bucket and belong to the bucket owner's account in order to use this operation. Only users from Outposts bucket owner account with the right permissions can perform actions on an Outposts bucket.

If you don't have s3-outposts:GetBucket permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied error.

The following actions are related to GetBucket for Amazon S3 on Outposts:

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

", + "documentation":"

Gets an Amazon S3 on Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.

If you are using an identity other than the root user of the AWS account that owns the Outposts bucket, the calling identity must have the s3-outposts:GetBucket permissions on the specified Outposts bucket and belong to the Outposts bucket owner's account in order to use this action. Only users from Outposts bucket owner account with the right permissions can perform actions on an Outposts bucket.

If you don't have s3-outposts:GetBucket permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied error.

The following actions are related to GetBucket for Amazon S3 on Outposts:

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -273,7 +366,7 @@ }, "input":{"shape":"GetBucketLifecycleConfigurationRequest"}, "output":{"shape":"GetBucketLifecycleConfigurationResult"}, - "documentation":"

This operation gets an Amazon S3 on Outposts bucket's lifecycle configuration. To get an S3 bucket's lifecycle configuration, see GetBucketLifecycleConfiguration in the Amazon Simple Storage Service API.

Returns the lifecycle configuration information set on the Outposts bucket. For more information, see Using Amazon S3 on Outposts and for information about lifecycle configuration, see Object Lifecycle Management in Amazon Simple Storage Service Developer Guide.

To use this operation, you must have permission to perform the s3-outposts:GetLifecycleConfiguration action. The Outposts bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

GetBucketLifecycleConfiguration has the following special error:

The following actions are related to GetBucketLifecycleConfiguration:

", + "documentation":"

This action gets an Amazon S3 on Outposts bucket's lifecycle configuration. To get an S3 bucket's lifecycle configuration, see GetBucketLifecycleConfiguration in the Amazon Simple Storage Service API.

Returns the lifecycle configuration information set on the Outposts bucket. For more information, see Using Amazon S3 on Outposts and for information about lifecycle configuration, see Object Lifecycle Management in Amazon Simple Storage Service User Guide.

To use this action, you must have permission to perform the s3-outposts:GetLifecycleConfiguration action. The Outposts bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

GetBucketLifecycleConfiguration has the following special error:

The following actions are related to GetBucketLifecycleConfiguration:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -286,7 +379,7 @@ }, "input":{"shape":"GetBucketPolicyRequest"}, "output":{"shape":"GetBucketPolicyResult"}, - "documentation":"

This action gets a bucket policy for an Amazon S3 on Outposts bucket. To get a policy for an S3 bucket, see GetBucketPolicy in the Amazon Simple Storage Service API.

Returns the policy of a specified Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the GetBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.

Only users from Outposts bucket owner account with the right permissions can perform actions on an Outposts bucket. If you don't have s3-outposts:GetBucketPolicy permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to GetBucketPolicy:

", + "documentation":"

This action gets a bucket policy for an Amazon S3 on Outposts bucket. To get a policy for an S3 bucket, see GetBucketPolicy in the Amazon Simple Storage Service API.

Returns the policy of a specified Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.

If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the GetBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this action.

Only users from Outposts bucket owner account with the right permissions can perform actions on an Outposts bucket. If you don't have s3-outposts:GetBucketPolicy permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this action, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to GetBucketPolicy:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -299,7 +392,7 @@ }, "input":{"shape":"GetBucketTaggingRequest"}, "output":{"shape":"GetBucketTaggingResult"}, - "documentation":"

This operation gets an Amazon S3 on Outposts bucket's tags. To get an S3 bucket tags, see GetBucketTagging in the Amazon Simple Storage Service API.

Returns the tag set associated with the Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

To use this operation, you must have permission to perform the GetBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.

GetBucketTagging has the following special error:

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to GetBucketTagging:

", + "documentation":"

This action gets an Amazon S3 on Outposts bucket's tags. To get an S3 bucket tags, see GetBucketTagging in the Amazon Simple Storage Service API.

Returns the tag set associated with the Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.

To use this action, you must have permission to perform the GetBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.

GetBucketTagging has the following special error:

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to GetBucketTagging:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -317,7 +410,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"NotFoundException"} ], - "documentation":"

Returns the tags on an S3 Batch Operations job. To use this operation, you must have permission to perform the s3:GetJobTagging action. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service Developer Guide.

Related actions include:

", + "documentation":"

Returns the tags on an S3 Batch Operations job. To use this operation, you must have permission to perform the s3:GetJobTagging action. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service User Guide.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -346,7 +439,7 @@ }, "input":{"shape":"GetStorageLensConfigurationRequest"}, "output":{"shape":"GetStorageLensConfigurationResult"}, - "documentation":"

Gets the Amazon S3 Storage Lens configuration. For more information, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:GetStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Gets the Amazon S3 Storage Lens configuration. For more information, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

To use this action, you must have permission to perform the s3:GetStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -359,7 +452,7 @@ }, "input":{"shape":"GetStorageLensConfigurationTaggingRequest"}, "output":{"shape":"GetStorageLensConfigurationTaggingResult"}, - "documentation":"

Gets the tags of Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:GetStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Gets the tags of Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

To use this action, you must have permission to perform the s3:GetStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -377,6 +470,19 @@ "hostPrefix":"{AccountId}." } }, + "ListAccessPointsForObjectLambda":{ + "name":"ListAccessPointsForObjectLambda", + "http":{ + "method":"GET", + "requestUri":"/v20180820/accesspointforobjectlambda" + }, + "input":{"shape":"ListAccessPointsForObjectLambdaRequest"}, + "output":{"shape":"ListAccessPointsForObjectLambdaResult"}, + "documentation":"

Returns a list of the access points associated with the Object Lambda Access Point. You can retrieve up to 1,000 access points per call. If there are more than 1,000 access points (or the number specified in maxResults, whichever is less), the response will include a continuation token that you can use to list the additional access points.

The following actions are related to ListAccessPointsForObjectLambda:

", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, "ListJobs":{ "name":"ListJobs", "http":{ @@ -390,7 +496,7 @@ {"shape":"InternalServiceException"}, {"shape":"InvalidNextTokenException"} ], - "documentation":"

Lists current S3 Batch Operations jobs and jobs that have ended within the last 30 days for the AWS account making the request. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

Related actions include:

", + "documentation":"

Lists current S3 Batch Operations jobs and jobs that have ended within the last 30 days for the AWS account making the request. For more information, see S3 Batch Operations in the Amazon Simple Storage Service User Guide.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -403,7 +509,7 @@ }, "input":{"shape":"ListRegionalBucketsRequest"}, "output":{"shape":"ListRegionalBucketsResult"}, - "documentation":"

Returns a list of all Outposts buckets in an Outpost that are owned by the authenticated sender of the request. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and x-amz-outpost-id in your request, see the Examples section.

", + "documentation":"

Returns a list of all Outposts buckets in an Outpost that are owned by the authenticated sender of the request. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.

For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and x-amz-outpost-id in your request, see the Examples section.

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -416,7 +522,23 @@ }, "input":{"shape":"ListStorageLensConfigurationsRequest"}, "output":{"shape":"ListStorageLensConfigurationsResult"}, - "documentation":"

Gets a list of Amazon S3 Storage Lens configurations. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:ListStorageLensConfigurations action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Gets a list of Amazon S3 Storage Lens configurations. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

To use this action, you must have permission to perform the s3:ListStorageLensConfigurations action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "PutAccessPointConfigurationForObjectLambda":{ + "name":"PutAccessPointConfigurationForObjectLambda", + "http":{ + "method":"PUT", + "requestUri":"/v20180820/accesspointforobjectlambda/{name}/configuration" + }, + "input":{ + "shape":"PutAccessPointConfigurationForObjectLambdaRequest", + "locationName":"PutAccessPointConfigurationForObjectLambdaRequest", + "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} + }, + "documentation":"

Replaces the configuration for an Object Lambda Access Point.

The following actions are related to PutAccessPointConfigurationForObjectLambda:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -437,6 +559,22 @@ "hostPrefix":"{AccountId}." } }, + "PutAccessPointPolicyForObjectLambda":{ + "name":"PutAccessPointPolicyForObjectLambda", + "http":{ + "method":"PUT", + "requestUri":"/v20180820/accesspointforobjectlambda/{name}/policy" + }, + "input":{ + "shape":"PutAccessPointPolicyForObjectLambdaRequest", + "locationName":"PutAccessPointPolicyForObjectLambdaRequest", + "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} + }, + "documentation":"

Creates or replaces the resource policy for an Object Lambda Access Point. For an example policy, see Creating Object Lambda Access Points in the Amazon Simple Storage Service User Guide.

The following actions are related to PutAccessPointPolicyForObjectLambda:

", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, "PutBucketLifecycleConfiguration":{ "name":"PutBucketLifecycleConfiguration", "http":{ @@ -444,7 +582,7 @@ "requestUri":"/v20180820/bucket/{name}/lifecycleconfiguration" }, "input":{"shape":"PutBucketLifecycleConfigurationRequest"}, - "documentation":"

This action puts a lifecycle configuration to an Amazon S3 on Outposts bucket. To put a lifecycle configuration to an S3 bucket, see PutBucketLifecycleConfiguration in the Amazon Simple Storage Service API.

Creates a new lifecycle configuration for the Outposts bucket or replaces an existing lifecycle configuration. Outposts buckets only support lifecycle configurations that delete/expire objects after a certain period of time and abort incomplete multipart uploads. For more information, see Managing Lifecycle Permissions for Amazon S3 on Outposts.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to PutBucketLifecycleConfiguration:

", + "documentation":"

This action puts a lifecycle configuration to an Amazon S3 on Outposts bucket. To put a lifecycle configuration to an S3 bucket, see PutBucketLifecycleConfiguration in the Amazon Simple Storage Service API.

Creates a new lifecycle configuration for the S3 on Outposts bucket or replaces an existing lifecycle configuration. Outposts buckets only support lifecycle configurations that delete/expire objects after a certain period of time and abort incomplete multipart uploads.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to PutBucketLifecycleConfiguration:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -461,7 +599,7 @@ "locationName":"PutBucketPolicyRequest", "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, - "documentation":"

This action puts a bucket policy to an Amazon S3 on Outposts bucket. To put a policy on an S3 bucket, see PutBucketPolicy in the Amazon Simple Storage Service API.

Applies an Amazon S3 bucket policy to an Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

If you are using an identity other than the root user of the AWS account that owns the Outposts bucket, the calling identity must have the PutBucketPolicy permissions on the specified Outposts bucket and belong to the bucket owner's account in order to use this operation.

If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to PutBucketPolicy:

", + "documentation":"

This action puts a bucket policy to an Amazon S3 on Outposts bucket. To put a policy on an S3 bucket, see PutBucketPolicy in the Amazon Simple Storage Service API.

Applies an Amazon S3 bucket policy to an Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.

If you are using an identity other than the root user of the AWS account that owns the Outposts bucket, the calling identity must have the PutBucketPolicy permissions on the specified Outposts bucket and belong to the bucket owner's account in order to use this action.

If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this action, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to PutBucketPolicy:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -474,7 +612,7 @@ "requestUri":"/v20180820/bucket/{name}/tagging" }, "input":{"shape":"PutBucketTaggingRequest"}, - "documentation":"

This action puts tags on an Amazon S3 on Outposts bucket. To put tags on an S3 bucket, see PutBucketTagging in the Amazon Simple Storage Service API.

Sets the tags for an Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging.

Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old value. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.

To use this operation, you must have permissions to perform the s3-outposts:PutBucketTagging action. The Outposts bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

PutBucketTagging has the following special errors:

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to PutBucketTagging:

", + "documentation":"

This action puts tags on an Amazon S3 on Outposts bucket. To put tags on an S3 bucket, see PutBucketTagging in the Amazon Simple Storage Service API.

Sets the tags for an S3 on Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.

Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost allocation and tagging.

Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old value. For more information, see Using cost allocation in Amazon S3 bucket tags.

To use this action, you must have permissions to perform the s3-outposts:PutBucketTagging action. The Outposts bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing access permissions to your Amazon S3 resources.

PutBucketTagging has the following special errors:

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to PutBucketTagging:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -498,7 +636,7 @@ {"shape":"NotFoundException"}, {"shape":"TooManyTagsException"} ], - "documentation":"

Sets the supplied tag-set on an S3 Batch Operations job.

A tag is a key-value pair. You can associate S3 Batch Operations tags with any job by sending a PUT request against the tagging subresource that is associated with the job. To modify the existing tag set, you can either replace the existing tag set entirely, or make changes within the existing tag set by retrieving the existing tag set using GetJobTagging, modify that tag set, and use this action to replace the tag set with the one you modified. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service Developer Guide.

To use this operation, you must have permission to perform the s3:PutJobTagging action.

Related actions include:

", + "documentation":"

Sets the supplied tag-set on an S3 Batch Operations job.

A tag is a key-value pair. You can associate S3 Batch Operations tags with any job by sending a PUT request against the tagging subresource that is associated with the job. To modify the existing tag set, you can either replace the existing tag set entirely, or make changes within the existing tag set by retrieving it using GetJobTagging, modifying it, and using this action to replace the tag set with the one you modified. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service User Guide.

To use this action, you must have permission to perform the s3:PutJobTagging action.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -526,7 +664,7 @@ "locationName":"PutStorageLensConfigurationRequest", "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, - "documentation":"

Puts an Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:PutStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Puts an Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

To use this action, you must have permission to perform the s3:PutStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -543,7 +681,7 @@ "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, "output":{"shape":"PutStorageLensConfigurationTaggingResult"}, - "documentation":"

Put or replace tags on an existing Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:PutStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Put or replace tags on an existing Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

To use this action, you must have permission to perform the s3:PutStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -562,7 +700,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Updates an existing S3 Batch Operations job's priority. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

Related actions include:

", + "documentation":"

Updates an existing S3 Batch Operations job's priority. For more information, see S3 Batch Operations in the Amazon Simple Storage Service User Guide.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -582,7 +720,7 @@ {"shape":"JobStatusException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Updates the status for the specified job. Use this operation to confirm that you want to run a job or to cancel an existing job. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

Related actions include:

", + "documentation":"

Updates the status for the specified job. Use this action to confirm that you want to run a job or to cancel an existing job. For more information, see S3 Batch Operations in the Amazon Simple Storage Service User Guide.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -672,6 +810,22 @@ }, "documentation":"

A container for the activity metrics.

" }, + "AwsLambdaTransformation":{ + "type":"structure", + "required":["FunctionArn"], + "members":{ + "FunctionArn":{ + "shape":"FunctionArnString", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Lambda function.

" + }, + "FunctionPayload":{ + "shape":"AwsLambdaTransformationPayload", + "documentation":"

Additional JSON that provides supplemental data to the Lambda function used to transform objects.

" + } + }, + "documentation":"

AWS Lambda function used to transform objects through an Object Lambda Access Point.

" + }, + "AwsLambdaTransformationPayload":{"type":"string"}, "AwsOrgArn":{ "type":"string", "max":1024, @@ -761,6 +915,42 @@ "ConfirmRemoveSelfBucketAccess":{"type":"boolean"}, "ConfirmationRequired":{"type":"boolean"}, "ContinuationToken":{"type":"string"}, + "CreateAccessPointForObjectLambdaRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Name", + "Configuration" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID for the owner of the specified Object Lambda Access Point.

", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Name":{ + "shape":"ObjectLambdaAccessPointName", + "documentation":"

The name you want to assign to this Object Lambda Access Point.

", + "location":"uri", + "locationName":"name" + }, + "Configuration":{ + "shape":"ObjectLambdaConfiguration", + "documentation":"

Object Lambda Access Point configuration as a JSON document.

" + } + } + }, + "CreateAccessPointForObjectLambdaResult":{ + "type":"structure", + "members":{ + "ObjectLambdaAccessPointArn":{ + "shape":"ObjectLambdaAccessPointArn", + "documentation":"

Specifies the ARN for the Object Lambda Access Point.

" + } + } + }, "CreateAccessPointRequest":{ "type":"structure", "required":[ @@ -920,7 +1110,7 @@ }, "Operation":{ "shape":"JobOperation", - "documentation":"

The operation that you want this job to perform on every object listed in the manifest. For more information about the available operations, see Operations in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

The action that you want this job to perform on every object listed in the manifest. For more information about the available actions, see Operations in the Amazon Simple Storage Service User Guide.

" }, "Report":{ "shape":"JobReport", @@ -946,7 +1136,7 @@ }, "RoleArn":{ "shape":"IAMRoleArn", - "documentation":"

The Amazon Resource Name (ARN) for the AWS Identity and Access Management (IAM) role that Batch Operations will use to run this job's operation on every object in the manifest.

" + "documentation":"

The Amazon Resource Name (ARN) for the AWS Identity and Access Management (IAM) role that Batch Operations will use to run this job's action on every object in the manifest.

" }, "Tags":{ "shape":"S3TagSet", @@ -967,6 +1157,50 @@ "Date":{"type":"timestamp"}, "Days":{"type":"integer"}, "DaysAfterInitiation":{"type":"integer"}, + "DeleteAccessPointForObjectLambdaRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Name" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

The account ID for the account that owns the specified Object Lambda Access Point.

", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Name":{ + "shape":"ObjectLambdaAccessPointName", + "documentation":"

The name of the access point you want to delete.

", + "location":"uri", + "locationName":"name" + } + } + }, + "DeleteAccessPointPolicyForObjectLambdaRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Name" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

The account ID for the account that owns the specified Object Lambda Access Point.

", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Name":{ + "shape":"ObjectLambdaAccessPointName", + "documentation":"

The name of the Object Lambda Access Point you want to delete the policy for.

", + "location":"uri", + "locationName":"name" + } + } + }, "DeleteAccessPointPolicyRequest":{ "type":"structure", "required":[ @@ -1197,7 +1431,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

", + "documentation":"

The AWS account ID associated with the S3 Batch Operations job.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" @@ -1259,6 +1493,107 @@ "min":1, "pattern":"(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_]+)(:(\\$LATEST|[a-zA-Z0-9-_]+))?" }, + "GetAccessPointConfigurationForObjectLambdaRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Name" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

The account ID for the account that owns the specified Object Lambda Access Point.

", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Name":{ + "shape":"ObjectLambdaAccessPointName", + "documentation":"

The name of the Object Lambda Access Point you want to return the configuration for.

", + "location":"uri", + "locationName":"name" + } + } + }, + "GetAccessPointConfigurationForObjectLambdaResult":{ + "type":"structure", + "members":{ + "Configuration":{ + "shape":"ObjectLambdaConfiguration", + "documentation":"

Object Lambda Access Point configuration document.

" + } + } + }, + "GetAccessPointForObjectLambdaRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Name" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

The account ID for the account that owns the specified Object Lambda Access Point.

", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Name":{ + "shape":"ObjectLambdaAccessPointName", + "documentation":"

The name of the Object Lambda Access Point.

", + "location":"uri", + "locationName":"name" + } + } + }, + "GetAccessPointForObjectLambdaResult":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"ObjectLambdaAccessPointName", + "documentation":"

The name of the Object Lambda Access Point.

" + }, + "PublicAccessBlockConfiguration":{ + "shape":"PublicAccessBlockConfiguration", + "documentation":"

Configuration to block all public access. This setting is turned on and cannot be edited.

" + }, + "CreationDate":{ + "shape":"CreationDate", + "documentation":"

The date and time when the specified Object Lambda Access Point was created.

" + } + } + }, + "GetAccessPointPolicyForObjectLambdaRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Name" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

The account ID for the account that owns the specified Object Lambda Access Point.

", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Name":{ + "shape":"ObjectLambdaAccessPointName", + "documentation":"

The name of the Object Lambda Access Point.

", + "location":"uri", + "locationName":"name" + } + } + }, + "GetAccessPointPolicyForObjectLambdaResult":{ + "type":"structure", + "members":{ + "Policy":{ + "shape":"ObjectLambdaPolicy", + "documentation":"

Object Lambda Access Point resource policy document.

" + } + } + }, "GetAccessPointPolicyRequest":{ "type":"structure", "required":[ @@ -1290,6 +1625,34 @@ } } }, + "GetAccessPointPolicyStatusForObjectLambdaRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Name" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

The account ID for the account that owns the specified Object Lambda Access Point.

", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Name":{ + "shape":"ObjectLambdaAccessPointName", + "documentation":"

The name of the Object Lambda Access Point.

", + "location":"uri", + "locationName":"name" + } + } + }, + "GetAccessPointPolicyStatusForObjectLambdaResult":{ + "type":"structure", + "members":{ + "PolicyStatus":{"shape":"PolicyStatus"} + } + }, "GetAccessPointPolicyStatusRequest":{ "type":"structure", "required":[ @@ -1977,7 +2340,7 @@ "box":true } }, - "documentation":"

The operation that you want this job to perform on every object listed in the manifest. For more information about the available operations, see Operations in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

The operation that you want this job to perform on every object listed in the manifest. For more information about the available operations, see Operations in the Amazon Simple Storage Service User Guide.

" }, "JobPriority":{ "type":"integer", @@ -2205,6 +2568,44 @@ "locationName":"Rule" } }, + "ListAccessPointsForObjectLambdaRequest":{ + "type":"structure", + "required":["AccountId"], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

The account ID for the account that owns the specified Object Lambda Access Point.

", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "NextToken":{ + "shape":"NonEmptyMaxLength1024String", + "documentation":"

If the list has more access points than can be returned in one call to this API, this field contains a continuation token that you can provide in subsequent calls to this API to retrieve additional access points.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of access points that you want to include in the list. If there are more than this number of access points, then the response will include a continuation token in the NextToken field that you can use to retrieve the next page of access points.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListAccessPointsForObjectLambdaResult":{ + "type":"structure", + "members":{ + "ObjectLambdaAccessPointList":{ + "shape":"ObjectLambdaAccessPointList", + "documentation":"

Returns a list of Object Lambda Access Points.

" + }, + "NextToken":{ + "shape":"NonEmptyMaxLength1024String", + "documentation":"

If the list has more access points than can be returned in one call to this API, this field contains a continuation token that you can provide in subsequent calls to this API to retrieve additional access points.

" + } + } + }, "ListAccessPointsRequest":{ "type":"structure", "required":["AccountId"], @@ -2255,7 +2656,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

", + "documentation":"

The AWS account ID associated with the S3 Batch Operations job.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" @@ -2488,6 +2889,134 @@ "documentation":"

", "exception":true }, + "ObjectLambdaAccessPoint":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ObjectLambdaAccessPointName", + "documentation":"

The name of the Object Lambda Access Point.

" + }, + "ObjectLambdaAccessPointArn":{ + "shape":"ObjectLambdaAccessPointArn", + "documentation":"

Specifies the ARN for the Object Lambda Access Point.

" + } + }, + "documentation":"

An access point with an attached AWS Lambda function used to access transformed data from an Amazon S3 bucket.

" + }, + "ObjectLambdaAccessPointArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"arn:[^:]+:s3-object-lambda:[^:]*:\\d{12}:accesspoint/.*" + }, + "ObjectLambdaAccessPointList":{ + "type":"list", + "member":{ + "shape":"ObjectLambdaAccessPoint", + "locationName":"ObjectLambdaAccessPoint" + } + }, + "ObjectLambdaAccessPointName":{ + "type":"string", + "max":45, + "min":3, + "pattern":"^[a-z0-9]([a-z0-9\\-]*[a-z0-9])?$" + }, + "ObjectLambdaAllowedFeature":{ + "type":"string", + "enum":[ + "GetObject-Range", + "GetObject-PartNumber" + ] + }, + "ObjectLambdaAllowedFeaturesList":{ + "type":"list", + "member":{ + "shape":"ObjectLambdaAllowedFeature", + "locationName":"AllowedFeature" + } + }, + "ObjectLambdaConfiguration":{ + "type":"structure", + "required":[ + "SupportingAccessPoint", + "TransformationConfigurations" + ], + "members":{ + "SupportingAccessPoint":{ + "shape":"ObjectLambdaSupportingAccessPointArn", + "documentation":"

Standard access point associated with the Object Lambda Access Point.

" + }, + "CloudWatchMetricsEnabled":{ + "shape":"Boolean", + "documentation":"

A container for whether the CloudWatch metrics configuration is enabled.

" + }, + "AllowedFeatures":{ + "shape":"ObjectLambdaAllowedFeaturesList", + "documentation":"

A container for allowed features. Valid inputs are GetObject-Range and GetObject-PartNumber.

" + }, + "TransformationConfigurations":{ + "shape":"ObjectLambdaTransformationConfigurationsList", + "documentation":"

A container for transformation configurations for an Object Lambda Access Point.

" + } + }, + "documentation":"

A configuration used when creating an Object Lambda Access Point.

" + }, + "ObjectLambdaContentTransformation":{ + "type":"structure", + "members":{ + "AwsLambda":{ + "shape":"AwsLambdaTransformation", + "documentation":"

A container for an AWS Lambda function.

" + } + }, + "documentation":"

A container for AwsLambdaTransformation.

", + "union":true + }, + "ObjectLambdaPolicy":{"type":"string"}, + "ObjectLambdaSupportingAccessPointArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"arn:[^:]+:s3:[^:]*:\\d{12}:accesspoint/.*" + }, + "ObjectLambdaTransformationConfiguration":{ + "type":"structure", + "required":[ + "Actions", + "ContentTransformation" + ], + "members":{ + "Actions":{ + "shape":"ObjectLambdaTransformationConfigurationActionsList", + "documentation":"

A container for the action of an Object Lambda Access Point configuration.

" + }, + "ContentTransformation":{ + "shape":"ObjectLambdaContentTransformation", + "documentation":"

A container for the content transformation of an Object Lambda Access Point configuration.

" + } + }, + "documentation":"

A configuration used when creating an Object Lambda Access Point transformation.

" + }, + "ObjectLambdaTransformationConfigurationAction":{ + "type":"string", + "enum":["GetObject"] + }, + "ObjectLambdaTransformationConfigurationActionsList":{ + "type":"list", + "member":{ + "shape":"ObjectLambdaTransformationConfigurationAction", + "locationName":"Action" + } + }, + "ObjectLambdaTransformationConfigurationsList":{ + "type":"list", + "member":{ + "shape":"ObjectLambdaTransformationConfiguration", + "locationName":"TransformationConfiguration" + } + }, "ObjectLockEnabledForBucket":{"type":"boolean"}, "OperationName":{ "type":"string", @@ -2516,7 +3045,7 @@ "locationName":"IsPublic" } }, - "documentation":"

Indicates whether this access point policy is public. For more information about how Amazon S3 evaluates policies to determine whether they are public, see The Meaning of \"Public\" in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

Indicates whether this access point policy is public. For more information about how Amazon S3 evaluates policies to determine whether they are public, see The Meaning of \"Public\" in the Amazon Simple Storage Service User Guide.

" }, "Prefix":{"type":"string"}, "PrefixLevel":{ @@ -2568,6 +3097,60 @@ "documentation":"

The PublicAccessBlock configuration that you want to apply to this Amazon S3 account. You can enable the configuration options in any combination. For more information about when Amazon S3 considers a bucket or object public, see The Meaning of \"Public\" in the Amazon Simple Storage Service Developer Guide.

This is not supported for Amazon S3 on Outposts.

" }, "PublicAccessBlockEnabled":{"type":"boolean"}, + "PutAccessPointConfigurationForObjectLambdaRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Name", + "Configuration" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

The account ID for the account that owns the specified Object Lambda Access Point.

", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Name":{ + "shape":"ObjectLambdaAccessPointName", + "documentation":"

The name of the Object Lambda Access Point.

", + "location":"uri", + "locationName":"name" + }, + "Configuration":{ + "shape":"ObjectLambdaConfiguration", + "documentation":"

Object Lambda Access Point configuration document.

" + } + } + }, + "PutAccessPointPolicyForObjectLambdaRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Name", + "Policy" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

The account ID for the account that owns the specified Object Lambda Access Point.

", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Name":{ + "shape":"ObjectLambdaAccessPointName", + "documentation":"

The name of the Object Lambda Access Point.

", + "location":"uri", + "locationName":"name" + }, + "Policy":{ + "shape":"ObjectLambdaPolicy", + "documentation":"

Object Lambda Access Point resource policy document.

" + } + } + }, "PutAccessPointPolicyRequest":{ "type":"structure", "required":[ @@ -2591,7 +3174,7 @@ }, "Policy":{ "shape":"Policy", - "documentation":"

The policy that you want to apply to the specified access point. For more information about access point policies, see Managing data access with Amazon S3 Access Points in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

The policy that you want to apply to the specified access point. For more information about access point policies, see Managing data access with Amazon S3 Access Points in the Amazon Simple Storage Service User Guide.

" } } }, @@ -3253,7 +3836,7 @@ "documentation":"

The Object Lock retention mode to be applied to all objects in the Batch Operations job.

" } }, - "documentation":"

Contains the S3 Object Lock retention mode to be applied to all objects in the S3 Batch Operations job. If you don't provide Mode and RetainUntilDate data types in your operation, you will remove the retention from your objects. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

Contains the S3 Object Lock retention mode to be applied to all objects in the S3 Batch Operations job. If you don't provide Mode and RetainUntilDate data types in your operation, you will remove the retention from your objects. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service User Guide.

" }, "S3SSEAlgorithm":{ "type":"string", @@ -3281,7 +3864,7 @@ "documentation":"

Contains the Object Lock legal hold status to be applied to all objects in the Batch Operations job.

" } }, - "documentation":"

Contains the configuration for an S3 Object Lock legal hold operation that an S3 Batch Operations job passes every object to the underlying PutObjectLegalHold API. For more information, see Using S3 Object Lock legal hold with S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

Contains the configuration for an S3 Object Lock legal hold operation that an S3 Batch Operations job passes every object to the underlying PutObjectLegalHold API. For more information, see Using S3 Object Lock legal hold with S3 Batch Operations in the Amazon Simple Storage Service User Guide.

" }, "S3SetObjectRetentionOperation":{ "type":"structure", @@ -3294,10 +3877,10 @@ }, "Retention":{ "shape":"S3Retention", - "documentation":"

Contains the Object Lock retention mode to be applied to all objects in the Batch Operations job. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

Contains the Object Lock retention mode to be applied to all objects in the Batch Operations job. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service User Guide.

" } }, - "documentation":"

Contains the configuration parameters for the Object Lock retention action for an S3 Batch Operations job. Batch Operations passes every object to the underlying PutObjectRetention API. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

Contains the configuration parameters for the Object Lock retention action for an S3 Batch Operations job. Batch Operations passes every object to the underlying PutObjectRetention API. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service User Guide.

" }, "S3SetObjectTaggingOperation":{ "type":"structure", @@ -3532,12 +4115,12 @@ "type":"string", "max":1024, "min":1, - "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:=+\\-@%]*)$" + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@%]*)$" }, "TagValueString":{ "type":"string", "max":1024, - "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:=+\\-@%]*)$" + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@%]*)$" }, "Tagging":{ "type":"structure", @@ -3583,7 +4166,7 @@ "documentation":"

The storage class to which you want the object to transition.

" } }, - "documentation":"

Specifies when an object transitions to a specified storage class. For more information about Amazon S3 Lifecycle configuration rules, see Transitioning objects using Amazon S3 Lifecycle in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

Specifies when an object transitions to a specified storage class. For more information about Amazon S3 Lifecycle configuration rules, see Transitioning objects using Amazon S3 Lifecycle in the Amazon Simple Storage Service User Guide.

" }, "TransitionList":{ "type":"list", @@ -3612,7 +4195,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

", + "documentation":"

The AWS account ID associated with the S3 Batch Operations job.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" @@ -3658,7 +4241,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

", + "documentation":"

The AWS account ID associated with the S3 Batch Operations job.

", "hostLabel":true, "location":"header", "locationName":"x-amz-account-id" @@ -3717,5 +4300,5 @@ "min":1 } }, - "documentation":"

AWS S3 Control provides access to Amazon S3 control plane operations.

" + "documentation":"

AWS S3 Control provides access to Amazon S3 control plane actions.

" } diff --git a/botocore/data/sagemaker-runtime/2017-05-13/service-2.json b/botocore/data/sagemaker-runtime/2017-05-13/service-2.json index f47de7aa..b7df4a5c 100644 --- a/botocore/data/sagemaker-runtime/2017-05-13/service-2.json +++ b/botocore/data/sagemaker-runtime/2017-05-13/service-2.json @@ -115,6 +115,12 @@ "location":"header", "locationName":"X-Amzn-SageMaker-Target-Variant" }, + "TargetContainerHostname":{ + "shape":"TargetContainerHostnameHeader", + "documentation":"

If the endpoint hosts multiple containers and is configured to use direct invocation, this parameter specifies the host name of the container to invoke.

", + "location":"header", + "locationName":"X-Amzn-SageMaker-Target-Container-Hostname" + }, "InferenceId":{ "shape":"InferenceId", "documentation":"

If you provide a value, it is added to the captured data when you enable data capture on the endpoint. For information about data capture, see Capture Data.

", @@ -191,6 +197,11 @@ "synthetic":true }, "StatusCode":{"type":"integer"}, + "TargetContainerHostnameHeader":{ + "type":"string", + "max":63, + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + }, "TargetModelHeader":{ "type":"string", "max":1024, diff --git a/botocore/data/sagemaker/2017-07-24/service-2.json b/botocore/data/sagemaker/2017-07-24/service-2.json index 097cb6fa..59b04a2c 100644 --- a/botocore/data/sagemaker/2017-07-24/service-2.json +++ b/botocore/data/sagemaker/2017-07-24/service-2.json @@ -504,7 +504,7 @@ "errors":[ {"shape":"ResourceNotFound"} ], - "documentation":"

Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to Amazon SageMaker Studio, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System (EFS) volume. This operation can only be called when the authentication mode equals IAM.

The URL that you get from a call to CreatePresignedDomainUrl is valid only for 5 minutes. If you try to use the URL after the 5-minute limit expires, you are directed to the AWS console sign-in page.

" + "documentation":"

Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to Amazon SageMaker Studio, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System (EFS) volume. This operation can only be called when the authentication mode equals IAM.

The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds. If you try to use the URL after the timeout limit expires, you are directed to the AWS console sign-in page.

" }, "CreatePresignedNotebookInstanceUrl":{ "name":"CreatePresignedNotebookInstanceUrl", @@ -1562,7 +1562,7 @@ "errors":[ {"shape":"ResourceNotFound"} ], - "documentation":"

Returns information about a training job.

" + "documentation":"

Returns information about a training job.

Some of the attributes below only appear if the training job successfully starts. If the training job fails, TrainingJobStatus is Failed and, depending on the FailureReason, attributes like TrainingStartTime, TrainingTimeInSeconds, TrainingEndTime, and BillableTimeInSeconds may not be present in the response.

" }, "DescribeTransformJob":{ "name":"DescribeTransformJob", @@ -2197,7 +2197,7 @@ }, "input":{"shape":"ListTrainingJobsRequest"}, "output":{"shape":"ListTrainingJobsResponse"}, - "documentation":"

Lists training jobs.

" + "documentation":"

Lists training jobs.

When StatusEquals and MaxResults are set at the same time, the MaxResults number of training jobs is retrieved first, ignoring the StatusEquals parameter, and then the results are filtered by the StatusEquals parameter and returned in the response. For example, if ListTrainingJobs is invoked with the following parameters:

{ ... MaxResults: 100, StatusEquals: InProgress ... }

Then, 100 training jobs with any status, including statuses other than InProgress, are selected first (sorted according to the creation time, from the latest to the oldest), and those with status InProgress are returned.

You can quickly test the API using the following AWS CLI code.

aws sagemaker list-training-jobs --max-results 100 --status-equals InProgress

" }, "ListTrainingJobsForHyperParameterTuningJob":{ "name":"ListTrainingJobsForHyperParameterTuningJob", @@ -3174,7 +3174,7 @@ "members":{ "AnnotationConsolidationLambdaArn":{ "shape":"LambdaFunctionArn", - "documentation":"

The Amazon Resource Name (ARN) of a Lambda function implements the logic for annotation consolidation and to process output data.

This parameter is required for all labeling jobs. For built-in task types, use one of the following Amazon SageMaker Ground Truth Lambda function ARNs for AnnotationConsolidationLambdaArn. For custom labeling workflows, see Post-annotation Lambda.

Bounding box - Finds the most similar boxes from different workers based on the Jaccard index of the boxes.

Image classification - Uses a variant of the Expectation Maximization approach to estimate the true class of an image based on annotations from individual workers.

Multi-label image classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of an image based on annotations from individual workers.

Semantic segmentation - Treats each pixel in an image as a multi-class classification and treats pixel annotations from workers as \"votes\" for the correct label.

Text classification - Uses a variant of the Expectation Maximization approach to estimate the true class of text based on annotations from individual workers.

Multi-label text classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of text based on annotations from individual workers.

Named entity recognition - Groups similar selections and calculates aggregate boundaries, resolving to most-assigned label.

Video Classification - Use this task type when you need workers to classify videos using predefined labels that you specify. Workers are shown videos and are asked to choose one label for each video.

Video Frame Object Detection - Use this task type to have workers identify and locate objects in a sequence of video frames (images extracted from a video) using bounding boxes. For example, you can use this task to ask workers to identify and localize various objects in a series of video frames, such as cars, bikes, and pedestrians.

Video Frame Object Tracking - Use this task type to have workers track the movement of objects in a sequence of video frames (images extracted from a video) using bounding boxes. For example, you can use this task to ask workers to track the movement of objects, such as cars, bikes, and pedestrians.

3D Point Cloud Object Detection - Use this task type when you want workers to classify objects in a 3D point cloud by drawing 3D cuboids around objects. For example, you can use this task type to ask workers to identify different types of objects in a point cloud, such as cars, bikes, and pedestrians.

3D Point Cloud Object Tracking - Use this task type when you want workers to draw 3D cuboids around objects that appear in a sequence of 3D point cloud frames. For example, you can use this task type to ask workers to track the movement of vehicles across multiple point cloud frames.

3D Point Cloud Semantic Segmentation - Use this task type when you want workers to create a point-level semantic segmentation masks by painting objects in a 3D point cloud using different colors where each color is assigned to one of the classes you specify.

Use the following ARNs for Label Verification and Adjustment Jobs

Use label verification and adjustment jobs to review and adjust labels. To learn more, see Verify and Adjust Labels .

Semantic Segmentation Adjustment - Treats each pixel in an image as a multi-class classification and treats pixel adjusted annotations from workers as \"votes\" for the correct label.

Semantic Segmentation Verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgment for semantic segmentation labels based on annotations from individual workers.

Bounding Box Adjustment - Finds the most similar boxes from different workers based on the Jaccard index of the adjusted annotations.

Bounding Box Verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgement for bounding box labels based on annotations from individual workers.

Video Frame Object Detection Adjustment - Use this task type when you want workers to adjust bounding boxes that workers have added to video frames to classify and localize objects in a sequence of video frames.

Video Frame Object Tracking Adjustment - Use this task type when you want workers to adjust bounding boxes that workers have added to video frames to track object movement across a sequence of video frames.

3D Point Cloud Object Detection Adjustment - Use this task type when you want workers to adjust 3D cuboids around objects in a 3D point cloud.

3D Point Cloud Object Tracking Adjustment - Use this task type when you want workers to adjust 3D cuboids around objects that appear in a sequence of 3D point cloud frames.

3D Point Cloud Semantic Segmentation Adjustment - Use this task type when you want workers to adjust a point-level semantic segmentation masks using a paint tool.

" + "documentation":"

The Amazon Resource Name (ARN) of a Lambda function that implements the logic for annotation consolidation and processes output data.

This parameter is required for all labeling jobs. For built-in task types, use one of the following Amazon SageMaker Ground Truth Lambda function ARNs for AnnotationConsolidationLambdaArn. For custom labeling workflows, see Post-annotation Lambda.

Bounding box - Finds the most similar boxes from different workers based on the Jaccard index of the boxes.

Image classification - Uses a variant of the Expectation Maximization approach to estimate the true class of an image based on annotations from individual workers.

Multi-label image classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of an image based on annotations from individual workers.

Semantic segmentation - Treats each pixel in an image as a multi-class classification and treats pixel annotations from workers as \"votes\" for the correct label.

Text classification - Uses a variant of the Expectation Maximization approach to estimate the true class of text based on annotations from individual workers.

Multi-label text classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of text based on annotations from individual workers.

Named entity recognition - Groups similar selections and calculates aggregate boundaries, resolving to most-assigned label.

Video Classification - Use this task type when you need workers to classify videos using predefined labels that you specify. Workers are shown videos and are asked to choose one label for each video.

Video Frame Object Detection - Use this task type to have workers identify and locate objects in a sequence of video frames (images extracted from a video) using bounding boxes. For example, you can use this task to ask workers to identify and localize various objects in a series of video frames, such as cars, bikes, and pedestrians.

Video Frame Object Tracking - Use this task type to have workers track the movement of objects in a sequence of video frames (images extracted from a video) using bounding boxes. For example, you can use this task to ask workers to track the movement of objects, such as cars, bikes, and pedestrians.

3D Point Cloud Object Detection - Use this task type when you want workers to classify objects in a 3D point cloud by drawing 3D cuboids around objects. For example, you can use this task type to ask workers to identify different types of objects in a point cloud, such as cars, bikes, and pedestrians.

3D Point Cloud Object Tracking - Use this task type when you want workers to draw 3D cuboids around objects that appear in a sequence of 3D point cloud frames. For example, you can use this task type to ask workers to track the movement of vehicles across multiple point cloud frames.

3D Point Cloud Semantic Segmentation - Use this task type when you want workers to create point-level semantic segmentation masks by painting objects in a 3D point cloud using different colors where each color is assigned to one of the classes you specify.

Use the following ARNs for Label Verification and Adjustment Jobs

Use label verification and adjustment jobs to review and adjust labels. To learn more, see Verify and Adjust Labels .

Semantic Segmentation Adjustment - Treats each pixel in an image as a multi-class classification and treats pixel adjusted annotations from workers as \"votes\" for the correct label.

Semantic Segmentation Verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgment for semantic segmentation labels based on annotations from individual workers.

Bounding Box Adjustment - Finds the most similar boxes from different workers based on the Jaccard index of the adjusted annotations.

Bounding Box Verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgment for bounding box labels based on annotations from individual workers.

Video Frame Object Detection Adjustment - Use this task type when you want workers to adjust bounding boxes that workers have added to video frames to classify and localize objects in a sequence of video frames.

Video Frame Object Tracking Adjustment - Use this task type when you want workers to adjust bounding boxes that workers have added to video frames to track object movement across a sequence of video frames.

3D Point Cloud Object Detection Adjustment - Use this task type when you want workers to adjust 3D cuboids around objects in a 3D point cloud.

3D Point Cloud Object Tracking Adjustment - Use this task type when you want workers to adjust 3D cuboids around objects that appear in a sequence of 3D point cloud frames.

3D Point Cloud Semantic Segmentation Adjustment - Use this task type when you want workers to adjust point-level semantic segmentation masks using a paint tool.

" } }, "documentation":"

Configures how labels are consolidated across human workers and processes output data.

" @@ -5861,11 +5861,11 @@ }, "LabelAttributeName":{ "shape":"LabelAttributeName", - "documentation":"

The attribute name to use for the label in the output manifest file. This is the key for the key/value pair formed with the label that a worker assigns to the object. The name can't end with \"-metadata\". If you are running a semantic segmentation labeling job, the attribute name must end with \"-ref\". If you are running any other kind of labeling job, the attribute name must not end with \"-ref\".

" + "documentation":"

The attribute name to use for the label in the output manifest file. This is the key for the key/value pair formed with the label that a worker assigns to the object. The LabelAttributeName must meet the following requirements.

If you are creating an adjustment or verification labeling job, you must use a different LabelAttributeName than the one used in the original labeling job. The original labeling job is the Ground Truth labeling job that produced the labels that you want verified or adjusted. To learn more about adjustment and verification labeling jobs, see Verify and Adjust Labels.

" }, "InputConfig":{ "shape":"LabelingJobInputConfig", - "documentation":"

Input data for the labeling job, such as the Amazon S3 location of the data objects and the location of the manifest file that describes the data objects.

" + "documentation":"

Input data for the labeling job, such as the Amazon S3 location of the data objects and the location of the manifest file that describes the data objects.

You must specify at least one of the following: S3DataSource or SnsDataSource.

If you use the Amazon Mechanical Turk workforce, your input data should not include confidential information, personal information or protected health information. Use ContentClassifiers to specify that your data is free of personally identifiable information and adult content.

" }, "OutputConfig":{ "shape":"LabelingJobOutputConfig", @@ -5877,7 +5877,7 @@ }, "LabelCategoryConfigS3Uri":{ "shape":"S3Uri", - "documentation":"

The S3 URI of the file, referred to as a label category configuration file, that defines the categories used to label the data objects.

For 3D point cloud and video frame task types, you can add label category attributes and frame attributes to your label category configuration file. To learn how, see Create a Labeling Category Configuration File for 3D Point Cloud Labeling Jobs.

For all other built-in task types and custom tasks, your label category configuration file must be a JSON file in the following format. Identify the labels you want to use by replacing label_1, label_2,...,label_n with your label categories.

{

\"document-version\": \"2018-11-28\"

\"labels\": [

{

\"label\": \"label_1\"

},

{

\"label\": \"label_2\"

},

...

{

\"label\": \"label_n\"

}

]

}

" + "documentation":"

The S3 URI of the file, referred to as a label category configuration file, that defines the categories used to label the data objects.

For 3D point cloud and video frame task types, you can add label category attributes and frame attributes to your label category configuration file. To learn how, see Create a Labeling Category Configuration File for 3D Point Cloud Labeling Jobs.

For all other built-in task types and custom tasks, your label category configuration file must be a JSON file in the following format. Identify the labels you want to use by replacing label_1, label_2,...,label_n with your label categories.

{

\"document-version\": \"2018-11-28\",

\"labels\": [{\"label\": \"label_1\"},{\"label\": \"label_2\"},...{\"label\": \"label_n\"}]

}
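As a minimal sketch of producing a file in the format above, the snippet below builds the JSON with Python and uploads it to Amazon S3 so it can be referenced as LabelCategoryConfigS3Uri; the bucket, key, and label names are hypothetical.

```python
# Sketch: write a label category configuration file and upload it to S3.
# Bucket, key, and label names here are assumptions for illustration only.
import json
import boto3

label_config = {
    "document-version": "2018-11-28",
    "labels": [{"label": name} for name in ("cat", "dog", "other")],
}

s3 = boto3.client("s3")
s3.put_object(
    Bucket="my-labeling-bucket",                 # hypothetical bucket
    Key="configs/label-categories.json",         # hypothetical key
    Body=json.dumps(label_config).encode("utf-8"),
)
# Reference s3://my-labeling-bucket/configs/label-categories.json
# as LabelCategoryConfigS3Uri in the CreateLabelingJob request.
```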

Note the following about the label category configuration file:

" }, "StoppingConditions":{ "shape":"LabelingJobStoppingConditions", @@ -6034,6 +6034,10 @@ "shape":"ContainerDefinitionList", "documentation":"

Specifies the containers in the inference pipeline.

" }, + "InferenceExecutionConfig":{ + "shape":"InferenceExecutionConfig", + "documentation":"

Specifies details of how containers in a multi-container endpoint are called.

" + }, "ExecutionRoleArn":{ "shape":"RoleArn", "documentation":"

The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker can assume to access model artifacts and docker image for deployment on ML compute instances or for batch transform jobs. Deploying on ML compute instances is part of model hosting. For more information, see Amazon SageMaker Roles.

To be able to pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission.

" @@ -6404,7 +6408,11 @@ }, "SessionExpirationDurationInSeconds":{ "shape":"SessionExpirationDurationInSeconds", - "documentation":"

The session expiration duration in seconds.

" + "documentation":"

The session expiration duration in seconds. This value defaults to 43200.

" + }, + "ExpiresInSeconds":{ + "shape":"ExpiresInSeconds", + "documentation":"

The number of seconds until the pre-signed URL expires. This value defaults to 300.
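A minimal sketch of requesting a pre-signed Studio URL with both expiration parameters, assuming a hypothetical domain ID and user profile name; ExpiresInSeconds bounds the URL itself, SessionExpirationDurationInSeconds bounds the Studio session.

```python
# Sketch: create a pre-signed domain URL that expires after 120 seconds,
# with the default 12-hour session length. IDs below are hypothetical.
import boto3

sm = boto3.client("sagemaker")
response = sm.create_presigned_domain_url(
    DomainId="d-xxxxxxxxxxxx",            # hypothetical domain ID
    UserProfileName="analyst-profile",    # hypothetical user profile
    SessionExpirationDurationInSeconds=43200,
    ExpiresInSeconds=120,
)
print(response["AuthorizedUrl"])
```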

" } } }, @@ -6451,7 +6459,7 @@ "members":{ "ProcessingInputs":{ "shape":"ProcessingInputs", - "documentation":"

List of input configurations for the processing job.

" + "documentation":"

An array of inputs configuring the data to download into the processing container.

" }, "ProcessingOutputConfig":{ "shape":"ProcessingOutputConfig", @@ -6475,11 +6483,11 @@ }, "Environment":{ "shape":"ProcessingEnvironmentMap", - "documentation":"

Sets the environment variables in the Docker container.

" + "documentation":"

The environment variables to set in the Docker container. Up to 100 key/value entries in the map are supported.

" }, "NetworkConfig":{ "shape":"NetworkConfig", - "documentation":"

Networking options for a processing job.

" + "documentation":"

Networking options for a processing job, such as whether to allow inbound and outbound network calls to and from processing containers, and the VPC subnets and security groups to use for VPC-enabled processing jobs.

" }, "RoleArn":{ "shape":"RoleArn", @@ -8327,7 +8335,7 @@ }, "CompilationJobArn":{ "shape":"CompilationJobArn", - "documentation":"

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker assumes to perform the model compilation job.

" + "documentation":"

The Amazon Resource Name (ARN) of the model compilation job.

" }, "CompilationJobStatus":{ "shape":"CompilationJobStatus", @@ -8367,7 +8375,7 @@ }, "RoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the model compilation job.

" + "documentation":"

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker assumes to perform the model compilation job.

" }, "InputConfig":{ "shape":"InputConfig", @@ -9554,6 +9562,10 @@ "shape":"ContainerDefinitionList", "documentation":"

The containers in the inference pipeline.

" }, + "InferenceExecutionConfig":{ + "shape":"InferenceExecutionConfig", + "documentation":"

Specifies details of how containers in a multi-container endpoint are called.

" + }, "ExecutionRoleArn":{ "shape":"RoleArn", "documentation":"

The Amazon Resource Name (ARN) of the IAM role that you specified for the model.

" @@ -10408,7 +10420,7 @@ }, "BillableTimeInSeconds":{ "shape":"BillableTimeInSeconds", - "documentation":"

The billable time in seconds.

You can calculate the savings from using managed spot training using the formula (1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100. For example, if BillableTimeInSeconds is 100 and TrainingTimeInSeconds is 500, the savings is 80%.

" + "documentation":"

The billable time in seconds. Billable time refers to the absolute wall-clock time.

Multiply BillableTimeInSeconds by the number of instances (InstanceCount) in your training cluster to get the total compute time Amazon SageMaker will bill you if you run distributed training. The formula is as follows: BillableTimeInSeconds * InstanceCount .

You can calculate the savings from using managed spot training using the formula (1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100. For example, if BillableTimeInSeconds is 100 and TrainingTimeInSeconds is 500, the savings is 80%.
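The arithmetic from this passage, worked as a short Python example using the same illustrative numbers:

```python
# Worked example of the managed spot training cost and savings formulas above.
billable_time_in_seconds = 100
training_time_in_seconds = 500
instance_count = 2  # hypothetical cluster size

total_billed_compute_seconds = billable_time_in_seconds * instance_count
savings_percent = (1 - billable_time_in_seconds / training_time_in_seconds) * 100

print(total_billed_compute_seconds)  # 200
print(savings_percent)               # 80.0
```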

" }, "DebugHookConfig":{"shape":"DebugHookConfig"}, "ExperimentConfig":{"shape":"ExperimentConfig"}, @@ -11685,6 +11697,11 @@ }, "documentation":"

A summary of the properties of an experiment. To get the complete set of properties, call the DescribeExperiment API and provide the ExperimentName.

" }, + "ExpiresInSeconds":{ + "type":"integer", + "max":300, + "min":5 + }, "Explainability":{ "type":"structure", "members":{ @@ -12034,7 +12051,7 @@ "members":{ "S3OutputPath":{ "shape":"S3Uri", - "documentation":"

The Amazon S3 path where the object containing human output will be made available.

" + "documentation":"

The Amazon S3 path where the object containing human output will be made available.

To learn more about the format of Amazon A2I output data, see Amazon A2I Output Data.

" }, "KmsKeyId":{ "shape":"KmsKeyId", @@ -12141,6 +12158,12 @@ "SKLEARN" ] }, + "FrameworkVersion":{ + "type":"string", + "max":10, + "min":3, + "pattern":"[0-9]\\.[A-Za-z0-9.]+" + }, "GenerateCandidateDefinitionsOnly":{"type":"boolean"}, "GetDeviceFleetReportRequest":{ "type":"structure", @@ -12341,11 +12364,11 @@ "members":{ "WorkteamArn":{ "shape":"WorkteamArn", - "documentation":"

Amazon Resource Name (ARN) of a team of workers.

" + "documentation":"

Amazon Resource Name (ARN) of a team of workers. To learn more about the types of workforces and work teams you can create and use with Amazon A2I, see Create and Manage Workforces.

" }, "HumanTaskUiArn":{ "shape":"HumanTaskUiArn", - "documentation":"

The Amazon Resource Name (ARN) of the human task user interface.

" + "documentation":"

The Amazon Resource Name (ARN) of the human task user interface.

You can use standard HTML and Crowd HTML Elements to create a custom worker task template. You use this template to create a human task UI.

To learn how to create a custom HTML template, see Create Custom Worker Task Template.

To learn how to create a human task UI, which is a worker task template that can be used in a flow definition, see Create and Delete a Worker Task Template.

" }, "TaskTitle":{ "shape":"FlowDefinitionTaskTitle", @@ -12365,7 +12388,7 @@ }, "TaskTimeLimitInSeconds":{ "shape":"FlowDefinitionTaskTimeLimitInSeconds", - "documentation":"

The amount of time that a worker has to complete a task. The default value is 3,600 seconds (1 hour)

" + "documentation":"

The amount of time that a worker has to complete a task. The default value is 3,600 seconds (1 hour).

" }, "TaskKeywords":{ "shape":"FlowDefinitionTaskKeywords", @@ -12429,11 +12452,11 @@ }, "TaskTimeLimitInSeconds":{ "shape":"TaskTimeLimitInSeconds", - "documentation":"

The amount of time that a worker has to complete a task.

" + "documentation":"

The amount of time that a worker has to complete a task.

If you create a custom labeling job, the maximum value for this parameter is 8 hours (28,800 seconds).

If you create a labeling job using a built-in task type, the maximum for this parameter depends on the task type you use:

" }, "TaskAvailabilityLifetimeInSeconds":{ "shape":"TaskAvailabilityLifetimeInSeconds", - "documentation":"

The length of time that a task remains available for labeling by human workers. If you choose the Amazon Mechanical Turk workforce, the maximum is 12 hours (43200). The default value is 864000 seconds (10 days). For private and vendor workforces, the maximum is as listed.

" + "documentation":"

The length of time that a task remains available for labeling by human workers. The default and maximum values for this parameter depend on the type of workforce you use.

" }, "MaxConcurrentTaskCount":{ "shape":"MaxConcurrentTaskCount", @@ -12981,6 +13004,10 @@ "RepositoryAccessMode":{ "shape":"RepositoryAccessMode", "documentation":"

Set this to one of the following values:

" + }, + "RepositoryAuthConfig":{ + "shape":"RepositoryAuthConfig", + "documentation":"

(Optional) Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication.

" } }, "documentation":"

Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC).

" @@ -13145,6 +13172,24 @@ "type":"list", "member":{"shape":"Image"} }, + "InferenceExecutionConfig":{ + "type":"structure", + "required":["Mode"], + "members":{ + "Mode":{ + "shape":"InferenceExecutionMode", + "documentation":"

How containers in a multi-container endpoint are run. The following values are valid.

" + } + }, + "documentation":"

Specifies details about how containers in a multi-container endpoint are run.
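A minimal sketch of a CreateModel call for a multi-container endpoint using Direct invocation mode (the enum's other value is Serial); the image URIs, model data locations, and role ARN are hypothetical.

```python
# Sketch: register a multi-container model where each container can be
# invoked directly. All ARNs, URIs, and names below are hypothetical.
import boto3

sm = boto3.client("sagemaker")
sm.create_model(
    ModelName="multi-container-model",
    Containers=[
        {"Image": "123456789012.dkr.ecr.us-east-1.amazonaws.com/model-a:latest",
         "ModelDataUrl": "s3://my-bucket/model-a/model.tar.gz"},
        {"Image": "123456789012.dkr.ecr.us-east-1.amazonaws.com/model-b:latest",
         "ModelDataUrl": "s3://my-bucket/model-b/model.tar.gz"},
    ],
    InferenceExecutionConfig={"Mode": "Direct"},
    ExecutionRoleArn="arn:aws:iam::123456789012:role/SageMakerExecutionRole",
)
```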

" + }, + "InferenceExecutionMode":{ + "type":"string", + "enum":[ + "Serial", + "Direct" + ] + }, "InferenceSpecification":{ "type":"structure", "required":[ @@ -13190,11 +13235,15 @@ }, "DataInputConfig":{ "shape":"DataInputConfig", - "documentation":"

Specifies the name and shape of the expected data inputs for your trained model with a JSON dictionary form. The data inputs are InputConfig$Framework specific.

DataInputConfig supports the following parameters for CoreML OutputConfig$TargetDevice (ML Model format):

CoreML ClassifierConfig parameters can be specified using OutputConfig$CompilerOptions. CoreML converter supports Tensorflow and PyTorch models. CoreML conversion examples:

" + "documentation":"

Specifies the name and shape of the expected data inputs for your trained model with a JSON dictionary form. The data inputs are InputConfig$Framework specific.

DataInputConfig supports the following parameters for CoreML OutputConfig$TargetDevice (ML Model format):

CoreML ClassifierConfig parameters can be specified using OutputConfig$CompilerOptions. CoreML converter supports Tensorflow and PyTorch models. CoreML conversion examples:

Depending on the model format, DataInputConfig requires the following parameters for ml_eia2 OutputConfig:TargetDevice.

" }, "Framework":{ "shape":"Framework", "documentation":"

Identifies the framework in which the model was trained. For example: TENSORFLOW.

" + }, + "FrameworkVersion":{ + "shape":"FrameworkVersion", + "documentation":"

Specifies the framework version to use.

This API field is only supported for PyTorch framework versions 1.4, 1.5, and 1.6 for cloud instance target devices: ml_c4, ml_c5, ml_m4, ml_m5, ml_p2, ml_p3, and ml_g4dn.

" } }, "documentation":"

Contains information about the location of input model artifacts, the name and shape of the expected data inputs, and the framework in which the model was trained.

" @@ -13651,7 +13700,7 @@ "members":{ "ManifestS3Uri":{ "shape":"S3Uri", - "documentation":"

The Amazon S3 location of the manifest file that describes the input data objects.

" + "documentation":"

The Amazon S3 location of the manifest file that describes the input data objects.

The input manifest file referenced in ManifestS3Uri must contain one of the following keys: source-ref or source. The value of the keys are interpreted as follows:

If you are a new user of Ground Truth, it is recommended you review Use an Input Manifest File in the Amazon SageMaker Developer Guide to learn how to create an input manifest file.
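A minimal sketch of producing such an input manifest as a JSON Lines file keyed by source-ref, with one data object per line; the local filename and S3 URIs are hypothetical.

```python
# Sketch: write a Ground Truth input manifest (one JSON object per line).
# File name and S3 URIs below are hypothetical.
import json

objects = [
    "s3://my-labeling-bucket/images/0001.jpg",
    "s3://my-labeling-bucket/images/0002.jpg",
]

with open("dataset.manifest", "w") as fh:
    for uri in objects:
        fh.write(json.dumps({"source-ref": uri}) + "\n")
# Upload dataset.manifest to S3 and reference its location as ManifestS3Uri.
```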

" } }, "documentation":"

The Amazon S3 location of the input data objects.

" @@ -18450,7 +18499,7 @@ }, "CompilerOptions":{ "shape":"CompilerOptions", - "documentation":"

Specifies additional parameters for compiler options in JSON format. The compiler options are TargetPlatform specific. It is required for NVIDIA accelerators and highly recommended for CPU compilations. For any other cases, it is optional to specify CompilerOptions.

" + "documentation":"

Specifies additional parameters for compiler options in JSON format. The compiler options are TargetPlatform specific. It is required for NVIDIA accelerators and highly recommended for CPU compilations. For any other cases, it is optional to specify CompilerOptions.

" }, "KmsKeyId":{ "shape":"KmsKeyId", @@ -18465,7 +18514,7 @@ "members":{ "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:

If you use a KMS key ID or an alias of your master key, the Amazon SageMaker execution role must include permissions to call kms:Encrypt. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side encryption with KMS-managed keys for OutputDataConfig. If you use a bucket policy with an s3:PutObject permission that only allows objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption to \"aws:kms\". For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KMS key policy must grant permission to the IAM role that you specify in your CreateTrainingJob, CreateTransformJob, or CreateHyperParameterTuningJob requests. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.

" + "documentation":"

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:

If you use a KMS key ID or an alias of your master key, the Amazon SageMaker execution role must include permissions to call kms:Encrypt. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side encryption with KMS-managed keys for OutputDataConfig. If you use a bucket policy with an s3:PutObject permission that only allows objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption to \"aws:kms\". For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KMS key policy must grant permission to the IAM role that you specify in your CreateTrainingJob, CreateTransformJob, or CreateHyperParameterTuningJob requests. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.

" }, "S3OutputPath":{ "shape":"S3Uri", @@ -18962,7 +19011,7 @@ "members":{ "FeatureGroupName":{ "shape":"FeatureGroupName", - "documentation":"

The name of the Amazon SageMaker FeatureGroup to use as the destination for processing job output.

" + "documentation":"

The name of the Amazon SageMaker FeatureGroup to use as the destination for processing job output. Note that your processing script is responsible for putting records into your Feature Store.

" } }, "documentation":"

Configuration for processing job outputs in Amazon SageMaker Feature Store.

" @@ -18973,7 +19022,7 @@ "members":{ "InputName":{ "shape":"String", - "documentation":"

The name of the inputs for the processing job.

" + "documentation":"

The name for the processing job input.

" }, "AppManaged":{ "shape":"AppManaged", @@ -18981,7 +19030,7 @@ }, "S3Input":{ "shape":"ProcessingS3Input", - "documentation":"

Configuration for processing job inputs in Amazon S3.

" + "documentation":"

Configuration for downloading input data from Amazon S3 into the processing container.

" }, "DatasetDefinition":{ "shape":"DatasetDefinition", @@ -19238,14 +19287,14 @@ "members":{ "Outputs":{ "shape":"ProcessingOutputs", - "documentation":"

List of output configurations for the processing job.

" + "documentation":"

An array of outputs configuring the data to upload from the processing container.

" }, "KmsKeyId":{ "shape":"KmsKeyId", "documentation":"

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the processing job output. KmsKeyId can be an ID of a KMS key, ARN of a KMS key, alias of a KMS key, or alias of a KMS key. The KmsKeyId is applied to all outputs.

" } }, - "documentation":"

The output configuration for the processing job.

" + "documentation":"

Configuration for uploading output from the processing container.

" }, "ProcessingOutputs":{ "type":"list", @@ -19294,11 +19343,11 @@ "members":{ "S3Uri":{ "shape":"S3Uri", - "documentation":"

The URI for the Amazon S3 storage where you want Amazon SageMaker to download the artifacts needed to run a processing job.

" + "documentation":"

The URI of the Amazon S3 prefix from which Amazon SageMaker downloads the data required to run a processing job.

" }, "LocalPath":{ "shape":"ProcessingLocalPath", - "documentation":"

The local path to the Amazon S3 bucket where you want Amazon SageMaker to download the inputs to run a processing job. LocalPath is an absolute path to the input data. This is a required parameter when AppManaged is False (default).

" + "documentation":"

The local path in your container where you want Amazon SageMaker to write input data to. LocalPath is an absolute path to the input data and must begin with /opt/ml/processing/. LocalPath is a required parameter when AppManaged is False (default).

" }, "S3DataType":{ "shape":"ProcessingS3DataType", @@ -19306,18 +19355,18 @@ }, "S3InputMode":{ "shape":"ProcessingS3InputMode", - "documentation":"

Whether to use File or Pipe input mode. In File mode, Amazon SageMaker copies the data from the input source onto the local Amazon Elastic Block Store (Amazon EBS) volumes before starting your training algorithm. This is the most commonly used input mode. In Pipe mode, Amazon SageMaker streams input data from the source directly to your algorithm without using the EBS volume.This is a required parameter when AppManaged is False (default).

" + "documentation":"

Whether to use File or Pipe input mode. In File mode, Amazon SageMaker copies the data from the input source onto the local ML storage volume before starting your processing container. This is the most commonly used input mode. In Pipe mode, Amazon SageMaker streams input data from the source directly to your processing container into named pipes without using the ML storage volume.

" }, "S3DataDistributionType":{ "shape":"ProcessingS3DataDistributionType", - "documentation":"

Whether the data stored in Amazon S3 is FullyReplicated or ShardedByS3Key.

" + "documentation":"

Whether to distribute the data from Amazon S3 to all processing instances with FullyReplicated, or whether the data from Amazon S3 is sharded by Amazon S3 key, downloading one shard of data to each processing instance.

" }, "S3CompressionType":{ "shape":"ProcessingS3CompressionType", - "documentation":"

Whether to use Gzip compression for Amazon S3 storage.

" + "documentation":"

Whether to GZIP-decompress the data in Amazon S3 as it is streamed into the processing container. Gzip can only be used when Pipe mode is specified as the S3InputMode. In Pipe mode, Amazon SageMaker streams input data from the source directly to your container without using the EBS volume.

" } }, - "documentation":"

Configuration for processing job inputs in Amazon S3.

" + "documentation":"

Configuration for downloading input data from Amazon S3 into the processing container.

" }, "ProcessingS3InputMode":{ "type":"string", @@ -19340,14 +19389,14 @@ }, "LocalPath":{ "shape":"ProcessingLocalPath", - "documentation":"

The local path to the Amazon S3 bucket where you want Amazon SageMaker to save the results of an processing job. LocalPath is an absolute path to the input data.

" + "documentation":"

The local path of a directory whose contents you want Amazon SageMaker to upload to Amazon S3. LocalPath is an absolute path to a directory containing output files. This directory is created by the platform and will exist when your container's entrypoint is invoked.

" }, "S3UploadMode":{ "shape":"ProcessingS3UploadMode", "documentation":"

Whether to upload the results of the processing job continuously or after the job completes.

" } }, - "documentation":"

Configuration for processing job outputs in Amazon S3.

" + "documentation":"

Configuration for uploading output data to Amazon S3 from the processing container.

" }, "ProcessingS3UploadMode":{ "type":"string", @@ -19365,7 +19414,7 @@ "documentation":"

Specifies the maximum runtime in seconds.

" } }, - "documentation":"

Specifies a time limit for how long the processing job is allowed to run.

" + "documentation":"

Configures conditions under which the processing job should be stopped, such as how long the processing job has been running. After the condition is met, the processing job is stopped.

" }, "ProcessingVolumeSizeInGB":{ "type":"integer", @@ -19413,6 +19462,10 @@ "AcceleratorType":{ "shape":"ProductionVariantAcceleratorType", "documentation":"

The size of the Elastic Inference (EI) instance to use for the production variant. EI instances provide on-demand GPU computing for inference. For more information, see Using Elastic Inference in Amazon SageMaker.

" + }, + "CoreDumpConfig":{ + "shape":"ProductionVariantCoreDumpConfig", + "documentation":"

Specifies configuration for a core dump from the model container when the process crashes.

" } }, "documentation":"

Identifies a model that you want to host and the resources to deploy for hosting it. If you are deploying multiple models, tell Amazon SageMaker how to distribute traffic among the models by specifying variant weights.

" @@ -19428,6 +19481,21 @@ "ml.eia2.xlarge" ] }, + "ProductionVariantCoreDumpConfig":{ + "type":"structure", + "required":["DestinationS3Uri"], + "members":{ + "DestinationS3Uri":{ + "shape":"DestinationS3Uri", + "documentation":"

The Amazon S3 bucket to send the core dump to.

" + }, + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the core dump data at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:

If you use a KMS key ID or an alias of your master key, the Amazon SageMaker execution role must include permissions to call kms:Encrypt. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side encryption with KMS-managed keys for OutputDataConfig. If you use a bucket policy with an s3:PutObject permission that only allows objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption to \"aws:kms\". For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KMS key policy must grant permission to the IAM role that you specify in your CreateEndpoint and UpdateEndpoint requests. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.

" + } + }, + "documentation":"

Specifies configuration for a core dump from the model container when the process crashes.
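A minimal sketch of an endpoint configuration whose production variant writes core dumps to Amazon S3 when the model container crashes; the model name, bucket, and KMS key alias are hypothetical.

```python
# Sketch: endpoint config with a core-dump destination for the variant.
# Model name, bucket, and KMS alias below are hypothetical.
import boto3

sm = boto3.client("sagemaker")
sm.create_endpoint_config(
    EndpointConfigName="model-with-core-dumps",
    ProductionVariants=[
        {
            "VariantName": "AllTraffic",
            "ModelName": "my-model",
            "InstanceType": "ml.m5.large",
            "InitialInstanceCount": 1,
            "CoreDumpConfig": {
                "DestinationS3Uri": "s3://my-debug-bucket/core-dumps/",
                "KmsKeyId": "alias/my-kms-key",
            },
        }
    ],
)
```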

" + }, "ProductionVariantInstanceType":{ "type":"string", "enum":[ @@ -20063,6 +20131,23 @@ "Vpc" ] }, + "RepositoryAuthConfig":{ + "type":"structure", + "required":["RepositoryCredentialsProviderArn"], + "members":{ + "RepositoryCredentialsProviderArn":{ + "shape":"RepositoryCredentialsProviderArn", + "documentation":"

The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.

" + } + }, + "documentation":"

Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field of the ImageConfig object that you passed to a call to CreateModel and the private Docker registry where the model image is hosted requires authentication.
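A minimal sketch of a CreateModel call that pulls the model image from a private registry reachable through the VPC, using a Lambda-backed credentials provider; the registry URI, Lambda ARN, role ARN, subnet, and security group are hypothetical.

```python
# Sketch: model whose image lives in a private Docker registry requiring
# authentication. All identifiers below are hypothetical.
import boto3

sm = boto3.client("sagemaker")
sm.create_model(
    ModelName="private-registry-model",
    PrimaryContainer={
        "Image": "registry.internal.example.com/models/classifier:latest",
        "ImageConfig": {
            "RepositoryAccessMode": "Vpc",
            "RepositoryAuthConfig": {
                "RepositoryCredentialsProviderArn":
                    "arn:aws:lambda:us-east-1:123456789012:function:ecr-creds-provider",
            },
        },
    },
    ExecutionRoleArn="arn:aws:iam::123456789012:role/SageMakerExecutionRole",
    VpcConfig={
        "SecurityGroupIds": ["sg-0123456789abcdef0"],
        "Subnets": ["subnet-0123456789abcdef0"],
    },
)
```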

" + }, + "RepositoryCredentialsProviderArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":".*" + }, "ResolvedAttributes":{ "type":"structure", "members":{ @@ -20307,6 +20392,10 @@ "KmsKeyId":{ "shape":"KmsKeyId", "documentation":"

The AWS Key Management Service (KMS) key ID of the key used to encrypt any objects written into the OfflineStore S3 location.

The IAM roleARN that is passed as a parameter to CreateFeatureGroup must have below permissions to the KmsKeyId:

" + }, + "ResolvedOutputS3Uri":{ + "shape":"S3Uri", + "documentation":"

The S3 path where offline records are written.

" } }, "documentation":"

The Amazon Simple Storage Service (Amazon S3) location and security configuration for OfflineStore.

" @@ -21126,6 +21215,7 @@ "ml_p3", "ml_g4dn", "ml_inf1", + "ml_eia2", "jetson_tx1", "jetson_tx2", "jetson_nano", @@ -21197,7 +21287,6 @@ }, "TaskAvailabilityLifetimeInSeconds":{ "type":"integer", - "max":864000, "min":60 }, "TaskCount":{ @@ -21230,7 +21319,6 @@ }, "TaskTimeLimitInSeconds":{ "type":"integer", - "max":604800, "min":30 }, "TaskTitle":{ @@ -22517,7 +22605,7 @@ }, "HumanTaskUiArn":{ "shape":"HumanTaskUiArn", - "documentation":"

The ARN of the worker task template used to render the worker UI and tools for labeling job tasks.

Use this parameter when you are creating a labeling job for 3D point cloud and video fram labeling jobs. Use your labeling job task type to select one of the following ARN's and use it with this parameter when you create a labeling job. Replace aws-region with the AWS region you are creating your labeling job in.

3D Point Cloud HumanTaskUiArns

Use this HumanTaskUiArn for 3D point cloud object detection and 3D point cloud object detection adjustment labeling jobs.

Use this HumanTaskUiArn for 3D point cloud object tracking and 3D point cloud object tracking adjustment labeling jobs.

Use this HumanTaskUiArn for 3D point cloud semantic segmentation and 3D point cloud semantic segmentation adjustment labeling jobs.

Video Frame HumanTaskUiArns

Use this HumanTaskUiArn for video frame object detection and video frame object detection adjustment labeling jobs.

Use this HumanTaskUiArn for video frame object tracking and video frame object tracking adjustment labeling jobs.

" + "documentation":"

The ARN of the worker task template used to render the worker UI and tools for labeling job tasks.

Use this parameter when you are creating a labeling job for 3D point cloud and video frame labeling jobs. Use your labeling job task type to select one of the following ARNs and use it with this parameter when you create a labeling job. Replace aws-region with the AWS region you are creating your labeling job in.

3D Point Cloud HumanTaskUiArns

Use this HumanTaskUiArn for 3D point cloud object detection and 3D point cloud object detection adjustment labeling jobs.

Use this HumanTaskUiArn for 3D point cloud object tracking and 3D point cloud object tracking adjustment labeling jobs.

Use this HumanTaskUiArn for 3D point cloud semantic segmentation and 3D point cloud semantic segmentation adjustment labeling jobs.

Video Frame HumanTaskUiArns

Use this HumanTaskUiArn for video frame object detection and video frame object detection adjustment labeling jobs.

Use this HumanTaskUiArn for video frame object tracking and video frame object tracking adjustment labeling jobs.

" } }, "documentation":"

Provided configuration information for the worker UI for a labeling job.

" diff --git a/botocore/data/secretsmanager/2017-10-17/service-2.json b/botocore/data/secretsmanager/2017-10-17/service-2.json index 62311e41..5b572e32 100644 --- a/botocore/data/secretsmanager/2017-10-17/service-2.json +++ b/botocore/data/secretsmanager/2017-10-17/service-2.json @@ -61,9 +61,10 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServiceError"}, - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"} ], - "documentation":"

Deletes the resource-based permission policy attached to the secret.

Minimum permissions

To run this command, you must have the following permissions:

Related operations

" + "documentation":"

Deletes the resource-based permission policy attached to the secret.

Minimum permissions

To run this command, you must have the following permissions:

Related operations

" }, "DeleteSecret":{ "name":"DeleteSecret", @@ -79,7 +80,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Deletes an entire secret and all of its versions. You can optionally include a recovery window during which you can restore the secret. If you don't specify a recovery window value, the operation defaults to 30 days. Secrets Manager attaches a DeletionDate stamp to the secret that specifies the end of the recovery window. At the end of the recovery window, Secrets Manager deletes the secret permanently.

At any time before recovery window ends, you can use RestoreSecret to remove the DeletionDate and cancel the deletion of the secret.

You cannot access the encrypted secret information in any secret that is scheduled for deletion. If you need to access that information, you must cancel the deletion with RestoreSecret and then retrieve the information.

  • There is no explicit operation to delete a version of a secret. Instead, remove all staging labels from the VersionStage field of a version. That marks the version as deprecated and allows Secrets Manager to delete it as needed. Versions that do not have any staging labels do not show up in ListSecretVersionIds unless you specify IncludeDeprecated.

  • The permanent secret deletion at the end of the waiting period is performed as a background task with low priority. There is no guarantee of a specific time after the recovery window for the actual delete operation to occur.

Minimum permissions

To run this command, you must have the following permissions:

Related operations

" + "documentation":"

Deletes an entire secret and all of the versions. You can optionally include a recovery window during which you can restore the secret. If you don't specify a recovery window value, the operation defaults to 30 days. Secrets Manager attaches a DeletionDate stamp to the secret that specifies the end of the recovery window. At the end of the recovery window, Secrets Manager deletes the secret permanently.

At any time before recovery window ends, you can use RestoreSecret to remove the DeletionDate and cancel the deletion of the secret.

You cannot access the encrypted secret information in any secret scheduled for deletion. If you need to access that information, you must cancel the deletion with RestoreSecret and then retrieve the information.

  • There is no explicit operation to delete a version of a secret. Instead, remove all staging labels from the VersionStage field of a version. That marks the version as deprecated and allows Secrets Manager to delete it as needed. Versions without any staging labels do not show up in ListSecretVersionIds unless you specify IncludeDeprecated.

  • The permanent secret deletion at the end of the waiting period is performed as a background task with low priority. There is no guarantee of a specific time after the recovery window for the actual delete operation to occur.

Minimum permissions

To run this command, you must have the following permissions:

Related operations

" }, "DescribeSecret":{ "name":"DescribeSecret", @@ -188,7 +189,7 @@ {"shape":"InvalidRequestException"}, {"shape":"PublicPolicyException"} ], - "documentation":"

Attaches the contents of the specified resource-based permission policy to a secret. A resource-based policy is optional. Alternatively, you can use IAM identity-based policies that specify the secret's Amazon Resource Name (ARN) in the policy statement's Resources element. You can also use a combination of both identity-based and resource-based policies. The affected users and roles receive the permissions that are permitted by all of the relevant policies. For more information, see Using Resource-Based Policies for AWS Secrets Manager. For the complete description of the AWS policy syntax and grammar, see IAM JSON Policy Reference in the IAM User Guide.

Minimum permissions

To run this command, you must have the following permissions:

Related operations

" + "documentation":"

Attaches the contents of the specified resource-based permission policy to a secret. A resource-based policy is optional. Alternatively, you can use IAM identity-based policies that specify the secret's Amazon Resource Name (ARN) in the policy statement's Resources element. You can also use a combination of both identity-based and resource-based policies. The affected users and roles receive the permissions that are permitted by all of the relevant policies. For more information, see Using Resource-Based Policies for AWS Secrets Manager. For the complete description of the AWS policy syntax and grammar, see IAM JSON Policy Reference in the IAM User Guide.

Minimum permissions

To run this command, you must have the following permissions:

Related operations

" }, "PutSecretValue":{ "name":"PutSecretValue", @@ -207,7 +208,39 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Stores a new encrypted secret value in the specified secret. To do this, the operation creates a new version and attaches it to the secret. The version can contain a new SecretString value or a new SecretBinary value. You can also specify the staging labels that are initially attached to the new version.

The Secrets Manager console uses only the SecretString field. To add binary data to a secret with the SecretBinary field you must use the AWS CLI or one of the AWS SDKs.

  • If you call an operation to encrypt or decrypt the SecretString or SecretBinary for a secret in the same account as the calling user and that secret doesn't specify a AWS KMS encryption key, Secrets Manager uses the account's default AWS managed customer master key (CMK) with the alias aws/secretsmanager. If this key doesn't already exist in your account then Secrets Manager creates it for you automatically. All users and roles in the same AWS account automatically have access to use the default CMK. Note that if an Secrets Manager API call results in AWS creating the account's AWS-managed CMK, it can result in a one-time significant delay in returning the result.

  • If the secret resides in a different AWS account from the credentials calling an API that requires encryption or decryption of the secret value then you must create and use a custom AWS KMS CMK because you can't access the default CMK for the account using credentials from a different AWS account. Store the ARN of the CMK in the secret when you create the secret or when you update it by including it in the KMSKeyId. If you call an API that must encrypt or decrypt SecretString or SecretBinary using credentials from a different account then the AWS KMS key policy must grant cross-account access to that other account's user or role for both the kms:GenerateDataKey and kms:Decrypt operations.

Minimum permissions

To run this command, you must have the following permissions:

Related operations

" + "documentation":"

Stores a new encrypted secret value in the specified secret. To do this, the operation creates a new version and attaches it to the secret. The version can contain a new SecretString value or a new SecretBinary value. You can also specify the staging labels that are initially attached to the new version.

The Secrets Manager console uses only the SecretString field. To add binary data to a secret with the SecretBinary field you must use the AWS CLI or one of the AWS SDKs.

  • If you call an operation to encrypt or decrypt the SecretString or SecretBinary for a secret in the same account as the calling user and that secret doesn't specify an AWS KMS encryption key, Secrets Manager uses the account's default AWS managed customer master key (CMK) with the alias aws/secretsmanager. If this key doesn't already exist in your account then Secrets Manager creates it for you automatically. All users and roles in the same AWS account automatically have access to use the default CMK. Note that if a Secrets Manager API call results in AWS creating the account's AWS-managed CMK, it can result in a one-time significant delay in returning the result.

  • If the secret resides in a different AWS account from the credentials calling an API that requires encryption or decryption of the secret value then you must create and use a custom AWS KMS CMK because you can't access the default CMK for the account using credentials from a different AWS account. Store the ARN of the CMK in the secret when you create the secret or when you update it by including it in the KMSKeyId. If you call an API that must encrypt or decrypt SecretString or SecretBinary using credentials from a different account then the AWS KMS key policy must grant cross-account access to that other account's user or role for both the kms:GenerateDataKey and kms:Decrypt operations.

Minimum permissions

To run this command, you must have the following permissions:

Related operations

" + }, + "RemoveRegionsFromReplication":{ + "name":"RemoveRegionsFromReplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveRegionsFromReplicationRequest"}, + "output":{"shape":"RemoveRegionsFromReplicationResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InternalServiceError"} + ], + "documentation":"

Remove regions from replication.

" + }, + "ReplicateSecretToRegions":{ + "name":"ReplicateSecretToRegions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ReplicateSecretToRegionsRequest"}, + "output":{"shape":"ReplicateSecretToRegionsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InternalServiceError"} + ], + "documentation":"

Converts an existing secret to a multi-Region secret and begins replicating the secret to a list of new regions.
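A minimal sketch of converting an existing secret to a multi-Region secret; the secret name and replica Regions are hypothetical.

```python
# Sketch: replicate an existing secret to additional Regions.
# Secret name and Regions below are hypothetical.
import boto3

sm = boto3.client("secretsmanager")
response = sm.replicate_secret_to_regions(
    SecretId="prod/app/db-credentials",
    AddReplicaRegions=[{"Region": "us-west-2"}, {"Region": "ap-southeast-1"}],
    ForceOverwriteReplicaSecret=False,
)
# Each entry reports a Region and a Status of InProgress, Failed, or InSync.
print(response["ReplicationStatus"])
```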

" }, "RestoreSecret":{ "name":"RestoreSecret", @@ -241,6 +274,22 @@ ], "documentation":"

Configures and starts the asynchronous process of rotating this secret. If you include the configuration parameters, the operation sets those values for the secret and then immediately starts a rotation. If you do not include the configuration parameters, the operation starts a rotation with the values already stored in the secret. After the rotation completes, the protected service and its clients all use the new version of the secret.

This required configuration information includes the ARN of an AWS Lambda function and the time between scheduled rotations. The Lambda rotation function creates a new version of the secret and creates or updates the credentials on the protected service to match. After testing the new credentials, the function marks the new secret with the staging label AWSCURRENT so that your clients all immediately begin to use the new version. For more information about rotating secrets and how to configure a Lambda function to rotate the secrets for your protected service, see Rotating Secrets in AWS Secrets Manager in the AWS Secrets Manager User Guide.

Secrets Manager schedules the next rotation when the previous one completes. Secrets Manager schedules the date by adding the rotation interval (number of days) to the actual date of the last rotation. The service chooses the hour within that 24-hour date window randomly. The minute is also chosen somewhat randomly, but weighted towards the top of the hour and influenced by a variety of factors that help distribute load.

The rotation function must end with the versions of the secret in one of two states:

If the AWSPENDING staging label is present but not attached to the same version as AWSCURRENT then any later invocation of RotateSecret assumes that a previous rotation request is still in progress and returns an error.
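A minimal sketch of configuring and starting rotation, assuming a hypothetical rotation Lambda function that already implements the rotation contract described above.

```python
# Sketch: attach a rotation Lambda and a 30-day schedule, then start a rotation.
# Secret name and Lambda ARN below are hypothetical.
import boto3

sm = boto3.client("secretsmanager")
sm.rotate_secret(
    SecretId="prod/app/db-credentials",
    RotationLambdaARN="arn:aws:lambda:us-east-1:123456789012:function:rotate-db-creds",
    RotationRules={"AutomaticallyAfterDays": 30},
)
```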

Minimum permissions

To run this command, you must have the following permissions:

Related operations

" }, + "StopReplicationToReplica":{ + "name":"StopReplicationToReplica", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopReplicationToReplicaRequest"}, + "output":{"shape":"StopReplicationToReplicaResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InternalServiceError"} + ], + "documentation":"

Removes the secret from replication and promotes the secret to a regional secret in the replica Region.
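A minimal sketch of promoting a replica to a standalone regional secret, run against the replica Region; the Region and secret name are hypothetical.

```python
# Sketch: detach a replica from its primary and promote it to a regional secret.
# Region and secret name below are hypothetical.
import boto3

sm = boto3.client("secretsmanager", region_name="us-west-2")
sm.stop_replication_to_replica(SecretId="prod/app/db-credentials")
```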

" + }, "TagResource":{ "name":"TagResource", "http":{ @@ -324,10 +373,15 @@ {"shape":"InternalServiceError"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Validates the JSON text of the resource-based policy document attached to the specified secret. The JSON request string input and response output displays formatted code with white space and line breaks for better readability. Submit your input as a single line JSON string. A resource-based policy is optional.

" + "documentation":"

Validates that the resource policy does not grant a wide range of IAM principals access to your secret. The JSON request string input and response output displays formatted code with white space and line breaks for better readability. Submit your input as a single line JSON string. A resource-based policy is optional for secrets.

The API performs three checks when validating the secret:

Minimum Permissions

You must have the permissions required to access the following APIs:

" } }, "shapes":{ + "AddReplicaRegionListType":{ + "type":"list", + "member":{"shape":"ReplicaRegionType"}, + "min":1 + }, "AutomaticallyRotateAfterDaysType":{ "type":"long", "max":1000, @@ -376,7 +430,7 @@ }, "ClientRequestToken":{ "shape":"ClientRequestTokenType", - "documentation":"

(Optional) If you include SecretString or SecretBinary, then an initial version is created as part of the secret, and this parameter specifies a unique identifier for the new version.

If you use the AWS CLI or one of the AWS SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes it as the value for this parameter in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for the new version and include the value in the request.

This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during a rotation. We recommend that you generate a UUID-type value to ensure uniqueness of your versions within the specified secret.

This value becomes the VersionId of the new version.

", + "documentation":"

(Optional) If you include SecretString or SecretBinary, then an initial version is created as part of the secret, and this parameter specifies a unique identifier for the new version.

If you use the AWS CLI or one of the AWS SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes it as the value for this parameter in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for the new version and include the value in the request.

This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during a rotation. We recommend that you generate a UUID-type value to ensure uniqueness of your versions within the specified secret.

This value becomes the VersionId of the new version.

", "idempotencyToken":true }, "Description":{ @@ -398,6 +452,14 @@ "Tags":{ "shape":"TagListType", "documentation":"

(Optional) Specifies a list of user-defined tags that are attached to the secret. Each tag is a \"Key\" and \"Value\" pair of strings. This operation only appends tags to the existing list of tags. To remove tags, you must use UntagResource.

  • Secrets Manager tag key names are case sensitive. A tag with the key \"ABC\" is a different tag from one with key \"abc\".

  • If you check tags in IAM policy Condition elements as part of your security strategy, then adding or removing a tag can change permissions. If the successful completion of this operation would result in you losing your permissions for this secret, then this operation is blocked and returns an Access Denied error.

This parameter requires a JSON text string argument. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide. For example:

[{\"Key\":\"CostCenter\",\"Value\":\"12345\"},{\"Key\":\"environment\",\"Value\":\"production\"}]

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.

The following basic restrictions apply to tags:

" + }, + "AddReplicaRegions":{ + "shape":"AddReplicaRegionListType", + "documentation":"

(Optional) Add a list of regions to replicate secrets. Secrets Manager replicates the KMSKeyID objects to the list of regions specified in the parameter.

" + }, + "ForceOverwriteReplicaSecret":{ + "shape":"BooleanType", + "documentation":"

(Optional) If set, the replication overwrites a secret with the same name in the destination region.
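A minimal sketch of creating a secret that is replicated at creation time using these two parameters; the secret name, value, and Regions are hypothetical (the SecretString shown is a placeholder, not a real credential).

```python
# Sketch: create a secret and replicate it to two Regions in the same call.
# Name, value, Regions, and KMS alias below are hypothetical.
import boto3

sm = boto3.client("secretsmanager")
response = sm.create_secret(
    Name="prod/app/db-credentials",
    SecretString='{"username": "admin", "password": "example-only"}',
    AddReplicaRegions=[
        {"Region": "us-west-2"},
        {"Region": "eu-west-1", "KmsKeyId": "alias/aws/secretsmanager"},
    ],
    ForceOverwriteReplicaSecret=False,
)
print(response["ReplicationStatus"])
```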

" } } }, @@ -415,6 +477,10 @@ "VersionId":{ "shape":"SecretVersionIdType", "documentation":"

The unique identifier associated with the version of the secret you just created.

" + }, + "ReplicationStatus":{ + "shape":"ReplicationStatusListType", + "documentation":"

Describes a list of replication status objects as InProgress, Failed or InSync.

" } } }, @@ -456,16 +522,16 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

Specifies the secret that you want to delete. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names ending with a hyphen followed by six characters.

If you specify an incomplete ARN without the random suffix, and instead provide the 'friendly name', you must not include the random suffix. If you do include the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException or an AccessDeniedException error, depending on your permissions.

" + "documentation":"

Specifies the secret to delete. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names ending with a hyphen followed by six characters.

If you specify an incomplete ARN without the random suffix, and instead provide the 'friendly name', you must not include the random suffix. If you do include the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException or an AccessDeniedException error, depending on your permissions.

" }, "RecoveryWindowInDays":{ "shape":"RecoveryWindowInDaysType", - "documentation":"

(Optional) Specifies the number of days that Secrets Manager waits before it can delete the secret. You can't use both this parameter and the ForceDeleteWithoutRecovery parameter in the same API call.

This value can range from 7 to 30 days. The default value is 30.

", + "documentation":"

(Optional) Specifies the number of days that Secrets Manager waits before Secrets Manager can delete the secret. You can't use both this parameter and the ForceDeleteWithoutRecovery parameter in the same API call.

This value can range from 7 to 30 days with a default value of 30.

", "box":true }, "ForceDeleteWithoutRecovery":{ "shape":"BooleanType", - "documentation":"

(Optional) Specifies that the secret is to be deleted without any recovery window. You can't use both this parameter and the RecoveryWindowInDays parameter in the same API call.

An asynchronous background process performs the actual deletion, so there can be a short delay before the operation completes. If you write code to delete and then immediately recreate a secret with the same name, ensure that your code includes appropriate back off and retry logic.

Use this parameter with caution. This parameter causes the operation to skip the normal waiting period before the permanent deletion that AWS would normally impose with the RecoveryWindowInDays parameter. If you delete a secret with the ForceDeleteWithouRecovery parameter, then you have no opportunity to recover the secret. It is permanently lost.

", + "documentation":"

(Optional) Specifies that the secret is to be deleted without any recovery window. You can't use both this parameter and the RecoveryWindowInDays parameter in the same API call.

An asynchronous background process performs the actual deletion, so there can be a short delay before the operation completes. If you write code to delete and then immediately recreate a secret with the same name, ensure that your code includes appropriate back off and retry logic.

Use this parameter with caution. This parameter causes the operation to skip the normal waiting period before the permanent deletion that AWS would normally impose with the RecoveryWindowInDays parameter. If you delete a secret with the ForceDeleteWithouRecovery parameter, then you have no opportunity to recover the secret. You lose the secret permanently.

If you use this parameter and include a previously deleted or nonexistent secret, the operation does not return the error ResourceNotFoundException in order to correctly handle retries.
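A minimal sketch contrasting the two deletion modes described above; the secret name is hypothetical, and only one of the two parameters may be supplied per call.

```python
# Sketch: schedule deletion with a shortened recovery window, or delete
# immediately with no recovery (use with caution). Secret name is hypothetical.
import boto3

sm = boto3.client("secretsmanager")

# Schedule deletion with a 7-day recovery window.
sm.delete_secret(SecretId="prod/app/db-credentials", RecoveryWindowInDays=7)

# Or: delete immediately, with no opportunity to restore.
# sm.delete_secret(SecretId="prod/app/db-credentials",
#                  ForceDeleteWithoutRecovery=True)
```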

", "box":true } } @@ -479,7 +545,7 @@ }, "Name":{ "shape":"SecretNameType", - "documentation":"

The friendly name of the secret that is now scheduled for deletion.

" + "documentation":"

The friendly name of the secret currently scheduled for deletion.

" }, "DeletionDate":{ "shape":"DeletionDateType", @@ -530,11 +596,11 @@ }, "RotationRules":{ "shape":"RotationRulesType", - "documentation":"

A structure that contains the rotation configuration for this secret.

" + "documentation":"

A structure with the rotation configuration for this secret.

" }, "LastRotatedDate":{ "shape":"LastRotatedDateType", - "documentation":"

The most recent date and time that the Secrets Manager rotation process was successfully completed. This value is null if the secret has never rotated.

", + "documentation":"

The last date and time that the rotation process for this secret was invoked.

The most recent date and time that the Secrets Manager rotation process successfully completed. If the secret doesn't rotate, Secrets Manager returns a null value.

", "box":true }, "LastChangedDate":{ @@ -566,8 +632,16 @@ }, "CreatedDate":{ "shape":"TimestampType", - "documentation":"

The date that the secret was created.

", + "documentation":"

The date you created the secret.

", "box":true + }, + "PrimaryRegion":{ + "shape":"RegionType", + "documentation":"

Specifies the primary region for secret replication.

" + }, + "ReplicationStatus":{ + "shape":"ReplicationStatusListType", + "documentation":"

Describes a list of replication status objects as InProgress, Failed, or InSync.

" } } }, @@ -602,10 +676,10 @@ }, "Values":{ "shape":"FilterValuesStringList", - "documentation":"

Filters your list of secrets by a specific value.

" + "documentation":"

Filters your list of secrets by a specific value.

You can prefix your search value with an exclamation mark (!) in order to perform negation filters.
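A minimal sketch of the negation syntax from a botocore client; the filter value "!test" is an illustrative placeholder meaning "exclude secrets whose name matches test":

import botocore.session

session = botocore.session.get_session()
client = session.create_client("secretsmanager", region_name="us-east-1")

# The leading "!" negates the filter value.
response = client.list_secrets(
    Filters=[{"Key": "name", "Values": ["!test"]}]
)
for secret in response["SecretList"]:
    print(secret["Name"])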

" } }, - "documentation":"

Allows you to filter your list of secrets.

" + "documentation":"

Allows you to add filters when you use the search function in Secrets Manager.

" }, "FilterNameStringType":{ "type":"string", @@ -614,14 +688,14 @@ "name", "tag-key", "tag-value", + "primary-region", "all" ] }, "FilterValueStringType":{ "type":"string", "max":512, - "min":1, - "pattern":"[a-zA-Z0-9 :_@\\/\\+\\=\\.\\-]+" + "pattern":"^\\!?[a-zA-Z0-9 :_@\\/\\+\\=\\.\\-]*$" }, "FilterValuesStringList":{ "type":"list", @@ -724,11 +798,11 @@ }, "VersionId":{ "shape":"SecretVersionIdType", - "documentation":"

Specifies the unique identifier of the version of the secret that you want to retrieve. If you specify this parameter then don't specify VersionStage. If you don't specify either a VersionStage or VersionId then the default is to perform the operation on the version with the VersionStage value of AWSCURRENT.

This value is typically a UUID-type value with 32 hexadecimal digits.

" + "documentation":"

Specifies the unique identifier of the version of the secret that you want to retrieve. If you specify both this parameter and VersionStage, the two parameters must refer to the same secret version. If you don't specify either a VersionStage or VersionId then the default is to perform the operation on the version with the VersionStage value of AWSCURRENT.

This value is typically a UUID-type value with 32 hexadecimal digits.

" }, "VersionStage":{ "shape":"SecretVersionStageType", - "documentation":"

Specifies the secret version that you want to retrieve by the staging label attached to the version.

Staging labels are used to keep track of different versions during the rotation process. If you use this parameter then don't specify VersionId. If you don't specify either a VersionStage or VersionId, then the default is to perform the operation on the version with the VersionStage value of AWSCURRENT.

" + "documentation":"

Specifies the secret version that you want to retrieve by the staging label attached to the version.

Staging labels are used to keep track of different versions during the rotation process. If you specify both this parameter and VersionId, the two parameters must refer to the same secret version. If you don't specify either a VersionStage or VersionId, then the default is to perform the operation on the version with the VersionStage value of AWSCURRENT.
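A short illustration of retrieving a version by staging label; the secret name is a placeholder, and VersionId could be passed instead (or alongside, provided both identify the same version):

import botocore.session

session = botocore.session.get_session()
client = session.create_client("secretsmanager", region_name="us-east-1")

response = client.get_secret_value(
    SecretId="my-example-secret", VersionStage="AWSCURRENT"
)
secret_string = response.get("SecretString")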

" } } }, @@ -901,7 +975,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

The policy document that you provided isn't valid.

", + "documentation":"

You provided a resource-based policy with syntax errors.

", "exception":true }, "MaxResultsType":{ @@ -947,7 +1021,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

The resource policy did not prevent broad access to the secret.

", + "documentation":"

The BlockPublicPolicy parameter is set to true and the resource policy did not prevent broad access to the secret.

", "exception":true }, "PutResourcePolicyRequest":{ @@ -959,15 +1033,15 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

Specifies the secret that you want to attach the resource-based policy to. You can specify either the ARN or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names ending with a hyphen followed by six characters.

If you specify an incomplete ARN without the random suffix, and instead provide the 'friendly name', you must not include the random suffix. If you do include the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException or an AccessDeniedException error, depending on your permissions.

" + "documentation":"

Specifies the secret that you want to attach the resource-based policy to. You can specify either the ARN or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names ending with a hyphen followed by six characters.

If you specify an incomplete ARN without the random suffix, and instead provide the 'friendly name', you must not include the random suffix. If you do include the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException or an AccessDeniedException error, depending on your permissions.

" }, "ResourcePolicy":{ "shape":"NonEmptyResourcePolicyType", - "documentation":"

A JSON-formatted string that's constructed according to the grammar and syntax for an AWS resource-based policy. The policy in the string identifies who can access or manage this secret and its versions. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide.

" + "documentation":"

A JSON-formatted string constructed according to the grammar and syntax for an AWS resource-based policy. The policy in the string identifies who can access or manage this secret and its versions. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide.

" }, "BlockPublicPolicy":{ "shape":"BooleanType", - "documentation":"

Makes an optional API call to Zelkova to validate the Resource Policy to prevent broad access to your secret.

", + "documentation":"

(Optional) If you set the parameter BlockPublicPolicy to true, then you block resource-based policies that allow broad access to the secret.
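A minimal sketch of attaching a policy with public-access blocking enabled; the secret name, role ARN, and account ID are placeholders:

import json
import botocore.session

session = botocore.session.get_session()
client = session.create_client("secretsmanager", region_name="us-east-1")

policy = {
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Principal": {"AWS": "arn:aws:iam::123456789012:role/app-role"},
        "Action": "secretsmanager:GetSecretValue",
        "Resource": "*",
    }],
}

# BlockPublicPolicy=True asks Secrets Manager to reject the policy if it
# would grant broad (public) access to the secret.
client.put_resource_policy(
    SecretId="my-example-secret",
    ResourcePolicy=json.dumps(policy),
    BlockPublicPolicy=True,
)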

", "box":true } } @@ -981,7 +1055,7 @@ }, "Name":{ "shape":"NameType", - "documentation":"

The friendly name of the secret that the retrieved by the resource-based policy.

" + "documentation":"

The friendly name of the secret retrieved by the resource-based policy.

" } } }, @@ -1040,6 +1114,125 @@ "sensitive":true }, "RecoveryWindowInDaysType":{"type":"long"}, + "RegionType":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^([a-z]+-)+\\d+$" + }, + "RemoveRegionsFromReplicationRequest":{ + "type":"structure", + "required":[ + "SecretId", + "RemoveReplicaRegions" + ], + "members":{ + "SecretId":{ + "shape":"SecretIdType", + "documentation":"

Remove a secret by SecretId from replica Regions.

" + }, + "RemoveReplicaRegions":{ + "shape":"RemoveReplicaRegionListType", + "documentation":"

Remove replication from specific Regions.

" + } + } + }, + "RemoveRegionsFromReplicationResponse":{ + "type":"structure", + "members":{ + "ARN":{ + "shape":"SecretARNType", + "documentation":"

The secret ARN removed from replication regions.

" + }, + "ReplicationStatus":{ + "shape":"ReplicationStatusListType", + "documentation":"

Describes the remaining replication status after you remove regions from the replication list.

" + } + } + }, + "RemoveReplicaRegionListType":{ + "type":"list", + "member":{"shape":"RegionType"}, + "min":1 + }, + "ReplicaRegionType":{ + "type":"structure", + "members":{ + "Region":{ + "shape":"RegionType", + "documentation":"

Describes a single instance of Region objects.

" + }, + "KmsKeyId":{ + "shape":"KmsKeyIdType", + "documentation":"

Can be an ARN, Key ID, or Alias.

" + } + }, + "documentation":"

(Optional) Custom type consisting of a Region (required) and the KmsKeyId which can be an ARN, Key ID, or Alias.

" + }, + "ReplicateSecretToRegionsRequest":{ + "type":"structure", + "required":[ + "SecretId", + "AddReplicaRegions" + ], + "members":{ + "SecretId":{ + "shape":"SecretIdType", + "documentation":"

Use the Secret Id to replicate a secret to regions.

" + }, + "AddReplicaRegions":{ + "shape":"AddReplicaRegionListType", + "documentation":"

Add Regions to replicate the secret.

" + }, + "ForceOverwriteReplicaSecret":{ + "shape":"BooleanType", + "documentation":"

(Optional) If set, Secrets Manager replication overwrites a secret with the same name in the destination region.

" + } + } + }, + "ReplicateSecretToRegionsResponse":{ + "type":"structure", + "members":{ + "ARN":{ + "shape":"SecretARNType", + "documentation":"

Replicate a secret based on the ReplicaRegionType consisting of a Region (required) and a KmsKeyId (optional), which can be the ARN, Key ID, or Alias.

" + }, + "ReplicationStatus":{ + "shape":"ReplicationStatusListType", + "documentation":"

Describes the secret replication status as InProgress, Failed, or InSync.
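A sketch of the replication round trip using the corresponding client methods for these new operations; the secret name and the us-west-2 replica Region are placeholders:

import botocore.session

session = botocore.session.get_session()
client = session.create_client("secretsmanager", region_name="us-east-1")

# Replicate the secret into another Region; KmsKeyId is optional per Region.
response = client.replicate_secret_to_regions(
    SecretId="my-example-secret",
    AddReplicaRegions=[{"Region": "us-west-2"}],
    ForceOverwriteReplicaSecret=False,
)
for status in response["ReplicationStatus"]:
    print(status["Region"], status["Status"])

# Replication to that Region can later be removed again.
client.remove_regions_from_replication(
    SecretId="my-example-secret", RemoveReplicaRegions=["us-west-2"]
)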

" + } + } + }, + "ReplicationStatusListType":{ + "type":"list", + "member":{"shape":"ReplicationStatusType"} + }, + "ReplicationStatusType":{ + "type":"structure", + "members":{ + "Region":{ + "shape":"RegionType", + "documentation":"

The Region where replication occurs.

" + }, + "KmsKeyId":{ + "shape":"KmsKeyIdType", + "documentation":"

Can be an ARN, Key ID, or Alias.

" + }, + "Status":{ + "shape":"StatusType", + "documentation":"

The status can be InProgress, Failed, or InSync.

" + }, + "StatusMessage":{ + "shape":"StatusMessageType", + "documentation":"

Status message such as \"Secret with this name already exists in this region\".

" + }, + "LastAccessedDate":{ + "shape":"LastAccessedDateType", + "documentation":"

The date that you last accessed the secret in the Region.

" + } + }, + "documentation":"

A replication object consisting of a RegionReplicationStatus object that includes a Region, KmsKeyId, status, and status message.

" + }, "RequireEachIncludedTypeType":{"type":"boolean"}, "ResourceExistsException":{ "type":"structure", @@ -1188,7 +1381,7 @@ }, "LastRotatedDate":{ "shape":"LastRotatedDateType", - "documentation":"

The last date and time that the rotation process for this secret was invoked.

", + "documentation":"

The most recent date and time that the Secrets Manager rotation process was successfully completed. This value is null if the secret hasn't ever rotated.

", "box":true }, "LastChangedDate":{ @@ -1221,6 +1414,10 @@ "shape":"TimestampType", "documentation":"

The date and time when a secret was created.

", "box":true + }, + "PrimaryRegion":{ + "shape":"RegionType", + "documentation":"

The Region where Secrets Manager originated the secret.

" } }, "documentation":"

A structure that contains the details about a secret. It does not include the encrypted SecretString and SecretBinary values. To get those values, use the GetSecretValue operation.

" @@ -1296,6 +1493,38 @@ "desc" ] }, + "StatusMessageType":{ + "type":"string", + "max":4096, + "min":1 + }, + "StatusType":{ + "type":"string", + "enum":[ + "InSync", + "Failed", + "InProgress" + ] + }, + "StopReplicationToReplicaRequest":{ + "type":"structure", + "required":["SecretId"], + "members":{ + "SecretId":{ + "shape":"SecretIdType", + "documentation":"

The SecretId of the secret for which to stop replication.

" + } + } + }, + "StopReplicationToReplicaResponse":{ + "type":"structure", + "members":{ + "ARN":{ + "shape":"SecretARNType", + "documentation":"

Response to StopReplicationToReplica of a secret, based on the ARN.

" + } + } + }, "Tag":{ "type":"structure", "members":{ @@ -1336,7 +1565,7 @@ }, "Tags":{ "shape":"TagListType", - "documentation":"

The tags to attach to the secret. Each element in the list consists of a Key and a Value.

This parameter to the API requires a JSON text string argument. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide. For the AWS CLI, you can also use the syntax: --Tags Key=\"Key1\",Value=\"Value1\",Key=\"Key2\",Value=\"Value2\"[,…]

" + "documentation":"

The tags to attach to the secret. Each element in the list consists of a Key and a Value.

This parameter to the API requires a JSON text string argument. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide. For the AWS CLI, you can also use the syntax: --Tags Key=\"Key1\",Value=\"Value1\" Key=\"Key2\",Value=\"Value2\"[,…]

" } } }, @@ -1457,11 +1686,11 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

The identifier for the secret that you want to validate a resource policy. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names ending with a hyphen followed by six characters.

If you specify an incomplete ARN without the random suffix, and instead provide the 'friendly name', you must not include the random suffix. If you do include the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException or an AccessDeniedException error, depending on your permissions.

" + "documentation":"

(Optional) The identifier of the secret with the resource-based policy you want to validate. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names ending with a hyphen followed by six characters.

If you specify an incomplete ARN without the random suffix, and instead provide the 'friendly name', you must not include the random suffix. If you do include the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException or an AccessDeniedException error, depending on your permissions.

" }, "ResourcePolicy":{ "shape":"NonEmptyResourcePolicyType", - "documentation":"

Identifies the Resource Policy attached to the secret.

" + "documentation":"

A JSON-formatted string constructed according to the grammar and syntax for an AWS resource-based policy. The policy in the string identifies who can access or manage this secret and its versions. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide.
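A small sketch of validating a policy before attaching it; the secret name is a placeholder, and the intentionally broad "*" principal is only there to illustrate the kind of statement validation is meant to flag:

import json
import botocore.session

session = botocore.session.get_session()
client = session.create_client("secretsmanager", region_name="us-east-1")

policy = {
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Principal": "*",
        "Action": "secretsmanager:GetSecretValue",
        "Resource": "*",
    }],
}

# Validation does not modify the secret; it only reports policy problems.
response = client.validate_resource_policy(
    SecretId="my-example-secret", ResourcePolicy=json.dumps(policy)
)
print(response)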

" } } }, diff --git a/botocore/data/securityhub/2018-10-26/service-2.json b/botocore/data/securityhub/2018-10-26/service-2.json index 410510a1..e87dade2 100644 --- a/botocore/data/securityhub/2018-10-26/service-2.json +++ b/botocore/data/securityhub/2018-10-26/service-2.json @@ -75,7 +75,7 @@ {"shape":"LimitExceededException"}, {"shape":"InvalidAccessException"} ], - "documentation":"

Imports security findings generated from an integrated third-party product into Security Hub. This action is requested by the integrated product to import its findings into Security Hub.

The maximum allowed size for a finding is 240 Kb. An error is returned for any finding larger than 240 Kb.

After a finding is created, BatchImportFindings cannot be used to update the following finding fields and objects, which Security Hub customers use to manage their investigation workflow.

BatchImportFindings can be used to update the following finding fields and objects only if they have not been updated using BatchUpdateFindings. After they are updated using BatchUpdateFindings, these fields cannot be updated using BatchImportFindings.

" + "documentation":"

Imports security findings generated from an integrated product into Security Hub. This action is requested by the integrated product to import its findings into Security Hub.

The maximum allowed size for a finding is 240 Kb. An error is returned for any finding larger than 240 Kb.

After a finding is created, BatchImportFindings cannot be used to update the following finding fields and objects, which Security Hub customers use to manage their investigation workflow.

Finding providers also should not use BatchImportFindings to update the following attributes.

Instead, finding providers use FindingProviderFields to provide values for these attributes.
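A hedged sketch of a provider import using FindingProviderFields; every ARN, account ID, timestamp, and identifier below is a placeholder, and per this change Severity and Types are no longer required at the top level of the finding:

import botocore.session

session = botocore.session.get_session()
client = session.create_client("securityhub", region_name="us-east-1")

finding = {
    "SchemaVersion": "2018-10-08",
    "Id": "example-finding-001",
    "ProductArn": "arn:aws:securityhub:us-east-1:123456789012:product/123456789012/default",
    "GeneratorId": "example-generator",
    "AwsAccountId": "123456789012",
    "CreatedAt": "2021-03-01T00:00:00Z",
    "UpdatedAt": "2021-03-01T00:00:00Z",
    "Title": "Example finding",
    "Description": "Illustrative finding imported by a provider.",
    "Resources": [{"Type": "Other", "Id": "example-resource"}],
    # Severity and types now travel in FindingProviderFields.
    "FindingProviderFields": {
        "Severity": {"Label": "LOW", "Original": "10"},
        "Types": ["Software and Configuration Checks/Vulnerabilities/CVE"],
    },
}

response = client.batch_import_findings(Findings=[finding])
print(response["SuccessCount"], response["FailedCount"])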

" }, "BatchUpdateFindings":{ "name":"BatchUpdateFindings", @@ -290,7 +290,7 @@ {"shape":"InvalidAccessException"}, {"shape":"InvalidInputException"} ], - "documentation":"

Returns information about the available products that you can subscribe to and integrate with Security Hub in order to consolidate findings.

" + "documentation":"

Returns information about product integrations in Security Hub.

You can optionally provide an integration ARN. If you provide an integration ARN, then the results only include that integration.

If you do not provide an integration ARN, then the results include all of the available product integrations.
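A minimal sketch of narrowing the result set with the new ProductArn query parameter; the product ARN shown is a placeholder:

import botocore.session

session = botocore.session.get_session()
client = session.create_client("securityhub", region_name="us-east-1")

response = client.describe_products(
    ProductArn="arn:aws:securityhub:us-east-1::product/aws/guardduty"
)
for product in response["Products"]:
    print(product["ProductName"])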

" }, "DescribeStandards":{ "name":"DescribeStandards", @@ -879,7 +879,7 @@ "documentation":"

Included if ActionType is PORT_PROBE. Provides details about the port probe that was detected.

" } }, - "documentation":"

Provides details about one of the following actions that were detected for the finding:

" + "documentation":"

Provides details about one of the following actions that affects or that was taken on a resource:

" }, "ActionLocalIpDetails":{ "type":"structure", @@ -5434,6 +5434,28 @@ "type":"list", "member":{"shape":"AwsRedshiftClusterVpcSecurityGroup"} }, + "AwsS3AccountPublicAccessBlockDetails":{ + "type":"structure", + "members":{ + "BlockPublicAcls":{ + "shape":"Boolean", + "documentation":"

Indicates whether to reject calls to update an S3 bucket if the calls include a public access control list (ACL).

" + }, + "BlockPublicPolicy":{ + "shape":"Boolean", + "documentation":"

Indicates whether to reject calls to update the access policy for an S3 bucket or access point if the policy allows public access.

" + }, + "IgnorePublicAcls":{ + "shape":"Boolean", + "documentation":"

Indicates whether Amazon S3 ignores public ACLs that are associated with an S3 bucket.

" + }, + "RestrictPublicBuckets":{ + "shape":"Boolean", + "documentation":"

Indicates whether to restrict access to an access point or S3 bucket that has a public policy to only AWS service principals and authorized users within the S3 bucket owner's account.

" + } + }, + "documentation":"

Provides information about the Amazon S3 Public Access Block configuration for accounts.

" + }, "AwsS3BucketDetails":{ "type":"structure", "members":{ @@ -5452,6 +5474,10 @@ "ServerSideEncryptionConfiguration":{ "shape":"AwsS3BucketServerSideEncryptionConfiguration", "documentation":"

The encryption rules that are applied to the S3 bucket.

" + }, + "PublicAccessBlockConfiguration":{ + "shape":"AwsS3AccountPublicAccessBlockDetails", + "documentation":"

Provides information about the Amazon S3 Public Access Block configuration for the S3 bucket.

" } }, "documentation":"

The details of an Amazon S3 bucket.

" @@ -5580,10 +5606,8 @@ "ProductArn", "GeneratorId", "AwsAccountId", - "Types", "CreatedAt", "UpdatedAt", - "Severity", "Title", "Description", "Resources" @@ -5727,7 +5751,11 @@ }, "Action":{ "shape":"Action", - "documentation":"

Provides details about an action that was detected for the finding.

" + "documentation":"

Provides details about an action that affects or that was taken on a resource.

" + }, + "FindingProviderFields":{ + "shape":"FindingProviderFields", + "documentation":"

In a BatchImportFindings request, finding providers use FindingProviderFields to provide and update their own values for confidence, criticality, related findings, severity, and types.

" } }, "documentation":"

Provides consistent format for the contents of the Security Hub-aggregated findings. AwsSecurityFinding format enables you to share findings between AWS security services and third-party solutions, and security standards checks.

A finding is a potential security issue generated either by AWS services (Amazon GuardDuty, Amazon Inspector, and Amazon Macie) or by the integrated third-party solutions and standards checks.

" @@ -5773,11 +5801,15 @@ }, "SeverityProduct":{ "shape":"NumberFilterList", - "documentation":"

The native severity as defined by the security-findings provider's solution that generated the finding.

" + "documentation":"

The native severity as defined by the security-findings provider's solution that generated the finding.

", + "deprecated":true, + "deprecatedMessage":"This filter is deprecated, use FindingProviderFieldsSeverityOriginal instead." }, "SeverityNormalized":{ "shape":"NumberFilterList", - "documentation":"

The normalized severity of a finding.

" + "documentation":"

The normalized severity of a finding.

", + "deprecated":true, + "deprecatedMessage":"This filter is deprecated, use SeverityLabel or FindingProviderFieldsSeverityLabel instead." }, "SeverityLabel":{ "shape":"StringFilterList", @@ -6070,6 +6102,34 @@ "Keyword":{ "shape":"KeywordFilterList", "documentation":"

A keyword for a finding.

" + }, + "FindingProviderFieldsConfidence":{ + "shape":"NumberFilterList", + "documentation":"

The finding provider value for the finding confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify.

Confidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence.

" + }, + "FindingProviderFieldsCriticality":{ + "shape":"NumberFilterList", + "documentation":"

The finding provider value for the level of importance assigned to the resources associated with the findings.

A score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources.

" + }, + "FindingProviderFieldsRelatedFindingsId":{ + "shape":"StringFilterList", + "documentation":"

The finding identifier of a related finding that is identified by the finding provider.

" + }, + "FindingProviderFieldsRelatedFindingsProductArn":{ + "shape":"StringFilterList", + "documentation":"

The ARN of the solution that generated a related finding that is identified by the finding provider.

" + }, + "FindingProviderFieldsSeverityLabel":{ + "shape":"StringFilterList", + "documentation":"

The finding provider value for the severity label.

" + }, + "FindingProviderFieldsSeverityOriginal":{ + "shape":"StringFilterList", + "documentation":"

The finding provider's original value for the severity.

" + }, + "FindingProviderFieldsTypes":{ + "shape":"StringFilterList", + "documentation":"

One or more finding types that the finding provider assigned to the finding. Uses the format of namespace/category/classifier that classify a finding.

Valid namespace values are: Software and Configuration Checks | TTPs | Effects | Unusual Behaviors | Sensitive Data Identifications
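A sketch of querying findings on one of the new provider-supplied filters rather than the deprecated SeverityNormalized/SeverityProduct filters; the filter value and page size are illustrative:

import botocore.session

session = botocore.session.get_session()
client = session.create_client("securityhub", region_name="us-east-1")

response = client.get_findings(
    Filters={
        "FindingProviderFieldsSeverityLabel": [
            {"Value": "HIGH", "Comparison": "EQUALS"}
        ]
    },
    MaxResults=10,
)
for finding in response["Findings"]:
    print(finding["Id"])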

" } }, "documentation":"

A collection of attributes that are applied to all active Security Hub-aggregated findings and that result in a subset of findings that are included in this insight.

You can filter by up to 10 finding attributes. For each attribute, you can provide up to 20 filter values.

" @@ -6359,11 +6419,17 @@ "required":["Findings"], "members":{ "Findings":{ - "shape":"AwsSecurityFindingList", + "shape":"BatchImportFindingsRequestFindingList", "documentation":"

A list of findings to import. To successfully import a finding, it must follow the AWS Security Finding Format. Maximum of 100 findings per request.

" } } }, + "BatchImportFindingsRequestFindingList":{ + "type":"list", + "member":{"shape":"AwsSecurityFinding"}, + "max":100, + "min":1 + }, "BatchImportFindingsResponse":{ "type":"structure", "required":[ @@ -6477,6 +6543,32 @@ "type":"list", "member":{"shape":"NonEmptyString"} }, + "Cell":{ + "type":"structure", + "members":{ + "Column":{ + "shape":"Long", + "documentation":"

The column number of the column that contains the data. For a Microsoft Excel workbook, the column number corresponds to the alphabetical column identifiers. For example, a value of 1 for Column corresponds to the A column in the workbook.

" + }, + "Row":{ + "shape":"Long", + "documentation":"

The row number of the row that contains the data.

" + }, + "ColumnName":{ + "shape":"NonEmptyString", + "documentation":"

The name of the column that contains the data.

" + }, + "CellReference":{ + "shape":"NonEmptyString", + "documentation":"

For a Microsoft Excel workbook, provides the location of the cell, as an absolute cell reference, that contains the data. For example, Sheet2!C5 for cell C5 on Sheet2.

" + } + }, + "documentation":"

An occurrence of sensitive data detected in a Microsoft Excel workbook, comma-separated value (CSV) file, or tab-separated value (TSV) file.

" + }, + "Cells":{ + "type":"list", + "member":{"shape":"Cell"} + }, "CidrBlockAssociation":{ "type":"structure", "members":{ @@ -6509,6 +6601,50 @@ }, "documentation":"

Information about a city.

" }, + "ClassificationResult":{ + "type":"structure", + "members":{ + "MimeType":{ + "shape":"NonEmptyString", + "documentation":"

The type of content that the finding applies to.

" + }, + "SizeClassified":{ + "shape":"Long", + "documentation":"

The total size in bytes of the affected data.

" + }, + "AdditionalOccurrences":{ + "shape":"Boolean", + "documentation":"

Indicates whether there are additional occurrences of sensitive data that are not included in the finding. This occurs when the number of occurrences exceeds the maximum that can be included.

" + }, + "Status":{ + "shape":"ClassificationStatus", + "documentation":"

The current status of the sensitive data detection.

" + }, + "SensitiveData":{ + "shape":"SensitiveDataResultList", + "documentation":"

Provides details about sensitive data that was identified based on built-in configuration.

" + }, + "CustomDataIdentifiers":{ + "shape":"CustomDataIdentifiersResult", + "documentation":"

Provides details about sensitive data that was identified based on customer-defined configuration.

" + } + }, + "documentation":"

Details about the sensitive data that was detected on the resource.

" + }, + "ClassificationStatus":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"NonEmptyString", + "documentation":"

The code that represents the status of the sensitive data detection.

" + }, + "Reason":{ + "shape":"NonEmptyString", + "documentation":"

A longer description of the current status of the sensitive data detection.

" + } + }, + "documentation":"

Provides details about the current status of the sensitive data detection.

" + }, "Compliance":{ "type":"structure", "members":{ @@ -6667,6 +6803,46 @@ "max":50, "min":1 }, + "CustomDataIdentifiersDetections":{ + "type":"structure", + "members":{ + "Count":{ + "shape":"Long", + "documentation":"

The total number of occurrences of sensitive data that were detected.

" + }, + "Arn":{ + "shape":"NonEmptyString", + "documentation":"

The ARN of the custom identifier that was used to detect the sensitive data.

" + }, + "Name":{ + "shape":"NonEmptyString", + "documentation":"

The name of the custom identifier that detected the sensitive data.

" + }, + "Occurrences":{ + "shape":"Occurrences", + "documentation":"

Details about the sensitive data that was detected.

" + } + }, + "documentation":"

The list of detected instances of sensitive data.

" + }, + "CustomDataIdentifiersDetectionsList":{ + "type":"list", + "member":{"shape":"CustomDataIdentifiersDetections"} + }, + "CustomDataIdentifiersResult":{ + "type":"structure", + "members":{ + "Detections":{ + "shape":"CustomDataIdentifiersDetectionsList", + "documentation":"

The list of detected instances of sensitive data.

" + }, + "TotalCount":{ + "shape":"Long", + "documentation":"

The total number of occurrences of sensitive data.

" + } + }, + "documentation":"

Contains an instance of sensitive data that was detected by a customer-defined identifier.

" + }, "Cvss":{ "type":"structure", "members":{ @@ -6689,6 +6865,20 @@ "type":"list", "member":{"shape":"Cvss"} }, + "DataClassificationDetails":{ + "type":"structure", + "members":{ + "DetailedResultsLocation":{ + "shape":"NonEmptyString", + "documentation":"

The path to the folder or file that contains the sensitive data.

" + }, + "Result":{ + "shape":"ClassificationResult", + "documentation":"

The details about the sensitive data that was detected on the resource.

" + } + }, + "documentation":"

Provides details about sensitive data that was detected on a resource.

" + }, "DateFilter":{ "type":"structure", "members":{ @@ -6921,6 +7111,12 @@ "documentation":"

The maximum number of results to return.

", "location":"querystring", "locationName":"MaxResults" + }, + "ProductArn":{ + "shape":"NonEmptyString", + "documentation":"

The ARN of the integration to return.

", + "location":"querystring", + "locationName":"ProductArn" } } }, @@ -7148,6 +7344,46 @@ "key":{"shape":"NonEmptyString"}, "value":{"shape":"NonEmptyString"} }, + "FindingProviderFields":{ + "type":"structure", + "members":{ + "Confidence":{ + "shape":"RatioScale", + "documentation":"

A finding's confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify.

Confidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence.

" + }, + "Criticality":{ + "shape":"RatioScale", + "documentation":"

The level of importance assigned to the resources associated with the finding.

A score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources.

" + }, + "RelatedFindings":{ + "shape":"RelatedFindingList", + "documentation":"

A list of findings that are related to the current finding.

" + }, + "Severity":{ + "shape":"FindingProviderSeverity", + "documentation":"

The severity of a finding.

" + }, + "Types":{ + "shape":"TypeList", + "documentation":"

One or more finding types in the format of namespace/category/classifier that classify a finding.

Valid namespace values are: Software and Configuration Checks | TTPs | Effects | Unusual Behaviors | Sensitive Data Identifications

" + } + }, + "documentation":"

In a BatchImportFindings request, finding providers use FindingProviderFields to provide and update values for confidence, criticality, related findings, severity, and types.

" + }, + "FindingProviderSeverity":{ + "type":"structure", + "members":{ + "Label":{ + "shape":"SeverityLabel", + "documentation":"

The severity label assigned to the finding by the finding provider.

" + }, + "Original":{ + "shape":"NonEmptyString", + "documentation":"

The finding provider's original value for the severity.

" + } + }, + "documentation":"

The severity assigned to the finding by the finding provider.

" + }, "GeoLocation":{ "type":"structure", "members":{ @@ -8110,6 +8346,54 @@ "type":"list", "member":{"shape":"NumberFilter"} }, + "Occurrences":{ + "type":"structure", + "members":{ + "LineRanges":{ + "shape":"Ranges", + "documentation":"

Occurrences of sensitive data detected in a non-binary text file or a Microsoft Word file. Non-binary text files include files such as HTML, XML, JSON, and TXT files.

" + }, + "OffsetRanges":{ + "shape":"Ranges", + "documentation":"

Occurrences of sensitive data detected in a binary text file.

" + }, + "Pages":{ + "shape":"Pages", + "documentation":"

Occurrences of sensitive data in an Adobe Portable Document Format (PDF) file.

" + }, + "Records":{ + "shape":"Records", + "documentation":"

Occurrences of sensitive data in an Apache Avro object container or an Apache Parquet file.

" + }, + "Cells":{ + "shape":"Cells", + "documentation":"

Occurrences of sensitive data detected in Microsoft Excel workbooks, comma-separated value (CSV) files, or tab-separated value (TSV) files.

" + } + }, + "documentation":"

The detected occurrences of sensitive data.

" + }, + "Page":{ + "type":"structure", + "members":{ + "PageNumber":{ + "shape":"Long", + "documentation":"

The page number of the page that contains the sensitive data.

" + }, + "LineRange":{ + "shape":"Range", + "documentation":"

An occurrence of sensitive data detected in a non-binary text file or a Microsoft Word file. Non-binary text files include files such as HTML, XML, JSON, and TXT files.

" + }, + "OffsetRange":{ + "shape":"Range", + "documentation":"

An occurrence of sensitive data detected in a binary text file.

" + } + }, + "documentation":"

An occurrence of sensitive data in an Adobe Portable Document Format (PDF) file.

" + }, + "Pages":{ + "type":"list", + "member":{"shape":"Page"} + }, "Partition":{ "type":"string", "enum":[ @@ -8304,6 +8588,28 @@ "type":"list", "member":{"shape":"Product"} }, + "Range":{ + "type":"structure", + "members":{ + "Start":{ + "shape":"Long", + "documentation":"

The number of lines (for a line range) or characters (for an offset range) from the beginning of the file to the end of the sensitive data.

" + }, + "End":{ + "shape":"Long", + "documentation":"

The number of lines (for a line range) or characters (for an offset range) from the beginning of the file to the end of the sensitive data.

" + }, + "StartColumn":{ + "shape":"Long", + "documentation":"

In the line where the sensitive data starts, the column within the line where the sensitive data starts.

" + } + }, + "documentation":"

Identifies where the sensitive data begins and ends.

" + }, + "Ranges":{ + "type":"list", + "member":{"shape":"Range"} + }, "RatioScale":{ "type":"integer", "max":100, @@ -8323,6 +8629,20 @@ }, "documentation":"

A recommendation on how to remediate the issue identified in a finding.

" }, + "Record":{ + "type":"structure", + "members":{ + "JsonPath":{ + "shape":"NonEmptyString", + "documentation":"

The path, as a JSONPath expression, to the field in the record that contains the data. If the field name is longer than 20 characters, it is truncated. If the path is longer than 250 characters, it is truncated.

" + }, + "RecordIndex":{ + "shape":"Long", + "documentation":"

The record index, starting from 0, for the record that contains the data.

" + } + }, + "documentation":"

An occurrence of sensitive data in an Apache Avro object container or an Apache Parquet file.

" + }, "RecordState":{ "type":"string", "enum":[ @@ -8330,6 +8650,10 @@ "ARCHIVED" ] }, + "Records":{ + "type":"list", + "member":{"shape":"Record"} + }, "RelatedFinding":{ "type":"structure", "required":[ @@ -8397,6 +8721,10 @@ "shape":"FieldMap", "documentation":"

A list of AWS tags associated with a resource at the time the finding was processed.

" }, + "DataClassification":{ + "shape":"DataClassificationDetails", + "documentation":"

Contains information about sensitive data that was detected on the resource.

" + }, "Details":{ "shape":"ResourceDetails", "documentation":"

Additional details about the resource related to a finding.

" @@ -8469,6 +8797,10 @@ "shape":"AwsS3BucketDetails", "documentation":"

Details about an Amazon S3 bucket related to a finding.

" }, + "AwsS3AccountPublicAccessBlock":{ + "shape":"AwsS3AccountPublicAccessBlockDetails", + "documentation":"

Details about the Amazon S3 Public Access Block configuration for an account.

" + }, "AwsS3Object":{ "shape":"AwsS3ObjectDetails", "documentation":"

Details about an Amazon S3 object related to a finding.

" @@ -8624,6 +8956,50 @@ "type":"list", "member":{"shape":"NonEmptyString"} }, + "SensitiveDataDetections":{ + "type":"structure", + "members":{ + "Count":{ + "shape":"Long", + "documentation":"

The total number of occurrences of sensitive data that were detected.

" + }, + "Type":{ + "shape":"NonEmptyString", + "documentation":"

The type of sensitive data that was detected. For example, the type might indicate that the data is an email address.

" + }, + "Occurrences":{ + "shape":"Occurrences", + "documentation":"

Details about the sensitive data that was detected.

" + } + }, + "documentation":"

The list of detected instances of sensitive data.

" + }, + "SensitiveDataDetectionsList":{ + "type":"list", + "member":{"shape":"SensitiveDataDetections"} + }, + "SensitiveDataResult":{ + "type":"structure", + "members":{ + "Category":{ + "shape":"NonEmptyString", + "documentation":"

The category of sensitive data that was detected. For example, the category can indicate that the sensitive data involved credentials, financial information, or personal information.

" + }, + "Detections":{ + "shape":"SensitiveDataDetectionsList", + "documentation":"

The list of detected instances of sensitive data.

" + }, + "TotalCount":{ + "shape":"Long", + "documentation":"

The total number of occurrences of sensitive data.

" + } + }, + "documentation":"

Contains a detected instance of sensitive data that is based on built-in identifiers.

" + }, + "SensitiveDataResultList":{ + "type":"list", + "member":{"shape":"SensitiveDataResult"} + }, "Severity":{ "type":"structure", "members":{ diff --git a/botocore/data/servicediscovery/2017-03-14/service-2.json b/botocore/data/servicediscovery/2017-03-14/service-2.json index 5bee65b0..8ba77c5f 100644 --- a/botocore/data/servicediscovery/2017-03-14/service-2.json +++ b/botocore/data/servicediscovery/2017-03-14/service-2.json @@ -393,7 +393,7 @@ "required":["Name"], "members":{ "Name":{ - "shape":"NamespaceName", + "shape":"NamespaceNameHttp", "documentation":"

The name that you want to assign to this namespace.

" }, "CreatorRequestId":{ @@ -428,7 +428,7 @@ ], "members":{ "Name":{ - "shape":"NamespaceName", + "shape":"NamespaceNamePrivate", "documentation":"

The name that you want to assign to this namespace. When you create a private DNS namespace, AWS Cloud Map automatically creates an Amazon Route 53 private hosted zone that has the same name as the namespace.

" }, "CreatorRequestId":{ @@ -464,7 +464,7 @@ "required":["Name"], "members":{ "Name":{ - "shape":"NamespaceName", + "shape":"NamespaceNamePublic", "documentation":"

The name that you want to assign to this namespace.

" }, "CreatorRequestId":{ @@ -497,7 +497,7 @@ "members":{ "Name":{ "shape":"ServiceName", - "documentation":"

The name that you want to assign to the service.

If you want AWS Cloud Map to create an SRV record when you register an instance, and if you're using a system that requires a specific SRV format, such as HAProxy, specify the following for Name:

When you register an instance, AWS Cloud Map creates an SRV record and assigns a name to the record by concatenating the service name and the namespace name, for example:

_exampleservice._tcp.example.com

" + "documentation":"

The name that you want to assign to the service.

If you want AWS Cloud Map to create an SRV record when you register an instance, and if you're using a system that requires a specific SRV format, such as HAProxy, specify the following for Name:

When you register an instance, AWS Cloud Map creates an SRV record and assigns a name to the record by concatenating the service name and the namespace name, for example:

_exampleservice._tcp.example.com

For a single DNS namespace, you cannot create two services with names that differ only by case (such as EXAMPLE and example). Otherwise, these services will have the same DNS name. However, you can create multiple HTTP services with names that differ only by case because HTTP services are case sensitive.

" }, "NamespaceId":{ "shape":"ResourceId", @@ -527,6 +527,10 @@ "Tags":{ "shape":"TagList", "documentation":"

The tags to add to the service. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" + }, + "Type":{ + "shape":"ServiceTypeOption", + "documentation":"

If present, specifies that the service instances are only discoverable using the DiscoverInstances API operation. No DNS records will be registered for the service instances. The only valid value is HTTP.
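A brief sketch of creating an HTTP-only service with the new Type parameter; the service name and namespace ID are placeholders for an existing HTTP namespace:

import botocore.session

session = botocore.session.get_session()
client = session.create_client("servicediscovery", region_name="us-east-1")

response = client.create_service(
    Name="example-service",
    NamespaceId="ns-examplenamespace",
    Type="HTTP",  # instances are discoverable only via DiscoverInstances
)
print(response["Service"]["Arn"])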

" } } }, @@ -902,7 +906,9 @@ "members":{ "FailureThreshold":{ "shape":"FailureThreshold", - "documentation":"

This parameter has been deprecated and is always set to 1. AWS Cloud Map waits for approximately 30 seconds after receiving an UpdateInstanceCustomHealthStatus request before changing the status of the service instance.

The number of 30-second intervals that you want AWS Cloud Map to wait after receiving an UpdateInstanceCustomHealthStatus request before it changes the health status of a service instance.

Sending a second or subsequent UpdateInstanceCustomHealthStatus request with the same value before 30 seconds has passed doesn't accelerate the change. AWS Cloud Map still waits 30 seconds after the first request to make the change.

" + "documentation":"

This parameter has been deprecated and is always set to 1. AWS Cloud Map waits for approximately 30 seconds after receiving an UpdateInstanceCustomHealthStatus request before changing the status of the service instance.

The number of 30-second intervals that you want AWS Cloud Map to wait after receiving an UpdateInstanceCustomHealthStatus request before it changes the health status of a service instance.

Sending a second or subsequent UpdateInstanceCustomHealthStatus request with the same value before 30 seconds has passed doesn't accelerate the change. AWS Cloud Map still waits 30 seconds after the first request to make the change.

", + "deprecated":true, + "deprecatedMessage":"Configurable FailureThreshold of HealthCheckCustomConfig is deprecated. It will always have value 1." } }, "documentation":"

A complex type that contains information about an optional custom health check. A custom health check, which requires that you use a third-party health checker to evaluate the health of your resources, is useful in the following circumstances:

If you specify a health check configuration, you can specify either HealthCheckCustomConfig or HealthCheckConfig but not both.

To change the status of a custom health check, submit an UpdateInstanceCustomHealthStatus request. AWS Cloud Map doesn't monitor the status of the resource, it just keeps a record of the status specified in the most recent UpdateInstanceCustomHealthStatus request.

Here's how custom health checks work:

  1. You create a service and specify a value for FailureThreshold.

    The failure threshold indicates the number of 30-second intervals you want AWS Cloud Map to wait between the time that your application sends an UpdateInstanceCustomHealthStatus request and the time that AWS Cloud Map stops routing internet traffic to the corresponding resource.

  2. You register an instance.

  3. You configure a third-party health checker to monitor the resource that is associated with the new instance.

    AWS Cloud Map doesn't check the health of the resource directly.

  4. The third-party health-checker determines that the resource is unhealthy and notifies your application.

  5. Your application submits an UpdateInstanceCustomHealthStatus request.

  6. AWS Cloud Map waits for (FailureThreshold x 30) seconds.

  7. If another UpdateInstanceCustomHealthStatus request doesn't arrive during that time to change the status back to healthy, AWS Cloud Map stops routing traffic to the resource.

" @@ -939,7 +945,7 @@ "documentation":"

The ID of an instance that matches the values that you specified in the request.

" }, "NamespaceName":{ - "shape":"NamespaceName", + "shape":"NamespaceNameHttp", "documentation":"

The name of the namespace that you specified when you registered the instance.

" }, "ServiceName":{ @@ -1272,6 +1278,21 @@ "type":"string", "max":1024 }, + "NamespaceNameHttp":{ + "type":"string", + "max":1024, + "pattern":"^[!-~]{1,1024}$" + }, + "NamespaceNamePrivate":{ + "type":"string", + "max":1024, + "pattern":"^[!-~]{1,1024}$" + }, + "NamespaceNamePublic":{ + "type":"string", + "max":1024, + "pattern":"^([a-zA-Z0-9]([a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])?\\.)+[a-zA-Z0-9]([a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])?$" + }, "NamespaceNotFound":{ "type":"structure", "members":{ @@ -1614,6 +1635,10 @@ "shape":"DnsConfig", "documentation":"

A complex type that contains information about the Route 53 DNS records that you want AWS Cloud Map to create when you register an instance.

" }, + "Type":{ + "shape":"ServiceType", + "documentation":"

Describes the systems that can be used to discover the service instances.

DNS_HTTP

The service instances can be discovered using either DNS queries or the DiscoverInstances API operation.

HTTP

The service instances can only be discovered using the DiscoverInstances API operation.

DNS

Reserved.

" + }, "HealthCheckConfig":{ "shape":"HealthCheckConfig", "documentation":"

Public DNS and HTTP namespaces only. A complex type that contains settings for an optional health check. If you specify settings for a health check, AWS Cloud Map associates the health check with the records that you specify in DnsConfig.

For information about the charges for health checks, see Amazon Route 53 Pricing.

" @@ -1725,6 +1750,10 @@ "shape":"ServiceName", "documentation":"

The name of the service.

" }, + "Type":{ + "shape":"ServiceType", + "documentation":"

Describes the systems that can be used to discover the service instances.

DNS_HTTP

The service instances can be discovered using either DNS queries or the DiscoverInstances API operation.

HTTP

The service instances can only be discovered using the DiscoverInstances API operation.

DNS

Reserved.

" + }, "Description":{ "shape":"ResourceDescription", "documentation":"

The description that you specify when you create the service.

" @@ -1743,6 +1772,18 @@ }, "documentation":"

A complex type that contains information about a specified service.

" }, + "ServiceType":{ + "type":"string", + "enum":[ + "HTTP", + "DNS_HTTP", + "DNS" + ] + }, + "ServiceTypeOption":{ + "type":"string", + "enum":["HTTP"] + }, "Tag":{ "type":"structure", "required":[ diff --git a/botocore/data/shield/2016-06-02/service-2.json b/botocore/data/shield/2016-06-02/service-2.json index b0cc654c..e9dbc946 100644 --- a/botocore/data/shield/2016-06-02/service-2.json +++ b/botocore/data/shield/2016-06-02/service-2.json @@ -437,6 +437,53 @@ ], "documentation":"

Retrieves the resources that are included in the protection group.

" }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"InvalidResourceException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS Shield.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"InvalidResourceException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Adds or updates tags for a resource in AWS Shield.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"InvalidResourceException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Removes tags from a resource in AWS Shield.
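A hedged sketch of the new Shield tagging operations end to end; the protection ARN and tag values are placeholders:

import botocore.session

session = botocore.session.get_session()
client = session.create_client("shield", region_name="us-east-1")

protection_arn = "arn:aws:shield::123456789012:protection/example"  # placeholder

# Attach a tag to an existing Shield resource and read the tags back.
client.tag_resource(
    ResourceARN=protection_arn,
    Tags=[{"Key": "environment", "Value": "test"}],
)
tags = client.list_tags_for_resource(ResourceARN=protection_arn)["Tags"]

# Tags can later be removed by key.
client.untag_resource(ResourceARN=protection_arn, TagKeys=["environment"])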

" + }, "UpdateEmergencyContactSettings":{ "name":"UpdateEmergencyContactSettings", "http":{ @@ -813,6 +860,10 @@ "Members":{ "shape":"ProtectionGroupMembers", "documentation":"

The Amazon Resource Names (ARNs) of the resources to include in the protection group. You must set this when you set Pattern to ARBITRARY and you must not set it for any other Pattern setting.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

One or more tag key-value pairs for the protection group.

" } } }, @@ -835,6 +886,10 @@ "ResourceArn":{ "shape":"ResourceArn", "documentation":"

The ARN (Amazon Resource Name) of the resource to be protected.

The ARN should be in one of the following formats:

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

One or more tag key-value pairs for the Protection object that is created.

" } } }, @@ -1357,6 +1412,25 @@ } } }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceARN"], + "members":{ + "ResourceARN":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource to get tags for.

" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

A list of tag key and value pairs associated with the specified resource.

" + } + } + }, "LockedSubscriptionException":{ "type":"structure", "members":{ @@ -1457,6 +1531,10 @@ "HealthCheckIds":{ "shape":"HealthCheckIds", "documentation":"

The unique identifier (ID) for the Route 53 health check that's associated with the protection.

" + }, + "ProtectionArn":{ + "shape":"ResourceArn", + "documentation":"

The ARN (Amazon Resource Name) of the protection.

" } }, "documentation":"

An object that represents a resource that is under DDoS protection.

" @@ -1489,6 +1567,10 @@ "Members":{ "shape":"ProtectionGroupMembers", "documentation":"

The Amazon Resource Names (ARNs) of the resources to include in the protection group. You must set this when you set Pattern to ARBITRARY and you must not set it for any other Pattern setting.

" + }, + "ProtectionGroupArn":{ + "shape":"ResourceArn", + "documentation":"

The ARN (Amazon Resource Name) of the protection group.

" } }, "documentation":"

A grouping of protected resources that you and AWS Shield Advanced can monitor as a collective. This resource grouping improves the accuracy of detection and reduces false positives.

" @@ -1701,6 +1783,10 @@ "SubscriptionLimits":{ "shape":"SubscriptionLimits", "documentation":"

Limits settings for your subscription.

" + }, + "SubscriptionArn":{ + "shape":"ResourceArn", + "documentation":"

The ARN (Amazon Resource Name) of the subscription.

" } }, "documentation":"

Information about the AWS Shield Advanced subscription for an account.

" @@ -1783,6 +1869,64 @@ "type":"list", "member":{"shape":"SummarizedCounter"} }, + "Tag":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

Part of the key:value pair that defines a tag. You can use a tag key to describe a category of information, such as \"customer.\" Tag keys are case-sensitive.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

Part of the key:value pair that defines a tag. You can use a tag value to describe a specific value within a category, such as \"companyA\" or \"companyB.\" Tag values are case-sensitive.

" + } + }, + "documentation":"

A tag associated with an AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing or other management. Typically, the tag key represents a category, such as \"environment\", and the tag value represents a specific value within that category, such as \"test,\" \"development,\" or \"production\". Or you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "Tags" + ], + "members":{ + "ResourceARN":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource that you want to add or update tags for.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags that you want to modify or add to the resource.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, "TimeRange":{ "type":"structure", "members":{ @@ -1817,6 +1961,28 @@ "REQUESTS" ] }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "TagKeys" + ], + "members":{ + "ResourceARN":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource that you want to remove tags from.

" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

The tag key for each tag that you want to remove from the resource.

" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateEmergencyContactSettingsRequest":{ "type":"structure", "members":{ diff --git a/botocore/data/ssm/2014-11-06/service-2.json b/botocore/data/ssm/2014-11-06/service-2.json index 682894ca..69383bb9 100644 --- a/botocore/data/ssm/2014-11-06/service-2.json +++ b/botocore/data/ssm/2014-11-06/service-2.json @@ -2182,7 +2182,7 @@ }, "ResourceId":{ "shape":"ResourceId", - "documentation":"

The resource ID you want to tag.

Use the ID of the resource. Here are some examples:

ManagedInstance: mi-012345abcde

MaintenanceWindow: mw-012345abcde

PatchBaseline: pb-012345abcde

For the Document and Parameter values, use the name of the resource.

The ManagedInstance type for this API action is only for on-premises managed instances. You must specify the name of the managed instance in the following format: mi-ID_number. For example, mi-1a2b3c4d5e6f.

" + "documentation":"

The resource ID you want to tag.

Use the ID of the resource. Here are some examples:

ManagedInstance: mi-012345abcde

MaintenanceWindow: mw-012345abcde

PatchBaseline: pb-012345abcde

OpsMetadata object: ResourceID for tagging is created from the Amazon Resource Name (ARN) for the object. Specifically, ResourceID is created from the strings that come after the word opsmetadata in the ARN. For example, an OpsMetadata object with an ARN of arn:aws:ssm:us-east-2:1234567890:opsmetadata/aws/ssm/MyGroup/appmanager has a ResourceID of either aws/ssm/MyGroup/appmanager or /aws/ssm/MyGroup/appmanager.

For the Document and Parameter values, use the name of the resource.

The ManagedInstance type for this API action is only for on-premises managed instances. You must specify the name of the managed instance in the following format: mi-ID_number. For example, mi-1a2b3c4d5e6f.
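For an OpsMetadata object the ResourceId is the trailing portion of its ARN, as described above. A minimal sketch with a botocore client, reusing the ARN suffix from the documentation example:

```python
import botocore.session

session = botocore.session.get_session()
ssm = session.create_client('ssm', region_name='us-east-2')

# ResourceId is the part of the OpsMetadata ARN that follows "opsmetadata".
ssm.add_tags_to_resource(
    ResourceType='OpsMetadata',
    ResourceId='/aws/ssm/MyGroup/appmanager',
    Tags=[{'Key': 'Environment', 'Value': 'Production'}],
)

# Remove the tag again by key.
ssm.remove_tags_from_resource(
    ResourceType='OpsMetadata',
    ResourceId='/aws/ssm/MyGroup/appmanager',
    TagKeys=['Environment'],
)
```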

" }, "Tags":{ "shape":"TagList", @@ -3364,6 +3364,42 @@ "min":3, "pattern":"^[a-zA-Z0-9_\\-.]{3,128}$" }, + "BaselineOverride":{ + "type":"structure", + "members":{ + "OperatingSystem":{ + "shape":"OperatingSystem", + "documentation":"

The operating system rule used by the patch baseline override.

" + }, + "GlobalFilters":{"shape":"PatchFilterGroup"}, + "ApprovalRules":{"shape":"PatchRuleGroup"}, + "ApprovedPatches":{ + "shape":"PatchIdList", + "documentation":"

A list of explicitly approved patches for the baseline.

For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the AWS Systems Manager User Guide.

" + }, + "ApprovedPatchesComplianceLevel":{ + "shape":"PatchComplianceLevel", + "documentation":"

Defines the compliance level for approved patches. When an approved patch is reported as missing, this value describes the severity of the compliance violation.

" + }, + "RejectedPatches":{ + "shape":"PatchIdList", + "documentation":"

A list of explicitly rejected patches for the baseline.

For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the AWS Systems Manager User Guide.

" + }, + "RejectedPatchesAction":{ + "shape":"PatchAction", + "documentation":"

The action for Patch Manager to take on patches included in the RejectedPackages list. A patch can be allowed only if it is a dependency of another package, or blocked entirely along with packages that include it as a dependency.

" + }, + "ApprovedPatchesEnableNonSecurity":{ + "shape":"Boolean", + "documentation":"

Indicates whether the list of approved patches includes non-security updates that should be applied to the instances. The default value is 'false'. Applies to Linux instances only.

" + }, + "Sources":{ + "shape":"PatchSourceList", + "documentation":"

Information about the patches to use to update the instances, including target operating systems and source repositories. Applies to Linux instances only.

" + } + }, + "documentation":"

Defines the basic information about a patch baseline override.
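The BaselineOverride structure is passed to GetDeployablePatchSnapshotForInstance (see the BaselineOverride member added to that request later in this file). That call is normally issued by the AWS-RunPatchBaseline document from the instance itself; the sketch below only illustrates the shape, and the instance ID and patch name are placeholders:

```python
import uuid
import botocore.session

session = botocore.session.get_session()
ssm = session.create_client('ssm', region_name='us-east-2')

# Override the instance's registered patch baseline for this one snapshot.
response = ssm.get_deployable_patch_snapshot_for_instance(
    InstanceId='i-0123456789abcdef0',   # placeholder instance ID
    SnapshotId=str(uuid.uuid4()),       # user-defined snapshot ID
    BaselineOverride={
        'OperatingSystem': 'AMAZON_LINUX_2',
        'ApprovedPatches': ['kernel-4.14.219-161.340.amzn2'],  # placeholder patch
        'ApprovedPatchesComplianceLevel': 'CRITICAL',
        'ApprovedPatchesEnableNonSecurity': False,
    },
)
print(response['SnapshotDownloadUrl'])
```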

" + }, "BatchErrorMessage":{"type":"string"}, "Boolean":{"type":"boolean"}, "CalendarNameOrARN":{"type":"string"}, @@ -4486,6 +4522,10 @@ "Metadata":{ "shape":"MetadataMap", "documentation":"

Metadata for a new Application Manager application.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Optional metadata that you assign to a resource. You can specify a maximum of five tags for an OpsMetadata object. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag an OpsMetadata object to identify an environment or target AWS Region. In this case, you could specify the following key-value pairs: Key=Environment,Value=Production or Key=Region,Value=us-east-2.

" } } }, @@ -4524,7 +4564,7 @@ }, "ApprovedPatchesComplianceLevel":{ "shape":"PatchComplianceLevel", - "documentation":"

Defines the compliance level for approved patches. This means that if an approved patch is reported as missing, this is the severity of the compliance violation. The default value is UNSPECIFIED.

" + "documentation":"

Defines the compliance level for approved patches. When an approved patch is reported as missing, this value describes the severity of the compliance violation. The default value is UNSPECIFIED.

" }, "ApprovedPatchesEnableNonSecurity":{ "shape":"Boolean", @@ -6878,7 +6918,7 @@ }, "PluginName":{ "shape":"CommandPluginName", - "documentation":"

(Optional) The name of the plugin for which you want detailed results. If the document contains only one plugin, the name can be omitted and the details will be returned.

Plugin names are also referred to as step names in Systems Manager documents.

" + "documentation":"

The name of the plugin for which you want detailed results. If the document contains only one plugin, you can omit the name, and details for that plugin are returned. If the document contains more than one plugin, you must specify the name of the plugin for which you want to view details.

Plugin names are also referred to as step names in Systems Manager documents. For example, aws:RunShellScript is a plugin.
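A minimal sketch of requesting per-plugin results through a botocore client; the command and instance IDs are placeholders:

```python
import botocore.session

session = botocore.session.get_session()
ssm = session.create_client('ssm', region_name='us-east-2')

result = ssm.get_command_invocation(
    CommandId='11111111-2222-3333-4444-555555555555',  # placeholder command ID
    InstanceId='i-0123456789abcdef0',                  # placeholder instance ID
    PluginName='aws:RunShellScript',  # required when the document has several plugins
)
print(result['Status'], result['StandardOutputContent'])
```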

" } } }, @@ -6907,7 +6947,7 @@ }, "PluginName":{ "shape":"CommandPluginName", - "documentation":"

The name of the plugin for which you want detailed results. For example, aws:RunShellScript is a plugin.

" + "documentation":"

The name of the plugin, or step name, for which details are reported. For example, aws:RunShellScript is a plugin.

" }, "ResponseCode":{ "shape":"ResponseCode", @@ -7014,6 +7054,10 @@ "SnapshotId":{ "shape":"SnapshotId", "documentation":"

The user-defined snapshot ID.

" + }, + "BaselineOverride":{ + "shape":"BaselineOverride", + "documentation":"

Defines the basic information about a patch baseline override.

" } } }, @@ -7891,7 +7935,7 @@ "members":{ "SettingId":{ "shape":"ServiceSettingId", - "documentation":"

The ID of the service setting to get. The setting ID can be /ssm/parameter-store/default-parameter-tier, /ssm/parameter-store/high-throughput-enabled, or /ssm/managed-instance/activation-tier.

" + "documentation":"

The ID of the service setting to get. The setting ID can be /ssm/automation/customer-script-log-destination, /ssm/automation/customer-script-log-group-name, /ssm/parameter-store/default-parameter-tier, /ssm/parameter-store/high-throughput-enabled, or /ssm/managed-instance/activation-tier.

" } }, "documentation":"

The request body of the GetServiceSetting API action.

" @@ -8019,7 +8063,7 @@ "documentation":"

An S3 bucket where you want to store the results of this request.

" } }, - "documentation":"

An S3 bucket where you want to store the results of this request.

" + "documentation":"

An S3 bucket where you want to store the results of this request.

For the minimal permissions required to enable Amazon S3 output for an association, see Creating associations in the Systems Manager User Guide.

" }, "InstanceAssociationOutputUrl":{ "type":"structure", @@ -8249,7 +8293,7 @@ "members":{ "Key":{ "shape":"InstanceInformationStringFilterKey", - "documentation":"

The filter key name to describe your instances. For example:

\"InstanceIds\"|\"AgentVersion\"|\"PingStatus\"|\"PlatformTypes\"|\"ActivationIds\"|\"IamRole\"|\"ResourceType\"|\"AssociationStatus\"|\"Tag Key\"

" + "documentation":"

The filter key name to describe your instances. For example:

\"InstanceIds\"|\"AgentVersion\"|\"PingStatus\"|\"PlatformTypes\"|\"ActivationIds\"|\"IamRole\"|\"ResourceType\"|\"AssociationStatus\"|\"Tag Key\"

Tag key is not a valid filter. You must specify either tag-key or tag:keyname and a string. Here are some valid examples: tag-key, tag:123, tag:al!, tag:Windows. Here are some invalid examples: tag-keys, Tag Key, tag:, tagKey, abc:keyname.
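A sketch of one valid tag filter, following the note above that "Tag Key" is rejected while tag:keyname is accepted; the tag key and value are placeholders:

```python
import botocore.session

session = botocore.session.get_session()
ssm = session.create_client('ssm', region_name='us-east-2')

# Filter managed instances by a tag applied to them: "tag:<keyname>" is valid,
# "Tag Key" is not.
info = ssm.describe_instance_information(
    Filters=[{'Key': 'tag:Environment', 'Values': ['Production']}]
)
for instance in info['InstanceInformationList']:
    print(instance['InstanceId'], instance['PingStatus'])
```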

" }, "Values":{ "shape":"InstanceInformationFilterValueSet", @@ -12426,12 +12470,12 @@ }, "ApproveAfterDays":{ "shape":"ApproveAfterDays", - "documentation":"

The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of 7 means that patches are approved seven days after they are released. Not supported on Ubuntu Server.

", + "documentation":"

The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of 7 means that patches are approved seven days after they are released. Not supported on Debian Server or Ubuntu Server.

", "box":true }, "ApproveUntilDate":{ "shape":"PatchStringDateTime", - "documentation":"

The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically. Not supported on Ubuntu Server.

Enter dates in the format YYYY-MM-DD. For example, 2020-12-31.

", + "documentation":"

The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically. Not supported on Debian Server or Ubuntu Server.

Enter dates in the format YYYY-MM-DD. For example, 2020-12-31.
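A sketch of one approval rule that uses ApproveUntilDate inside a new patch baseline; the baseline name and filter values are illustrative, and the same PatchRuleGroup shape is accepted in the BaselineOverride described earlier:

```python
import botocore.session

session = botocore.session.get_session()
ssm = session.create_client('ssm', region_name='us-east-2')

rule = {
    'PatchFilterGroup': {
        'PatchFilters': [{'Key': 'CLASSIFICATION', 'Values': ['Security']}]
    },
    'ComplianceLevel': 'CRITICAL',
    # Auto-approve patches released on or before this date
    # (not supported on Debian Server or Ubuntu Server).
    'ApproveUntilDate': '2020-12-31',
}

ssm.create_patch_baseline(
    Name='example-security-baseline',   # placeholder name
    OperatingSystem='AMAZON_LINUX_2',
    ApprovalRules={'PatchRules': [rule]},
)
```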

", "box":true }, "EnableNonSecurity":{ @@ -12680,7 +12724,7 @@ "members":{ "Name":{ "shape":"PSParameterName", - "documentation":"

The fully qualified name of the parameter that you want to add to the system. The fully qualified name includes the complete hierarchy of the parameter path and name. For parameters in a hierarchy, you must include a leading forward slash character (/) when you create or reference a parameter. For example: /Dev/DBServer/MySQL/db-string13

Naming Constraints:

For additional information about valid values for parameter names, see About requirements and constraints for parameter names in the AWS Systems Manager User Guide.

The maximum length constraint listed below includes capacity for additional system attributes that are not part of the name. The maximum length for a parameter name, including the full length of the parameter ARN, is 1011 characters. For example, the length of the following parameter name is 65 characters, not 20 characters:

arn:aws:ssm:us-east-2:111122223333:parameter/ExampleParameterName

" + "documentation":"

The fully qualified name of the parameter that you want to add to the system. The fully qualified name includes the complete hierarchy of the parameter path and name. For parameters in a hierarchy, you must include a leading forward slash character (/) when you create or reference a parameter. For example: /Dev/DBServer/MySQL/db-string13

Naming Constraints:

For additional information about valid values for parameter names, see Creating Systems Manager parameters in the AWS Systems Manager User Guide.

The maximum length constraint listed below includes capacity for additional system attributes that are not part of the name. The maximum length for a parameter name, including the full length of the parameter ARN, is 1011 characters. For example, the length of the following parameter name is 65 characters, not 20 characters:

arn:aws:ssm:us-east-2:111122223333:parameter/ExampleParameterName
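A sketch of creating the hierarchical parameter named in the example above; the value is a placeholder:

```python
import botocore.session

session = botocore.session.get_session()
ssm = session.create_client('ssm', region_name='us-east-2')

# The leading "/" and the full hierarchy are part of the parameter name.
ssm.put_parameter(
    Name='/Dev/DBServer/MySQL/db-string13',
    Value='jdbc:mysql://db.example.com:3306/dev',  # placeholder value
    Type='String',
    Overwrite=True,
)
```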

" }, "Description":{ "shape":"ParameterDescription", @@ -12819,7 +12863,7 @@ }, "Targets":{ "shape":"Targets", - "documentation":"

The targets to register with the maintenance window. In other words, the instances to run commands on when the maintenance window runs.

You can specify targets using instance IDs, resource group names, or tags that have been applied to instances.

Example 1: Specify instance IDs

Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3

Example 2: Use tag key-pairs applied to instances

Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2

Example 3: Use tag-keys applied to instances

Key=tag-key,Values=my-tag-key-1,my-tag-key-2

Example 4: Use resource group names

Key=resource-groups:Name,Values=resource-group-name

Example 5: Use filters for resource group types

Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2

For Key=resource-groups:ResourceTypeFilters, specify resource types in the following format

Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC

For more information about these examples formats, including the best use case for each one, see Examples: Register targets with a maintenance window in the AWS Systems Manager User Guide.

" + "documentation":"

The targets to register with the maintenance window. In other words, the instances to run commands on when the maintenance window runs.

If a single maintenance window task is registered with multiple targets, its task invocations occur sequentially and not in parallel. If your task must run on multiple targets at the same time, register a task for each target individually and assign each task the same priority level.

You can specify targets using instance IDs, resource group names, or tags that have been applied to instances.

Example 1: Specify instance IDs

Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3

Example 2: Use tag key-value pairs applied to instances

Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2

Example 3: Use tag-keys applied to instances

Key=tag-key,Values=my-tag-key-1,my-tag-key-2

Example 4: Use resource group names

Key=resource-groups:Name,Values=resource-group-name

Example 5: Use filters for resource group types

Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2

For Key=resource-groups:ResourceTypeFilters, specify resource types in the following format

Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC

For more information about these example formats, including the best use case for each one, see Examples: Register targets with a maintenance window in the AWS Systems Manager User Guide.
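A sketch of Example 2 (tag key-value pairs) through a botocore client; the maintenance window ID is a placeholder:

```python
import botocore.session

session = botocore.session.get_session()
ssm = session.create_client('ssm', region_name='us-east-2')

ssm.register_target_with_maintenance_window(
    WindowId='mw-0123456789abcdef0',   # placeholder maintenance window ID
    ResourceType='INSTANCE',
    Targets=[
        {'Key': 'tag:my-tag-key', 'Values': ['my-tag-value-1', 'my-tag-value-2']}
    ],
)
```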

" }, "OwnerInformation":{ "shape":"OwnerInformation", @@ -12969,7 +13013,7 @@ }, "ResourceId":{ "shape":"ResourceId", - "documentation":"

The ID of the resource from which you want to remove tags. For example:

ManagedInstance: mi-012345abcde

MaintenanceWindow: mw-012345abcde

PatchBaseline: pb-012345abcde

For the Document and Parameter values, use the name of the resource.

The ManagedInstance type for this API action is only for on-premises managed instances. Specify the name of the managed instance in the following format: mi-ID_number. For example, mi-1a2b3c4d5e6f.

" + "documentation":"

The ID of the resource from which you want to remove tags. For example:

ManagedInstance: mi-012345abcde

MaintenanceWindow: mw-012345abcde

PatchBaseline: pb-012345abcde

OpsMetadata object: ResourceID for tagging is created from the Amazon Resource Name (ARN) for the object. Specifically, ResourceID is created from the strings that come after the word opsmetadata in the ARN. For example, an OpsMetadata object with an ARN of arn:aws:ssm:us-east-2:1234567890:opsmetadata/aws/ssm/MyGroup/appmanager has a ResourceID of either aws/ssm/MyGroup/appmanager or /aws/ssm/MyGroup/appmanager.

For the Document and Parameter values, use the name of the resource.

The ManagedInstance type for this API action is only for on-premises managed instances. Specify the name of the managed instance in the following format: mi-ID_number. For example, mi-1a2b3c4d5e6f.

" }, "TagKeys":{ "shape":"KeyList", @@ -12988,7 +13032,7 @@ "members":{ "SettingId":{ "shape":"ServiceSettingId", - "documentation":"

The Amazon Resource Name (ARN) of the service setting to reset. The setting ID can be /ssm/parameter-store/default-parameter-tier, /ssm/parameter-store/high-throughput-enabled, or /ssm/managed-instance/activation-tier. For example, arn:aws:ssm:us-east-1:111122223333:servicesetting/ssm/parameter-store/high-throughput-enabled.

" + "documentation":"

The Amazon Resource Name (ARN) of the service setting to reset. The setting ID can be /ssm/automation/customer-script-log-destination, /ssm/automation/customer-script-log-group-name, /ssm/parameter-store/default-parameter-tier, /ssm/parameter-store/high-throughput-enabled, or /ssm/managed-instance/activation-tier. For example, arn:aws:ssm:us-east-1:111122223333:servicesetting/ssm/parameter-store/high-throughput-enabled.

" } }, "documentation":"

The request body of the ResetServiceSetting API action.

" @@ -13387,7 +13431,8 @@ "MaintenanceWindow", "Parameter", "PatchBaseline", - "OpsItem" + "OpsItem", + "OpsMetadata" ] }, "ResponseCode":{"type":"integer"}, @@ -15426,11 +15471,11 @@ "members":{ "SettingId":{ "shape":"ServiceSettingId", - "documentation":"

The Amazon Resource Name (ARN) of the service setting to reset. For example, arn:aws:ssm:us-east-1:111122223333:servicesetting/ssm/parameter-store/high-throughput-enabled. The setting ID can be one of the following.

" + "documentation":"

The Amazon Resource Name (ARN) of the service setting to update. For example, arn:aws:ssm:us-east-1:111122223333:servicesetting/ssm/parameter-store/high-throughput-enabled. The setting ID can be one of the following.

" }, "SettingValue":{ "shape":"ServiceSettingValue", - "documentation":"

The new value to specify for the service setting. For the /ssm/parameter-store/default-parameter-tier setting ID, the setting value can be one of the following.

For the /ssm/parameter-store/high-throughput-enabled, and /ssm/managed-instance/activation-tier setting IDs, the setting value can be true or false.

" + "documentation":"

The new value to specify for the service setting. For the /ssm/parameter-store/default-parameter-tier setting ID, the setting value can be one of the following.

For the /ssm/parameter-store/high-throughput-enabled, and /ssm/managed-instance/activation-tier setting IDs, the setting value can be true or false.

For the /ssm/automation/customer-script-log-destination setting ID, the setting value can be CloudWatch.

For the /ssm/automation/customer-script-log-group-name setting ID, the setting value can be the name of a CloudWatch Logs log group.
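A sketch of routing Automation script logs to CloudWatch Logs with the two new setting IDs; the log group name is a placeholder:

```python
import botocore.session

session = botocore.session.get_session()
ssm = session.create_client('ssm', region_name='us-east-2')

ssm.update_service_setting(
    SettingId='/ssm/automation/customer-script-log-destination',
    SettingValue='CloudWatch',
)
ssm.update_service_setting(
    SettingId='/ssm/automation/customer-script-log-group-name',
    SettingValue='my-automation-script-logs',   # placeholder log group name
)

# Read a setting back; reset_service_setting returns it to the AWS default.
setting = ssm.get_service_setting(
    SettingId='/ssm/automation/customer-script-log-destination'
)
print(setting['ServiceSetting']['SettingValue'])
```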

" } }, "documentation":"

The request body of the UpdateServiceSetting API action.

" diff --git a/botocore/data/sso-admin/2020-07-20/service-2.json b/botocore/data/sso-admin/2020-07-20/service-2.json index e886ccab..91dddf65 100644 --- a/botocore/data/sso-admin/2020-07-20/service-2.json +++ b/botocore/data/sso-admin/2020-07-20/service-2.json @@ -1954,7 +1954,7 @@ "Token":{ "type":"string", "max":2048, - "pattern":"^[-a-zA-Z0-9+=/]*" + "pattern":"^[-a-zA-Z0-9+=/_]*" }, "UUId":{ "type":"string", diff --git a/botocore/data/transfer/2018-11-05/service-2.json b/botocore/data/transfer/2018-11-05/service-2.json index 31dd478a..4fcaf500 100644 --- a/botocore/data/transfer/2018-11-05/service-2.json +++ b/botocore/data/transfer/2018-11-05/service-2.json @@ -1631,7 +1631,7 @@ }, "UserPassword":{ "type":"string", - "max":2048, + "max":1024, "sensitive":true }, "VpcEndpointId":{ diff --git a/botocore/data/wafv2/2019-07-29/service-2.json b/botocore/data/wafv2/2019-07-29/service-2.json index 72470f4d..61d63da0 100644 --- a/botocore/data/wafv2/2019-07-29/service-2.json +++ b/botocore/data/wafv2/2019-07-29/service-2.json @@ -28,7 +28,7 @@ {"shape":"WAFUnavailableEntityException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Associates a Web ACL with a regional application resource, to protect the resource. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

For AWS CloudFront, don't use this call. Instead, use your CloudFront distribution configuration. To associate a Web ACL, in the CloudFront call UpdateDistribution, set the web ACL ID to the Amazon Resource Name (ARN) of the Web ACL. For information, see UpdateDistribution.

" + "documentation":"

Associates a Web ACL with a regional application resource, to protect the resource. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

For AWS CloudFront, don't use this call. Instead, use your CloudFront distribution configuration. To associate a Web ACL, in the CloudFront call UpdateDistribution, set the web ACL ID to the Amazon Resource Name (ARN) of the Web ACL. For information, see UpdateDistribution.
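A sketch of associating a regional resource, here an Application Load Balancer, with a web ACL; both ARNs are placeholders:

```python
import botocore.session

session = botocore.session.get_session()
wafv2 = session.create_client('wafv2', region_name='us-east-2')

wafv2.associate_web_acl(
    # Placeholder web ACL ARN.
    WebACLArn='arn:aws:wafv2:us-east-2:123456789012:regional/webacl/example/1111',
    # Placeholder ALB ARN; API Gateway stage and AppSync API ARNs also work.
    ResourceArn='arn:aws:elasticloadbalancing:us-east-2:123456789012:loadbalancer/app/example/2222',
)
```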

" }, "CheckCapacity":{ "name":"CheckCapacity", @@ -47,7 +47,7 @@ {"shape":"WAFUnavailableEntityException"}, {"shape":"WAFSubscriptionNotFoundException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Returns the web ACL capacity unit (WCU) requirements for a specified scope and set of rules. You can use this to check the capacity requirements for the rules you want to use in a RuleGroup or WebACL.

AWS WAF uses WCUs to calculate and control the operating resources that are used to run your rules, rule groups, and web ACLs. AWS WAF calculates capacity differently for each rule type, to reflect the relative cost of each rule. Simple rules that cost little to run use fewer WCUs than more complex rules that use more processing power. Rule group capacity is fixed at creation, which helps users plan their web ACL WCU usage when they use a rule group. The WCU limit for web ACLs is 1,500.

" + "documentation":"

Returns the web ACL capacity unit (WCU) requirements for a specified scope and set of rules. You can use this to check the capacity requirements for the rules you want to use in a RuleGroup or WebACL.

AWS WAF uses WCUs to calculate and control the operating resources that are used to run your rules, rule groups, and web ACLs. AWS WAF calculates capacity differently for each rule type, to reflect the relative cost of each rule. Simple rules that cost little to run use fewer WCUs than more complex rules that use more processing power. Rule group capacity is fixed at creation, which helps users plan their web ACL WCU usage when they use a rule group. The WCU limit for web ACLs is 1,500.

" }, "CreateIPSet":{ "name":"CreateIPSet", @@ -67,7 +67,7 @@ {"shape":"WAFTagOperationInternalErrorException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Creates an IPSet, which you use to identify web requests that originate from specific IP addresses or ranges of IP addresses. For example, if you're receiving a lot of requests from a ranges of IP addresses, you can configure AWS WAF to block them using an IPSet that lists those IP addresses.

" + "documentation":"

Creates an IPSet, which you use to identify web requests that originate from specific IP addresses or ranges of IP addresses. For example, if you're receiving a lot of requests from a range of IP addresses, you can configure AWS WAF to block them using an IPSet that lists those IP addresses.

" }, "CreateRegexPatternSet":{ "name":"CreateRegexPatternSet", @@ -87,7 +87,7 @@ {"shape":"WAFTagOperationInternalErrorException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Creates a RegexPatternSet, which you reference in a RegexPatternSetReferenceStatement, to have AWS WAF inspect a web request component for the specified patterns.

" + "documentation":"

Creates a RegexPatternSet, which you reference in a RegexPatternSetReferenceStatement, to have AWS WAF inspect a web request component for the specified patterns.

" }, "CreateRuleGroup":{ "name":"CreateRuleGroup", @@ -110,7 +110,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Creates a RuleGroup per the specifications provided.

A rule group defines a collection of rules to inspect and control web requests that you can use in a WebACL. When you create a rule group, you define an immutable capacity limit. If you update a rule group, you must stay within the capacity. This allows others to reuse the rule group with confidence in its capacity requirements.

" + "documentation":"

Creates a RuleGroup per the specifications provided.

A rule group defines a collection of rules to inspect and control web requests that you can use in a WebACL. When you create a rule group, you define an immutable capacity limit. If you update a rule group, you must stay within the capacity. This allows others to reuse the rule group with confidence in its capacity requirements.

" }, "CreateWebACL":{ "name":"CreateWebACL", @@ -134,7 +134,7 @@ {"shape":"WAFSubscriptionNotFoundException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Creates a WebACL per the specifications provided.

A Web ACL defines a collection of rules to use to inspect and control web requests. Each rule has an action defined (allow, block, or count) for requests that match the statement of the rule. In the Web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a Web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a Web ACL with one or more AWS resources to protect. The resources can be Amazon CloudFront, an Amazon API Gateway REST API, an Application Load Balancer, or an AWS AppSync GraphQL API.

" + "documentation":"

Creates a WebACL per the specifications provided.

A Web ACL defines a collection of rules to use to inspect and control web requests. Each rule has an action defined (allow, block, or count) for requests that match the statement of the rule. In the Web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a Web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a Web ACL with one or more AWS resources to protect. The resources can be Amazon CloudFront, an Amazon API Gateway REST API, an Application Load Balancer, or an AWS AppSync GraphQL API.

" }, "DeleteFirewallManagerRuleGroups":{ "name":"DeleteFirewallManagerRuleGroups", @@ -171,7 +171,7 @@ {"shape":"WAFTagOperationInternalErrorException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Deletes the specified IPSet.

" + "documentation":"

Deletes the specified IPSet.

" }, "DeleteLoggingConfiguration":{ "name":"DeleteLoggingConfiguration", @@ -188,7 +188,7 @@ {"shape":"WAFInvalidParameterException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Deletes the LoggingConfiguration from the specified web ACL.

" + "documentation":"

Deletes the LoggingConfiguration from the specified web ACL.

" }, "DeletePermissionPolicy":{ "name":"DeletePermissionPolicy", @@ -223,7 +223,7 @@ {"shape":"WAFTagOperationInternalErrorException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Deletes the specified RegexPatternSet.

" + "documentation":"

Deletes the specified RegexPatternSet.

" }, "DeleteRuleGroup":{ "name":"DeleteRuleGroup", @@ -243,7 +243,7 @@ {"shape":"WAFTagOperationInternalErrorException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Deletes the specified RuleGroup.

" + "documentation":"

Deletes the specified RuleGroup.

" }, "DeleteWebACL":{ "name":"DeleteWebACL", @@ -263,7 +263,7 @@ {"shape":"WAFTagOperationInternalErrorException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Deletes the specified WebACL.

You can only use this if ManagedByFirewallManager is false in the specified WebACL.

" + "documentation":"

Deletes the specified WebACL.

You can only use this if ManagedByFirewallManager is false in the specified WebACL.

" }, "DescribeManagedRuleGroup":{ "name":"DescribeManagedRuleGroup", @@ -280,7 +280,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Provides high-level information for a managed rule group, including descriptions of the rules.

" + "documentation":"

Provides high-level information for a managed rule group, including descriptions of the rules.

" }, "DisassociateWebACL":{ "name":"DisassociateWebACL", @@ -296,7 +296,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Disassociates a Web ACL from a regional application resource. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

For AWS CloudFront, don't use this call. Instead, use your CloudFront distribution configuration. To disassociate a Web ACL, provide an empty web ACL ID in the CloudFront call UpdateDistribution. For information, see UpdateDistribution.

" + "documentation":"

Disassociates a Web ACL from a regional application resource. A regional application can be an Application Load Balancer (ALB), an API Gateway REST API, or an AppSync GraphQL API.

For AWS CloudFront, don't use this call. Instead, use your CloudFront distribution configuration. To disassociate a Web ACL, provide an empty web ACL ID in the CloudFront call UpdateDistribution. For information, see UpdateDistribution.

" }, "GetIPSet":{ "name":"GetIPSet", @@ -312,7 +312,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Retrieves the specified IPSet.

" + "documentation":"

Retrieves the specified IPSet.

" }, "GetLoggingConfiguration":{ "name":"GetLoggingConfiguration", @@ -328,7 +328,7 @@ {"shape":"WAFInvalidParameterException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Returns the LoggingConfiguration for the specified web ACL.

" + "documentation":"

Returns the LoggingConfiguration for the specified web ACL.

" }, "GetPermissionPolicy":{ "name":"GetPermissionPolicy", @@ -359,7 +359,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Retrieves the keys that are currently blocked by a rate-based rule. The maximum number of managed keys that can be blocked for a single rate-based rule is 10,000. If more than 10,000 addresses exceed the rate limit, those with the highest rates are blocked.

" + "documentation":"

Retrieves the keys that are currently blocked by a rate-based rule. The maximum number of managed keys that can be blocked for a single rate-based rule is 10,000. If more than 10,000 addresses exceed the rate limit, those with the highest rates are blocked.

" }, "GetRegexPatternSet":{ "name":"GetRegexPatternSet", @@ -375,7 +375,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Retrieves the specified RegexPatternSet.

" + "documentation":"

Retrieves the specified RegexPatternSet.

" }, "GetRuleGroup":{ "name":"GetRuleGroup", @@ -391,7 +391,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Retrieves the specified RuleGroup.

" + "documentation":"

Retrieves the specified RuleGroup.

" }, "GetSampledRequests":{ "name":"GetSampledRequests", @@ -406,7 +406,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFInvalidParameterException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Gets detailed information about a specified number of requests--a sample--that AWS WAF randomly selects from among the first 5,000 requests that your AWS resource received during a time range that you choose. You can specify a sample size of up to 500 requests, and you can specify any time range in the previous three hours.

GetSampledRequests returns a time range, which is usually the time range that you specified. However, if your resource (such as a CloudFront distribution) received 5,000 requests before the specified time range elapsed, GetSampledRequests returns an updated time range. This new time range indicates the actual period during which AWS WAF selected the requests in the sample.

" + "documentation":"

Gets detailed information about a specified number of requests--a sample--that AWS WAF randomly selects from among the first 5,000 requests that your AWS resource received during a time range that you choose. You can specify a sample size of up to 500 requests, and you can specify any time range in the previous three hours.

GetSampledRequests returns a time range, which is usually the time range that you specified. However, if your resource (such as a CloudFront distribution) received 5,000 requests before the specified time range elapsed, GetSampledRequests returns an updated time range. This new time range indicates the actual period during which AWS WAF selected the requests in the sample.

" }, "GetWebACL":{ "name":"GetWebACL", @@ -422,7 +422,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Retrieves the specified WebACL.

" + "documentation":"

Retrieves the specified WebACL.

" }, "GetWebACLForResource":{ "name":"GetWebACLForResource", @@ -439,7 +439,7 @@ {"shape":"WAFUnavailableEntityException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Retrieves the WebACL for the specified resource.

" + "documentation":"

Retrieves the WebACL for the specified resource.

" }, "ListAvailableManagedRuleGroups":{ "name":"ListAvailableManagedRuleGroups", @@ -454,7 +454,7 @@ {"shape":"WAFInvalidParameterException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Retrieves an array of managed rule groups that are available for you to use. This list includes all AWS Managed Rules rule groups and the AWS Marketplace managed rule groups that you're subscribed to.

" + "documentation":"

Retrieves an array of managed rule groups that are available for you to use. This list includes all AWS Managed Rules rule groups and the AWS Marketplace managed rule groups that you're subscribed to.

" }, "ListIPSets":{ "name":"ListIPSets", @@ -469,7 +469,7 @@ {"shape":"WAFInvalidParameterException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Retrieves an array of IPSetSummary objects for the IP sets that you manage.

" + "documentation":"

Retrieves an array of IPSetSummary objects for the IP sets that you manage.

" }, "ListLoggingConfigurations":{ "name":"ListLoggingConfigurations", @@ -484,7 +484,7 @@ {"shape":"WAFInvalidParameterException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Retrieves an array of your LoggingConfiguration objects.

" + "documentation":"

Retrieves an array of your LoggingConfiguration objects.

" }, "ListRegexPatternSets":{ "name":"ListRegexPatternSets", @@ -499,7 +499,7 @@ {"shape":"WAFInvalidParameterException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Retrieves an array of RegexPatternSetSummary objects for the regex pattern sets that you manage.

" + "documentation":"

Retrieves an array of RegexPatternSetSummary objects for the regex pattern sets that you manage.

" }, "ListResourcesForWebACL":{ "name":"ListResourcesForWebACL", @@ -515,7 +515,7 @@ {"shape":"WAFInvalidParameterException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Retrieves an array of the Amazon Resource Names (ARNs) for the regional resources that are associated with the specified web ACL. If you want the list of AWS CloudFront resources, use the AWS CloudFront call ListDistributionsByWebACLId.

" + "documentation":"

Retrieves an array of the Amazon Resource Names (ARNs) for the regional resources that are associated with the specified web ACL. If you want the list of AWS CloudFront resources, use the AWS CloudFront call ListDistributionsByWebACLId.

" }, "ListRuleGroups":{ "name":"ListRuleGroups", @@ -530,7 +530,7 @@ {"shape":"WAFInvalidParameterException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Retrieves an array of RuleGroupSummary objects for the rule groups that you manage.

" + "documentation":"

Retrieves an array of RuleGroupSummary objects for the rule groups that you manage.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -548,7 +548,7 @@ {"shape":"WAFTagOperationInternalErrorException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Retrieves the TagInfoForResource for the specified resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

You can tag the AWS resources that you manage through AWS WAF: web ACLs, rule groups, IP sets, and regex pattern sets. You can't manage or view tags through the AWS WAF console.

" + "documentation":"

Retrieves the TagInfoForResource for the specified resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

You can tag the AWS resources that you manage through AWS WAF: web ACLs, rule groups, IP sets, and regex pattern sets. You can't manage or view tags through the AWS WAF console.

" }, "ListWebACLs":{ "name":"ListWebACLs", @@ -563,7 +563,7 @@ {"shape":"WAFInvalidParameterException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Retrieves an array of WebACLSummary objects for the web ACLs that you manage.

" + "documentation":"

Retrieves an array of WebACLSummary objects for the web ACLs that you manage.

" }, "PutLoggingConfiguration":{ "name":"PutLoggingConfiguration", @@ -582,7 +582,7 @@ {"shape":"WAFInvalidOperationException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Enables the specified LoggingConfiguration, to start logging from a web ACL, according to the configuration provided.

You can access information about all traffic that AWS WAF inspects using the following steps:

  1. Create an Amazon Kinesis Data Firehose.

    Create the data firehose with a PUT source and in the Region that you are operating. If you are capturing logs for Amazon CloudFront, always create the firehose in US East (N. Virginia).

    Give the data firehose a name that starts with the prefix aws-waf-logs-. For example, aws-waf-logs-us-east-2-analytics.

    Do not create the data firehose using a Kinesis stream as your source.

  2. Associate that firehose to your web ACL using a PutLoggingConfiguration request.

When you successfully enable logging using a PutLoggingConfiguration request, AWS WAF will create a service linked role with the necessary permissions to write logs to the Amazon Kinesis Data Firehose. For more information, see Logging Web ACL Traffic Information in the AWS WAF Developer Guide.

" + "documentation":"

Enables the specified LoggingConfiguration, to start logging from a web ACL, according to the configuration provided.

You can access information about all traffic that AWS WAF inspects using the following steps:

  1. Create an Amazon Kinesis Data Firehose.

    Create the data firehose with a PUT source and in the Region where you are operating. If you are capturing logs for Amazon CloudFront, always create the firehose in US East (N. Virginia).

    Give the data firehose a name that starts with the prefix aws-waf-logs-. For example, aws-waf-logs-us-east-2-analytics.

    Do not create the data firehose using a Kinesis stream as your source.

  2. Associate that firehose to your web ACL using a PutLoggingConfiguration request.

When you successfully enable logging using a PutLoggingConfiguration request, AWS WAF will create a service linked role with the necessary permissions to write logs to the Amazon Kinesis Data Firehose. For more information, see Logging Web ACL Traffic Information in the AWS WAF Developer Guide.
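A sketch of step 2, assuming the Kinesis Data Firehose from step 1 already exists; both ARNs are placeholders, and the delivery stream name must start with aws-waf-logs-:

```python
import botocore.session

session = botocore.session.get_session()
wafv2 = session.create_client('wafv2', region_name='us-east-2')

wafv2.put_logging_configuration(
    LoggingConfiguration={
        # Placeholder web ACL ARN.
        'ResourceArn': 'arn:aws:wafv2:us-east-2:123456789012:regional/webacl/example/1111',
        'LogDestinationConfigs': [
            # Placeholder firehose ARN; the name must begin with "aws-waf-logs-".
            'arn:aws:firehose:us-east-2:123456789012:deliverystream/aws-waf-logs-us-east-2-analytics'
        ],
    }
)
```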

" }, "PutPermissionPolicy":{ "name":"PutPermissionPolicy", @@ -617,7 +617,7 @@ {"shape":"WAFTagOperationInternalErrorException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Associates tags with the specified AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

You can tag the AWS resources that you manage through AWS WAF: web ACLs, rule groups, IP sets, and regex pattern sets. You can't manage or view tags through the AWS WAF console.

" + "documentation":"

Associates tags with the specified AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

You can tag the AWS resources that you manage through AWS WAF: web ACLs, rule groups, IP sets, and regex pattern sets. You can't manage or view tags through the AWS WAF console.

" }, "UntagResource":{ "name":"UntagResource", @@ -635,7 +635,7 @@ {"shape":"WAFTagOperationInternalErrorException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Disassociates tags from an AWS resource. Tags are key:value pairs that you can associate with AWS resources. For example, the tag key might be \"customer\" and the tag value might be \"companyA.\" You can specify one or more tags to add to each container. You can add up to 50 tags to each AWS resource.

" + "documentation":"

Disassociates tags from an AWS resource. Tags are key:value pairs that you can associate with AWS resources. For example, the tag key might be \"customer\" and the tag value might be \"companyA.\" You can specify one or more tags to add to each AWS resource. You can add up to 50 tags to each AWS resource.

" }, "UpdateIPSet":{ "name":"UpdateIPSet", @@ -654,7 +654,7 @@ {"shape":"WAFLimitsExceededException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Updates the specified IPSet.

" + "documentation":"

Updates the specified IPSet.

This operation completely replaces any IP address specifications that you already have in the IP set with the ones that you provide to this call. If you want to add to or modify the addresses that are already in the IP set, retrieve those by calling GetIPSet, update them, and provide the complete updated array of IP addresses to this call.
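Because the call replaces the entire address list, a typical update is a read-modify-write. A minimal sketch; the set name, ID, and scope are placeholders:

```python
import botocore.session

session = botocore.session.get_session()
wafv2 = session.create_client('wafv2', region_name='us-east-2')

# Read the current addresses and the lock token.
current = wafv2.get_ip_set(
    Name='blocked-ips',                          # placeholder
    Scope='REGIONAL',
    Id='a1b2c3d4-5678-90ab-cdef-EXAMPLE11111',   # placeholder
)

# Modify the complete list locally, then write it back with the lock token.
addresses = current['IPSet']['Addresses'] + ['203.0.113.0/24']
wafv2.update_ip_set(
    Name='blocked-ips',
    Scope='REGIONAL',
    Id='a1b2c3d4-5678-90ab-cdef-EXAMPLE11111',
    Addresses=addresses,
    LockToken=current['LockToken'],
)
```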

" }, "UpdateRegexPatternSet":{ "name":"UpdateRegexPatternSet", @@ -673,7 +673,7 @@ {"shape":"WAFLimitsExceededException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Updates the specified RegexPatternSet.

" + "documentation":"

Updates the specified RegexPatternSet.

" }, "UpdateRuleGroup":{ "name":"UpdateRuleGroup", @@ -694,7 +694,7 @@ {"shape":"WAFSubscriptionNotFoundException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Updates the specified RuleGroup.

A rule group defines a collection of rules to inspect and control web requests that you can use in a WebACL. When you create a rule group, you define an immutable capacity limit. If you update a rule group, you must stay within the capacity. This allows others to reuse the rule group with confidence in its capacity requirements.

" + "documentation":"

Updates the specified RuleGroup.

A rule group defines a collection of rules to inspect and control web requests that you can use in a WebACL. When you create a rule group, you define an immutable capacity limit. If you update a rule group, you must stay within the capacity. This allows others to reuse the rule group with confidence in its capacity requirements.

" }, "UpdateWebACL":{ "name":"UpdateWebACL", @@ -716,22 +716,28 @@ {"shape":"WAFSubscriptionNotFoundException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Updates the specified WebACL.

A Web ACL defines a collection of rules to use to inspect and control web requests. Each rule has an action defined (allow, block, or count) for requests that match the statement of the rule. In the Web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a Web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a Web ACL with one or more AWS resources to protect. The resources can be Amazon CloudFront, an Amazon API Gateway REST API, an Application Load Balancer, or an AWS AppSync GraphQL API.

" + "documentation":"

Updates the specified WebACL.

A Web ACL defines a collection of rules to use to inspect and control web requests. Each rule has an action defined (allow, block, or count) for requests that match the statement of the rule. In the Web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a Web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a Web ACL with one or more AWS resources to protect. The resources can be Amazon CloudFront, an Amazon API Gateway REST API, an Application Load Balancer, or an AWS AppSync GraphQL API.

" } }, "shapes":{ "Action":{"type":"string"}, + "All":{ + "type":"structure", + "members":{ + }, + "documentation":"

Inspect all of the elements that AWS WAF has parsed and extracted from the web request JSON body that are within the JsonBody MatchScope. This is used with the FieldToMatch option JsonBody.

This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.
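The All shape and the BodyParsingFallbackBehavior enum added in this hunk belong to the new JSON body inspection support. The JsonBody shape itself is not shown here, so the member names below (MatchPattern, MatchScope, InvalidFallbackBehavior) are assumptions based on the surrounding model; a sketch of a FieldToMatch that inspects every parsed element of a JSON body:

```python
# Assumed JsonBody member names; the JsonBody shape is not part of this hunk.
field_to_match = {
    'JsonBody': {
        'MatchPattern': {'All': {}},    # inspect everything AWS WAF parsed
        'MatchScope': 'ALL',            # keys and values
        'InvalidFallbackBehavior': 'EVALUATE_AS_STRING',
    }
}
```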

" + }, "AllQueryArguments":{ "type":"structure", "members":{ }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

All query arguments of a web request.

This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.

" + "documentation":"

All query arguments of a web request.

This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.

" }, "AllowAction":{ "type":"structure", "members":{ }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Specifies that AWS WAF should allow requests.

This is used only in the context of other settings, for example to specify values for RuleAction and web ACL DefaultAction.

" + "documentation":"

Specifies that AWS WAF should allow requests.

This is used only in the context of other settings, for example to specify values for RuleAction and web ACL DefaultAction.

" }, "AndStatement":{ "type":"structure", @@ -742,7 +748,7 @@ "documentation":"

The statements to combine with AND logic. You can use any statements that can be nested.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

A logical rule statement used to combine other rule statements with AND logic. You provide more than one Statement within the AndStatement.

" + "documentation":"

A logical rule statement used to combine other rule statements with AND logic. You provide more than one Statement within the AndStatement.

" }, "AssociateWebACLRequest":{ "type":"structure", @@ -770,13 +776,21 @@ "type":"structure", "members":{ }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Specifies that AWS WAF should block requests.

This is used only in the context of other settings, for example to specify values for RuleAction and web ACL DefaultAction.

" + "documentation":"

Specifies that AWS WAF should block requests.

This is used only in the context of other settings, for example to specify values for RuleAction and web ACL DefaultAction.

" }, "Body":{ "type":"structure", "members":{ }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

The body of a web request. This immediately follows the request headers.

This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.

" + "documentation":"

The body of a web request. This immediately follows the request headers.

This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.

" + }, + "BodyParsingFallbackBehavior":{ + "type":"string", + "enum":[ + "MATCH", + "NO_MATCH", + "EVALUATE_AS_STRING" + ] }, "Boolean":{"type":"boolean"}, "ByteMatchStatement":{ @@ -805,7 +819,7 @@ "documentation":"

The area within the portion of a web request that you want AWS WAF to search for SearchString. Valid values include the following:

CONTAINS

The specified part of the web request must include the value of SearchString, but the location doesn't matter.

CONTAINS_WORD

The specified part of the web request must include the value of SearchString, and SearchString must contain only alphanumeric characters or underscore (A-Z, a-z, 0-9, or _). In addition, SearchString must be a word, which means that both of the following are true:

EXACTLY

The value of the specified part of the web request must exactly match the value of SearchString.

STARTS_WITH

The value of SearchString must appear at the beginning of the specified part of the web request.

ENDS_WITH

The value of SearchString must appear at the end of the specified part of the web request.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

A rule statement that defines a string match search for AWS WAF to apply to web requests. The byte match statement provides the bytes to search for, the location in requests that you want AWS WAF to search, and other settings. The bytes to search for are typically a string that corresponds with ASCII characters. In the AWS WAF console and the developer guide, this is refered to as a string match statement.

" + "documentation":"

A rule statement that defines a string match search for AWS WAF to apply to web requests. The byte match statement provides the bytes to search for, the location in requests that you want AWS WAF to search, and other settings. The bytes to search for are typically a string that corresponds with ASCII characters. In the AWS WAF console and the developer guide, this is referred to as a string match statement.

" }, "CapacityUnit":{ "type":"long", @@ -856,7 +870,7 @@ "type":"structure", "members":{ }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Specifies that AWS WAF should count requests.

This is used only in the context of other settings, for example to specify values for RuleAction and web ACL DefaultAction.

" + "documentation":"

Specifies that AWS WAF should count requests.

This is used only in the context of other settings, for example to specify values for RuleAction and web ACL DefaultAction.

" }, "Country":{"type":"string"}, "CountryCode":{ @@ -1137,7 +1151,7 @@ }, "Description":{ "shape":"EntityDescription", - "documentation":"

A description of the IP set that helps with identification. You cannot change the description of an IP set after you create it.

" + "documentation":"

A description of the IP set that helps with identification.

" }, "IPAddressVersion":{ "shape":"IPAddressVersion", @@ -1145,7 +1159,7 @@ }, "Addresses":{ "shape":"IPAddresses", - "documentation":"

Contains an array of strings that specify one or more IP addresses or blocks of IP addresses in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports all address ranges for IP versions IPv4 and IPv6.

Examples:

For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

" + "documentation":"

Contains an array of strings that specify one or more IP addresses or blocks of IP addresses in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports all IPv4 and IPv6 CIDR ranges except for /0.

Examples:

For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

" }, "Tags":{ "shape":"TagList", @@ -1180,7 +1194,7 @@ }, "Description":{ "shape":"EntityDescription", - "documentation":"

A description of the set that helps with identification. You cannot change the description of a set after you create it.

" + "documentation":"

A description of the set that helps with identification.

" }, "RegularExpressionList":{ "shape":"RegularExpressionList", @@ -1224,7 +1238,7 @@ }, "Description":{ "shape":"EntityDescription", - "documentation":"

A description of the rule group that helps with identification. You cannot change the description of a rule group after you create it.

" + "documentation":"

A description of the rule group that helps with identification.

" }, "Rules":{ "shape":"Rules", @@ -1272,7 +1286,7 @@ }, "Description":{ "shape":"EntityDescription", - "documentation":"

A description of the Web ACL that helps with identification. You cannot change the description of a Web ACL after you create it.

" + "documentation":"

A description of the Web ACL that helps with identification.

" }, "Rules":{ "shape":"Rules", @@ -1309,7 +1323,7 @@ "documentation":"

Specifies that AWS WAF should allow requests by default.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

In a WebACL, this is the action that you want AWS WAF to perform when a web request doesn't match any of the rules in the WebACL. The default action must be a terminating action, so count is not allowed.

" + "documentation":"

In a WebACL, this is the action that you want AWS WAF to perform when a web request doesn't match any of the rules in the WebACL. The default action must be a terminating action, so count is not allowed.

" }, "DeleteFirewallManagerRuleGroupsRequest":{ "type":"structure", @@ -1574,7 +1588,7 @@ "documentation":"

The name of the rule to exclude.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Specifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT. This effectively excludes the rule from acting on web requests.

" + "documentation":"

Specifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT. This effectively excludes the rule from acting on web requests.

" }, "ExcludedRules":{ "type":"list", @@ -1592,11 +1606,11 @@ "members":{ "SingleHeader":{ "shape":"SingleHeader", - "documentation":"

Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer. This setting isn't case sensitive.

" + "documentation":"

Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer. This setting isn't case sensitive.

Example JSON: \"SingleHeader\": { \"Name\": \"haystack\" }

" }, "SingleQueryArgument":{ "shape":"SingleQueryArgument", - "documentation":"

Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion. The name can be up to 30 characters long and isn't case sensitive.

This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.

" + "documentation":"

Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion. The name can be up to 30 characters long and isn't case sensitive.

This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.

Example JSON: \"SingleQueryArgument\": { \"Name\": \"myArgument\" }

" }, "AllQueryArguments":{ "shape":"AllQueryArguments", @@ -1612,14 +1626,18 @@ }, "Body":{ "shape":"Body", - "documentation":"

Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.

Note that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don't need to inspect more than 8 KB, you can guarantee that you don't allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement, with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn't support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.

" + "documentation":"

Inspect the request body as plain text. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.

Note that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don't need to inspect more than 8 KB, you can guarantee that you don't allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement, with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn't support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.

" }, "Method":{ "shape":"Method", "documentation":"

Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.

" + }, + "JsonBody":{ + "shape":"JsonBody", + "documentation":"

Inspect the request body as JSON. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.

Note that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don't need to inspect more than 8 KB, you can guarantee that you don't allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement, with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn't support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

The part of a web request that you want AWS WAF to inspect. Include the single FieldToMatch type that you want to inspect, with additional specifications as needed, according to the type. You specify a single request component in FieldToMatch for each rule statement that requires it. To inspect more than one component of a web request, create a separate rule statement for each component.

" + "documentation":"

The part of a web request that you want AWS WAF to inspect. Include the single FieldToMatch type that you want to inspect, with additional specifications as needed, according to the type. You specify a single request component in FieldToMatch for each rule statement that requires it. To inspect more than one component of a web request, create a separate rule statement for each component.

" }, "FieldToMatchData":{ "type":"string", @@ -1679,7 +1697,7 @@ }, "FallbackBehavior":{ "shape":"FallbackBehavior", - "documentation":"

The match status to assign to the web request if the request doesn't have a valid IP address in the specified position.

If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all.

You can specify the following fallback behaviors:

" + "documentation":"

The match status to assign to the web request if the request doesn't have a valid IP address in the specified position.

If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all.

You can specify the following fallback behaviors:

" } }, "documentation":"

The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.

If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all.

This configuration is used for GeoMatchStatement and RateBasedStatement. For IPSetReferenceStatement, use IPSetForwardedIPConfig instead.

AWS WAF only evaluates the first IP address found in the specified HTTP header.

" @@ -1710,7 +1728,7 @@ "documentation":"

The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.

If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

A rule statement used to identify web requests based on country of origin.

" + "documentation":"

A rule statement used to identify web requests based on country of origin.

" }, "GetIPSetRequest":{ "type":"structure", @@ -1919,7 +1937,7 @@ }, "TimeWindow":{ "shape":"TimeWindow", - "documentation":"

The start date and time and the end date and time of the range for which you want GetSampledRequests to return a sample of requests. You must specify the times in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

" + "documentation":"

The start date and time and the end date and time of the range for which you want GetSampledRequests to return a sample of requests. You must specify the times in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours. If you specify a start time that's earlier than three hours ago, AWS WAF sets it to three hours ago.

" }, "MaxItems":{ "shape":"ListMaxItems", @@ -2010,7 +2028,7 @@ "documentation":"

The value of the HTTP header.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Part of the response from GetSampledRequests. This is a complex type that appears as Headers in the response syntax. HTTPHeader contains the names and values of all of the headers that appear in one of the web requests.

" + "documentation":"

Part of the response from GetSampledRequests. This is a complex type that appears as Headers in the response syntax. HTTPHeader contains the names and values of all of the headers that appear in one of the web requests.

" }, "HTTPHeaders":{ "type":"list", @@ -2045,7 +2063,7 @@ "documentation":"

A complex type that contains the name and value for each header in the sampled web request.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Part of the response from GetSampledRequests. This is a complex type that appears as Request in the response syntax. HTTPRequest contains information about one of the web requests.

" + "documentation":"

Part of the response from GetSampledRequests. This is a complex type that appears as Request in the response syntax. HTTPRequest contains information about one of the web requests.

" }, "HTTPVersion":{"type":"string"}, "HeaderName":{"type":"string"}, @@ -2091,7 +2109,7 @@ }, "Description":{ "shape":"EntityDescription", - "documentation":"

A description of the IP set that helps with identification. You cannot change the description of an IP set after you create it.

" + "documentation":"

A description of the IP set that helps with identification.

" }, "IPAddressVersion":{ "shape":"IPAddressVersion", @@ -2099,10 +2117,10 @@ }, "Addresses":{ "shape":"IPAddresses", - "documentation":"

Contains an array of strings that specify one or more IP addresses or blocks of IP addresses in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports all address ranges for IP versions IPv4 and IPv6.

Examples:

For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

" + "documentation":"

Contains an array of strings that specify one or more IP addresses or blocks of IP addresses in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports all IPv4 and IPv6 CIDR ranges except for /0.

Examples:

For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Contains one or more IP addresses or blocks of IP addresses specified in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports any CIDR range. For information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

AWS WAF assigns an ARN to each IPSet that you create. To use an IP set in a rule, you provide the ARN to the Rule statement IPSetReferenceStatement.

" + "documentation":"

Contains one or more IP addresses or blocks of IP addresses specified in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports all IPv4 and IPv6 CIDR ranges except for /0. For information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

AWS WAF assigns an ARN to each IPSet that you create. To use an IP set in a rule, you provide the ARN to the Rule statement IPSetReferenceStatement.

" }, "IPSetForwardedIPConfig":{ "type":"structure", @@ -2118,7 +2136,7 @@ }, "FallbackBehavior":{ "shape":"FallbackBehavior", - "documentation":"

The match status to assign to the web request if the request doesn't have a valid IP address in the specified position.

If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all.

You can specify the following fallback behaviors:

" + "documentation":"

The match status to assign to the web request if the request doesn't have a valid IP address in the specified position.

If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all.

You can specify the following fallback behaviors:

" }, "Position":{ "shape":"ForwardedIPPosition", @@ -2140,7 +2158,7 @@ "documentation":"

The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.

If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

A rule statement used to detect web requests coming from particular IP addresses or address ranges. To use this, create an IPSet that specifies the addresses you want to detect, then use the ARN of that set in this statement. To create an IP set, see CreateIPSet.

Each IP set rule statement references an IP set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. When you update the referenced set, AWS WAF automatically updates all rules that reference it.

" + "documentation":"

A rule statement used to detect web requests coming from particular IP addresses or address ranges. To use this, create an IPSet that specifies the addresses you want to detect, then use the ARN of that set in this statement. To create an IP set, see CreateIPSet.

Each IP set rule statement references an IP set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. When you update the referenced set, AWS WAF automatically updates all rules that reference it.

" }, "IPSetSummaries":{ "type":"list", @@ -2159,7 +2177,7 @@ }, "Description":{ "shape":"EntityDescription", - "documentation":"

A description of the IP set that helps with identification. You cannot change the description of an IP set after you create it.

" + "documentation":"

A description of the IP set that helps with identification.

" }, "LockToken":{ "shape":"LockToken", @@ -2170,9 +2188,64 @@ "documentation":"

The Amazon Resource Name (ARN) of the entity.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

High-level information about an IPSet, returned by operations like create and list. This provides information like the ID, that you can use to retrieve and manage an IPSet, and the ARN, that you provide to the IPSetReferenceStatement to use the address set in a Rule.

" + "documentation":"

High-level information about an IPSet, returned by operations like create and list. This provides information like the ID, that you can use to retrieve and manage an IPSet, and the ARN, that you provide to the IPSetReferenceStatement to use the address set in a Rule.

" }, "IPString":{"type":"string"}, + "JsonBody":{ + "type":"structure", + "required":[ + "MatchPattern", + "MatchScope" + ], + "members":{ + "MatchPattern":{ + "shape":"JsonMatchPattern", + "documentation":"

The patterns to look for in the JSON body. AWS WAF inspects the results of these pattern matches against the rule inspection criteria.

" + }, + "MatchScope":{ + "shape":"JsonMatchScope", + "documentation":"

The parts of the JSON to match against using the MatchPattern. If you specify All, AWS WAF matches against keys and values.

" + }, + "InvalidFallbackBehavior":{ + "shape":"BodyParsingFallbackBehavior", + "documentation":"

What AWS WAF should do if it fails to completely parse the JSON body. The options are the following:

If you don't provide this setting, AWS WAF parses and evaluates the content only up to the first parsing failure that it encounters.

AWS WAF does its best to parse the entire JSON body, but might be forced to stop for reasons such as invalid characters, duplicate keys, truncation, and any content whose root node isn't an object or an array.

AWS WAF parses the JSON in the following examples as two valid key, value pairs:

" + } + }, + "documentation":"

The body of a web request, inspected as JSON. The body immediately follows the request headers. This is used in the FieldToMatch specification.

Use the specifications in this object to indicate which parts of the JSON body to inspect using the rule's inspection criteria. AWS WAF inspects only the parts of the JSON that result from the matches that you indicate.

" + }, + "JsonMatchPattern":{ + "type":"structure", + "members":{ + "All":{ + "shape":"All", + "documentation":"

Match all of the elements. See also MatchScope in JsonBody.

You must specify either this setting or the IncludedPaths setting, but not both.

" + }, + "IncludedPaths":{ + "shape":"JsonPointerPaths", + "documentation":"

Match only the specified include paths. See also MatchScope in JsonBody.

Provide the include paths using JSON Pointer syntax. For example, \"IncludedPaths\": [\"/dogs/0/name\", \"/dogs/1/name\"]. For information about this syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

You must specify either this setting or the All setting, but not both.

Don't use this option to include all paths. Instead, use the All setting.

" + } + }, + "documentation":"

The patterns to look for in the JSON body. AWS WAF inspects the results of these pattern matches against the rule inspection criteria. This is used with the FieldToMatch option JsonBody.

" + }, + "JsonMatchScope":{ + "type":"string", + "enum":[ + "ALL", + "KEY", + "VALUE" + ] + }, + "JsonPointerPath":{ + "type":"string", + "max":512, + "min":1, + "pattern":"([/])|([/](([^~])|(~[01]))+)" + }, + "JsonPointerPaths":{ + "type":"list", + "member":{"shape":"JsonPointerPath"}, + "min":1 + }, "ListAvailableManagedRuleGroupsRequest":{ "type":"structure", "required":["Scope"], @@ -2453,7 +2526,7 @@ "documentation":"

Indicates whether the logging configuration was created by AWS Firewall Manager, as part of an AWS WAF policy configuration. If true, only Firewall Manager can modify or delete the configuration.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Defines an association between Amazon Kinesis Data Firehose destinations and a web ACL resource, for logging from AWS WAF. As part of the association, you can specify parts of the standard logging fields to keep out of the logs.

" + "documentation":"

Defines an association between Amazon Kinesis Data Firehose destinations and a web ACL resource, for logging from AWS WAF. As part of the association, you can specify parts of the standard logging fields to keep out of the logs.

" }, "LoggingConfigurations":{ "type":"list", @@ -2479,7 +2552,7 @@ "documentation":"

The rules whose actions are set to COUNT by the web ACL, regardless of the action that is set on the rule. This effectively excludes the rule from acting on web requests.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups.

You can't nest a ManagedRuleGroupStatement, for example for use inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.

" + "documentation":"

A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups.

You can't nest a ManagedRuleGroupStatement, for example for use inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.

" }, "ManagedRuleGroupSummaries":{ "type":"list", @@ -2501,13 +2574,13 @@ "documentation":"

The description of the managed rule group, provided by AWS Managed Rules or the AWS Marketplace seller who manages it.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

High-level information about a managed rule group, returned by ListAvailableManagedRuleGroups. This provides information like the name and vendor name, that you provide when you add a ManagedRuleGroupStatement to a web ACL. Managed rule groups include AWS Managed Rules rule groups, which are free of charge to AWS WAF customers, and AWS Marketplace managed rule groups, which you can subscribe to through AWS Marketplace.

" + "documentation":"

High-level information about a managed rule group, returned by ListAvailableManagedRuleGroups. This provides information like the name and vendor name, that you provide when you add a ManagedRuleGroupStatement to a web ACL. Managed rule groups include AWS Managed Rules rule groups, which are free of charge to AWS WAF customers, and AWS Marketplace managed rule groups, which you can subscribe to through AWS Marketplace.

" }, "Method":{ "type":"structure", "members":{ }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

The HTTP method of a web request. The method indicates the type of operation that the request is asking the origin to perform.

This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.

" + "documentation":"

The HTTP method of a web request. The method indicates the type of operation that the request is asking the origin to perform.

This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.

" }, "MetricName":{ "type":"string", @@ -2525,7 +2598,7 @@ "type":"structure", "members":{ }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Specifies that AWS WAF should do nothing. This is generally used to try out a rule without performing any actions. You set the OverrideAction on the Rule.

This is used only in the context of other settings, for example to specify values for RuleAction and web ACL DefaultAction.

" + "documentation":"

Specifies that AWS WAF should do nothing. This is generally used to try out a rule without performing any actions. You set the OverrideAction on the Rule.

This is used only in the context of other settings, for example to specify values for RuleAction and web ACL DefaultAction.

" }, "NotStatement":{ "type":"structure", @@ -2536,7 +2609,7 @@ "documentation":"

The statement to negate. You can use any statement that can be nested.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

A logical rule statement used to negate the results of another rule statement. You provide one Statement within the NotStatement.

" + "documentation":"

A logical rule statement used to negate the results of another rule statement. You provide one Statement within the NotStatement.

" }, "OrStatement":{ "type":"structure", @@ -2547,7 +2620,7 @@ "documentation":"

The statements to combine with OR logic. You can use any statements that can be nested.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

A logical rule statement used to combine other rule statements with OR logic. You provide more than one Statement within the OrStatement.

" + "documentation":"

A logical rule statement used to combine other rule statements with OR logic. You provide more than one Statement within the OrStatement.

" }, "OverrideAction":{ "type":"structure", @@ -2613,7 +2686,10 @@ "POSITION", "FORWARDED_IP_CONFIG", "IP_SET_FORWARDED_IP_CONFIG", - "HEADER_NAME" + "HEADER_NAME", + "JSON_MATCH_PATTERN", + "JSON_MATCH_SCOPE", + "BODY_PARSING_FALLBACK_BEHAVIOR" ] }, "ParameterExceptionParameter":{ @@ -2682,7 +2758,7 @@ "type":"structure", "members":{ }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

The query string of a web request. This is the part of a URL that appears after a ? character, if any.

This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.

" + "documentation":"

The query string of a web request. This is the part of a URL that appears after a ? character, if any.

This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.

" }, "RateBasedStatement":{ "type":"structure", @@ -2708,7 +2784,7 @@ "documentation":"

The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.

If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all.

This is required if AggregateKeyType is set to FORWARDED_IP.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.

When the rule action triggers, AWS WAF blocks additional requests from the IP address until the request rate falls below the limit.

You can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts requests that match the nested statement. For example, based on recent requests that you have seen from an attacker, you might create a rate-based rule with a nested AND rule statement that contains the following nested statements:

In this rate-based rule, you also define a rate limit. For this example, the rate limit is 1,000. Requests that meet both of the conditions in the statements are counted. If the count exceeds 1,000 requests per five minutes, the rule action triggers. Requests that do not meet both conditions are not counted towards the rate limit and are not affected by this rule.

You cannot nest a RateBasedStatement, for example for use inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.

" + "documentation":"

A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.

When the rule action triggers, AWS WAF blocks additional requests from the IP address until the request rate falls below the limit.

You can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts requests that match the nested statement. For example, based on recent requests that you have seen from an attacker, you might create a rate-based rule with a nested AND rule statement that contains the following nested statements:

In this rate-based rule, you also define a rate limit. For this example, the rate limit is 1,000. Requests that meet both of the conditions in the statements are counted. If the count exceeds 1,000 requests per five minutes, the rule action triggers. Requests that do not meet both conditions are not counted towards the rate limit and are not affected by this rule.

You cannot nest a RateBasedStatement, for example for use inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.

" }, "RateBasedStatementAggregateKeyType":{ "type":"string", @@ -2726,7 +2802,7 @@ "documentation":"

The IP addresses that are currently blocked.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

The set of IP addresses that are currently blocked for a rate-based statement.

" + "documentation":"

The set of IP addresses that are currently blocked for a rate-based statement.

" }, "RateLimit":{ "type":"long", @@ -2746,7 +2822,7 @@ "documentation":"

The string representing the regular expression.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

A single regular expression. This is used in a RegexPatternSet.

" + "documentation":"

A single regular expression. This is used in a RegexPatternSet.

" }, "RegexPatternSet":{ "type":"structure", @@ -2765,14 +2841,14 @@ }, "Description":{ "shape":"EntityDescription", - "documentation":"

A description of the set that helps with identification. You cannot change the description of a set after you create it.

" + "documentation":"

A description of the set that helps with identification.

" }, "RegularExpressionList":{ "shape":"RegularExpressionList", "documentation":"

The regular expression patterns in the set.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Contains one or more regular expressions.

AWS WAF assigns an ARN to each RegexPatternSet that you create. To use a set in a rule, you provide the ARN to the Rule statement RegexPatternSetReferenceStatement.

" + "documentation":"

Contains one or more regular expressions.

AWS WAF assigns an ARN to each RegexPatternSet that you create. To use a set in a rule, you provide the ARN to the Rule statement RegexPatternSetReferenceStatement.

" }, "RegexPatternSetReferenceStatement":{ "type":"structure", @@ -2795,7 +2871,7 @@ "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch, starting from the lowest priority setting, before inspecting the content for a match.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

A rule statement used to search web request components for matches with regular expressions. To use this, create a RegexPatternSet that specifies the expressions that you want to detect, then use the ARN of that set in this statement. A web request matches the pattern set rule statement if the request component matches any of the patterns in the set. To create a regex pattern set, see CreateRegexPatternSet.

Each regex pattern set rule statement references a regex pattern set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. When you update the referenced set, AWS WAF automatically updates all rules that reference it.

" + "documentation":"

A rule statement used to search web request components for matches with regular expressions. To use this, create a RegexPatternSet that specifies the expressions that you want to detect, then use the ARN of that set in this statement. A web request matches the pattern set rule statement if the request component matches any of the patterns in the set. To create a regex pattern set, see CreateRegexPatternSet.

Each regex pattern set rule statement references a regex pattern set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. When you update the referenced set, AWS WAF automatically updates all rules that reference it.

" }, "RegexPatternSetSummaries":{ "type":"list", @@ -2814,7 +2890,7 @@ }, "Description":{ "shape":"EntityDescription", - "documentation":"

A description of the set that helps with identification. You cannot change the description of a set after you create it.

" + "documentation":"

A description of the set that helps with identification.

" }, "LockToken":{ "shape":"LockToken", @@ -2825,7 +2901,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the entity.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

High-level information about a RegexPatternSet, returned by operations like create and list. This provides information like the ID, that you can use to retrieve and manage a RegexPatternSet, and the ARN, that you provide to the RegexPatternSetReferenceStatement to use the pattern set in a Rule.

" + "documentation":"

High-level information about a RegexPatternSet, returned by operations like create and list. This provides information like the ID, that you can use to retrieve and manage a RegexPatternSet, and the ARN, that you provide to the RegexPatternSetReferenceStatement to use the pattern set in a Rule.

" }, "RegexPatternString":{ "type":"string", @@ -2889,7 +2965,7 @@ "documentation":"

Defines and enables Amazon CloudWatch metrics and web request sample collection.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

A single rule, which you can use in a WebACL or RuleGroup to identify web requests that you want to allow, block, or count. Each rule includes one top-level Statement that AWS WAF uses to identify matching web requests, and parameters that govern how AWS WAF handles them.

" + "documentation":"

A single rule, which you can use in a WebACL or RuleGroup to identify web requests that you want to allow, block, or count. Each rule includes one top-level Statement that AWS WAF uses to identify matching web requests, and parameters that govern how AWS WAF handles them.

" }, "RuleAction":{ "type":"structure", @@ -2907,7 +2983,7 @@ "documentation":"

Instructs AWS WAF to count the web request and allow it.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

The action that AWS WAF should take on a web request when it matches a rule's statement. Settings at the web ACL level can override the rule action setting.

" + "documentation":"

The action that AWS WAF should take on a web request when it matches a rule's statement. Settings at the web ACL level can override the rule action setting.

" }, "RuleGroup":{ "type":"structure", @@ -2937,7 +3013,7 @@ }, "Description":{ "shape":"EntityDescription", - "documentation":"

A description of the rule group that helps with identification. You cannot change the description of a rule group after you create it.

" + "documentation":"

A description of the rule group that helps with identification.

" }, "Rules":{ "shape":"Rules", @@ -2948,7 +3024,7 @@ "documentation":"

Defines and enables Amazon CloudWatch metrics and web request sample collection.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

A rule group defines a collection of rules to inspect and control web requests that you can use in a WebACL. When you create a rule group, you define an immutable capacity limit. If you update a rule group, you must stay within the capacity. This allows others to reuse the rule group with confidence in its capacity requirements.

" + "documentation":"

A rule group defines a collection of rules to inspect and control web requests that you can use in a WebACL. When you create a rule group, you define an immutable capacity limit. If you update a rule group, you must stay within the capacity. This allows others to reuse the rule group with confidence in its capacity requirements.

" }, "RuleGroupReferenceStatement":{ "type":"structure", @@ -2963,7 +3039,7 @@ "documentation":"

The names of rules that are in the referenced rule group, but that you want AWS WAF to exclude from processing for this rule statement.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

A rule statement used to run the rules that are defined in a RuleGroup. To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.

You cannot nest a RuleGroupReferenceStatement, for example for use inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.

" + "documentation":"

A rule statement used to run the rules that are defined in a RuleGroup. To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.

You cannot nest a RuleGroupReferenceStatement, for example for use inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.

" }, "RuleGroupSummaries":{ "type":"list", @@ -2982,7 +3058,7 @@ }, "Description":{ "shape":"EntityDescription", - "documentation":"

A description of the rule group that helps with identification. You cannot change the description of a rule group after you create it.

" + "documentation":"

A description of the rule group that helps with identification.

" }, "LockToken":{ "shape":"LockToken", @@ -2993,7 +3069,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the entity.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

High-level information about a RuleGroup, returned by operations like create and list. This provides information like the ID, that you can use to retrieve and manage a RuleGroup, and the ARN, that you provide to the RuleGroupReferenceStatement to use the rule group in a Rule.

" + "documentation":"

High-level information about a RuleGroup, returned by operations like create and list. This provides information like the ID, that you can use to retrieve and manage a RuleGroup, and the ARN, that you provide to the RuleGroupReferenceStatement to use the rule group in a Rule.

" }, "RulePriority":{ "type":"integer", @@ -3012,7 +3088,7 @@ }, "Action":{"shape":"RuleAction"} }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

High-level information about a Rule, returned by operations like DescribeManagedRuleGroup. This provides information like the ID, that you can use to retrieve and manage a RuleGroup, and the ARN, that you provide to the RuleGroupReferenceStatement to use the rule group in a Rule.

" + "documentation":"

High-level information about a Rule, returned by operations like DescribeManagedRuleGroup. This provides information like the ID, that you can use to retrieve and manage a RuleGroup, and the ARN, that you provide to the RuleGroupReferenceStatement to use the rule group in a Rule.

" }, "Rules":{ "type":"list", @@ -3050,7 +3126,7 @@ "documentation":"

The name of the Rule that the request matched. For managed rule groups, the format for this name is <vendor name>#<managed rule group name>#<rule name>. For your own rule groups, the format for this name is <rule group name>#<rule name>. If the rule is not in a rule group, this field is absent.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Represents a single sampled web request. The response from GetSampledRequests includes a SampledHTTPRequests complex type that appears as SampledRequests in the response syntax. SampledHTTPRequests contains an array of SampledHTTPRequest objects.

" + "documentation":"

Represents a single sampled web request. The response from GetSampledRequests includes a SampledHTTPRequests complex type that appears as SampledRequests in the response syntax. SampledHTTPRequests contains an array of SampledHTTPRequest objects.

" }, "SampledHTTPRequests":{ "type":"list", @@ -3073,7 +3149,7 @@ "documentation":"

The name of the query header to inspect.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

One of the headers in a web request, identified by name, for example, User-Agent or Referer. This setting isn't case sensitive.

This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.

" + "documentation":"

One of the headers in a web request, identified by name, for example, User-Agent or Referer. This setting isn't case sensitive.

This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.

" }, "SingleQueryArgument":{ "type":"structure", @@ -3084,7 +3160,7 @@ "documentation":"

The name of the query argument to inspect.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

One query argument in a web request, identified by name, for example UserName or SalesRegion. The name can be up to 30 characters long and isn't case sensitive.

" + "documentation":"

One query argument in a web request, identified by name, for example UserName or SalesRegion. The name can be up to 30 characters long and isn't case sensitive.

" }, "Size":{ "type":"long", @@ -3117,7 +3193,7 @@ "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch, starting from the lowest priority setting, before inspecting the content for a match.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.

If you configure AWS WAF to inspect the request body, AWS WAF inspects only the first 8192 bytes (8 KB). If the request body for your web requests never exceeds 8192 bytes, you can create a size constraint condition and block requests that have a request body greater than 8192 bytes.

If you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI /logo.jpg is nine characters long.

" + "documentation":"

A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.

If you configure AWS WAF to inspect the request body, AWS WAF inspects only the first 8192 bytes (8 KB). If the request body for your web requests never exceeds 8192 bytes, you can create a size constraint condition and block requests that have a request body greater than 8192 bytes.

If you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI /logo.jpg is nine characters long.

" }, "SqliMatchStatement":{ "type":"structure", @@ -3135,7 +3211,7 @@ "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch, starting from the lowest priority setting, before inspecting the content for a match.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Attackers sometimes insert malicious SQL code into web requests in an effort to extract data from your database. To allow or block web requests that appear to contain malicious SQL code, create one or more SQL injection match conditions. An SQL injection match condition identifies the part of web requests, such as the URI or the query string, that you want AWS WAF to inspect. Later in the process, when you create a web ACL, you specify whether to allow or block requests that appear to contain malicious SQL code.

" + "documentation":"

Attackers sometimes insert malicious SQL code into web requests in an effort to extract data from your database. To allow or block web requests that appear to contain malicious SQL code, create one or more SQL injection match conditions. An SQL injection match condition identifies the part of web requests, such as the URI or the query string, that you want AWS WAF to inspect. Later in the process, when you create a web ACL, you specify whether to allow or block requests that appear to contain malicious SQL code.

" }, "Statement":{ "type":"structure", @@ -3193,7 +3269,7 @@ "documentation":"

A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups.

You can't nest a ManagedRuleGroupStatement, for example for use inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

The processing guidance for a Rule, used by AWS WAF to determine whether a web request matches the rule.

" + "documentation":"

The processing guidance for a Rule, used by AWS WAF to determine whether a web request matches the rule.

" }, "Statements":{ "type":"list", @@ -3215,7 +3291,7 @@ "documentation":"

Part of the key:value pair that defines a tag. You can use a tag value to describe a specific value within a category, such as \"companyA\" or \"companyB.\" Tag values are case-sensitive.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

A tag associated with an AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing or other management. Typically, the tag key represents a category, such as \"environment\", and the tag value represents a specific value within that category, such as \"test,\" \"development,\" or \"production\". Or you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

You can tag the AWS resources that you manage through AWS WAF: web ACLs, rule groups, IP sets, and regex pattern sets. You can't manage or view tags through the AWS WAF console.

" + "documentation":"

A tag associated with an AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing or other management. Typically, the tag key represents a category, such as \"environment\", and the tag value represents a specific value within that category, such as \"test,\" \"development,\" or \"production\". Or you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

You can tag the AWS resources that you manage through AWS WAF: web ACLs, rule groups, IP sets, and regex pattern sets. You can't manage or view tags through the AWS WAF console.

" }, "TagInfoForResource":{ "type":"structure", @@ -3229,7 +3305,7 @@ "documentation":"

The array of Tag objects defined for the resource.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

The collection of tagging definitions for an AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing or other management. Typically, the tag key represents a category, such as \"environment\", and the tag value represents a specific value within that category, such as \"test,\" \"development,\" or \"production\". Or you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

You can tag the AWS resources that you manage through AWS WAF: web ACLs, rule groups, IP sets, and regex pattern sets. You can't manage or view tags through the AWS WAF console.

" + "documentation":"

The collection of tagging definitions for an AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing or other management. Typically, the tag key represents a category, such as \"environment\", and the tag value represents a specific value within that category, such as \"test,\" \"development,\" or \"production\". Or you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

You can tag the AWS resources that you manage through AWS WAF: web ACLs, rule groups, IP sets, and regex pattern sets. You can't manage or view tags through the AWS WAF console.

" }, "TagKey":{ "type":"string", @@ -3291,7 +3367,7 @@ "documentation":"

You can specify the following transformation types:

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want any text transformations.
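For orientation, a rough sketch of how several of these transformation types might be combined in a wafv2 rule statement through boto3; the field to match, the priorities, and the surrounding statement are illustrative only, and AWS WAF applies the transformations in ascending Priority order before inspecting the content:

# Illustrative fragment of a SQL injection rule statement using text transformations.
sqli_statement = {
    'SqliMatchStatement': {
        'FieldToMatch': {'QueryString': {}},
        'TextTransformations': [
            {'Priority': 0, 'Type': 'URL_DECODE'},
            {'Priority': 1, 'Type': 'HTML_ENTITY_DECODE'},
            {'Priority': 2, 'Type': 'LOWERCASE'},
        ],
    }
}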

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.

" + "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.

" }, "TextTransformationPriority":{ "type":"integer", @@ -3329,7 +3405,7 @@ "documentation":"

The end of the time range from which you want GetSampledRequests to return a sample of the requests that your AWS resource received. You must specify the times in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

In a GetSampledRequests request, the StartTime and EndTime objects specify the time range for which you want AWS WAF to return a sample of web requests.

You must specify the times in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

In a GetSampledRequests response, the StartTime and EndTime objects specify the time range for which AWS WAF actually returned a sample of web requests. AWS WAF gets the specified number of requests from among the first 5,000 requests that your AWS resource receives during the specified time period. If your resource receives more than 5,000 requests during that period, AWS WAF stops sampling after the 5,000th request. In that case, EndTime is the time that AWS WAF received the 5,000th request.

" + "documentation":"

In a GetSampledRequests request, the StartTime and EndTime objects specify the time range for which you want AWS WAF to return a sample of web requests.

You must specify the times in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

In a GetSampledRequests response, the StartTime and EndTime objects specify the time range for which AWS WAF actually returned a sample of web requests. AWS WAF gets the specified number of requests from among the first 5,000 requests that your AWS resource receives during the specified time period. If your resource receives more than 5,000 requests during that period, AWS WAF stops sampling after the 5,000th request. In that case, EndTime is the time that AWS WAF received the 5,000th request.
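As a rough illustration, a TimeWindow can be built from timezone-aware UTC datetimes and passed to get_sampled_requests; the web ACL ARN and rule metric name below are placeholders:

from datetime import datetime, timedelta, timezone

import boto3

wafv2 = boto3.client('wafv2', region_name='us-west-2')
end_time = datetime.now(timezone.utc)
start_time = end_time - timedelta(hours=1)  # must fall within the previous three hours

sample = wafv2.get_sampled_requests(
    WebAclArn='arn:aws:wafv2:us-west-2:123456789012:regional/webacl/example/11111111',  # placeholder
    RuleMetricName='ExampleRuleMetric',  # placeholder
    Scope='REGIONAL',
    TimeWindow={'StartTime': start_time, 'EndTime': end_time},
    MaxItems=100,
)
# The response echoes the window that AWS WAF actually sampled.
print(sample['TimeWindow'])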

" }, "Timestamp":{"type":"timestamp"}, "URIString":{"type":"string"}, @@ -3379,11 +3455,11 @@ }, "Description":{ "shape":"EntityDescription", - "documentation":"

A description of the IP set that helps with identification. You cannot change the description of an IP set after you create it.

" + "documentation":"

A description of the IP set that helps with identification.

" }, "Addresses":{ "shape":"IPAddresses", - "documentation":"

Contains an array of strings that specify one or more IP addresses or blocks of IP addresses in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports all address ranges for IP versions IPv4 and IPv6.

Examples:

For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

" + "documentation":"

Contains an array of strings that specify one or more IP addresses or blocks of IP addresses in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports all IPv4 and IPv6 CIDR ranges except for /0.

Examples:

For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.
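A hedged sketch of replacing an IP set's address list with CIDR ranges; the set name, ID, and addresses are placeholders, and the current LockToken is fetched first because UpdateIPSet requires it:

import boto3

wafv2 = boto3.client('wafv2')

current = wafv2.get_ip_set(
    Name='example-ip-set',                          # placeholder
    Scope='REGIONAL',
    Id='ffffffff-0000-0000-0000-000000000000',      # placeholder
)
wafv2.update_ip_set(
    Name='example-ip-set',
    Scope='REGIONAL',
    Id='ffffffff-0000-0000-0000-000000000000',
    Addresses=['192.0.2.44/32', '203.0.113.0/24', '2001:db8::/32'],
    LockToken=current['LockToken'],
)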

" }, "LockToken":{ "shape":"LockToken", @@ -3424,7 +3500,7 @@ }, "Description":{ "shape":"EntityDescription", - "documentation":"

A description of the set that helps with identification. You cannot change the description of a set after you create it.

" + "documentation":"

A description of the set that helps with identification.

" }, "RegularExpressionList":{ "shape":"RegularExpressionList", @@ -3469,7 +3545,7 @@ }, "Description":{ "shape":"EntityDescription", - "documentation":"

A description of the rule group that helps with identification. You cannot change the description of a rule group after you create it.

" + "documentation":"

A description of the rule group that helps with identification.

" }, "Rules":{ "shape":"Rules", @@ -3523,7 +3599,7 @@ }, "Description":{ "shape":"EntityDescription", - "documentation":"

A description of the Web ACL that helps with identification. You cannot change the description of a Web ACL after you create it.

" + "documentation":"

A description of the Web ACL that helps with identification.

" }, "Rules":{ "shape":"Rules", @@ -3552,7 +3628,7 @@ "type":"structure", "members":{ }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

The path component of the URI of a web request. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg.

This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.

" + "documentation":"

The path component of the URI of a web request. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg.

This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.

" }, "VendorName":{ "type":"string", @@ -3581,7 +3657,7 @@ "documentation":"

A name of the CloudWatch metric. The name can contain only the characters: A-Z, a-z, 0-9, - (hyphen), and _ (underscore). The name can be from one to 128 characters long. It can't contain whitespace or metric names reserved for AWS WAF, for example \"All\" and \"Default_Action.\"

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Defines and enables Amazon CloudWatch metrics and web request sample collection.

" + "documentation":"

Defines and enables Amazon CloudWatch metrics and web request sample collection.

" }, "WAFAssociatedItemException":{ "type":"structure", @@ -3736,7 +3812,7 @@ }, "Description":{ "shape":"EntityDescription", - "documentation":"

A description of the Web ACL that helps with identification. You cannot change the description of a Web ACL after you create it.

" + "documentation":"

A description of the Web ACL that helps with identification.

" }, "Rules":{ "shape":"Rules", @@ -3763,7 +3839,7 @@ "documentation":"

Indicates whether this web ACL is managed by AWS Firewall Manager. If true, then only AWS Firewall Manager can delete the web ACL or any Firewall Manager rule groups in the web ACL.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

A Web ACL defines a collection of rules to use to inspect and control web requests. Each rule has an action defined (allow, block, or count) for requests that match the statement of the rule. In the Web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a Web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a Web ACL with one or more AWS resources to protect. The resources can be Amazon CloudFront, an Amazon API Gateway REST API, an Application Load Balancer, or an AWS AppSync GraphQL API.

" + "documentation":"

A Web ACL defines a collection of rules to use to inspect and control web requests. Each rule has an action defined (allow, block, or count) for requests that match the statement of the rule. In the Web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a Web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a Web ACL with one or more AWS resources to protect. The resources can be Amazon CloudFront, an Amazon API Gateway REST API, an Application Load Balancer, or an AWS AppSync GraphQL API.

" }, "WebACLSummaries":{ "type":"list", @@ -3782,7 +3858,7 @@ }, "Description":{ "shape":"EntityDescription", - "documentation":"

A description of the Web ACL that helps with identification. You cannot change the description of a Web ACL after you create it.

" + "documentation":"

A description of the Web ACL that helps with identification.

" }, "LockToken":{ "shape":"LockToken", @@ -3793,7 +3869,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the entity.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

High-level information about a WebACL, returned by operations like create and list. This provides information like the ID, that you can use to retrieve and manage a WebACL, and the ARN, that you provide to operations like AssociateWebACL.

" + "documentation":"

High-level information about a WebACL, returned by operations like create and list. This provides information like the ID, that you can use to retrieve and manage a WebACL, and the ARN, that you provide to operations like AssociateWebACL.

" }, "XssMatchStatement":{ "type":"structure", @@ -3811,7 +3887,7 @@ "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch, starting from the lowest priority setting, before inspecting the content for a match.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

A rule statement that defines a cross-site scripting (XSS) match search for AWS WAF to apply to web requests. XSS attacks are those where the attacker uses vulnerabilities in a benign website as a vehicle to inject malicious client-site scripts into other legitimate web browsers. The XSS match statement provides the location in requests that you want AWS WAF to search and text transformations to use on the search area before AWS WAF searches for character sequences that are likely to be malicious strings.

" + "documentation":"

A rule statement that defines a cross-site scripting (XSS) match search for AWS WAF to apply to web requests. XSS attacks are those where the attacker uses vulnerabilities in a benign website as a vehicle to inject malicious client-side scripts into other legitimate web browsers. The XSS match statement provides the location in requests that you want AWS WAF to search and text transformations to use on the search area before AWS WAF searches for character sequences that are likely to be malicious strings.

" } }, "documentation":"

This is the latest version of the AWS WAF API, released in November, 2019. The names of the entities that you use to access this API, like endpoints and namespaces, all have the versioning information added, like \"V2\" or \"v2\", to distinguish from the prior version. We recommend migrating your resources to this version, because it has a number of significant improvements.

If you used AWS WAF prior to this release, you can't use this AWS WAFV2 API to access any AWS WAF resources that you created before. You can access your old rules, web ACLs, and other AWS WAF resources only through the AWS WAF Classic APIs. The AWS WAF Classic APIs have retained the prior names, endpoints, and namespaces.

For information, including how to migrate your AWS WAF resources to this version, see the AWS WAF Developer Guide.

AWS WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to Amazon CloudFront, an Amazon API Gateway REST API, an Application Load Balancer, or an AWS AppSync GraphQL API. AWS WAF also lets you control access to your content. Based on conditions that you specify, such as the IP addresses that requests originate from or the values of query strings, the API Gateway REST API, CloudFront distribution, the Application Load Balancer, or the AWS AppSync GraphQL API responds to requests either with the requested content or with an HTTP 403 status code (Forbidden). You also can configure CloudFront to return a custom error page when a request is blocked.

This API guide is for developers who need detailed information about AWS WAF API actions, data types, and errors. For detailed information about AWS WAF features and an overview of how to use AWS WAF, see the AWS WAF Developer Guide.

You can make calls using the endpoints listed in AWS Service Endpoints for AWS WAF.

Alternatively, you can use one of the AWS SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see AWS SDKs.
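As a quick illustration with boto3 (one of those SDKs), a minimal sketch that lists the regional web ACLs in an account; the v2 API is exposed under the 'wafv2' service name, and most calls take a Scope of REGIONAL or CLOUDFRONT:

import boto3

# CLOUDFRONT-scoped resources must be managed through us-east-1.
wafv2 = boto3.client('wafv2', region_name='us-east-1')
for acl in wafv2.list_web_acls(Scope='REGIONAL')['WebACLs']:
    print(acl['Name'], acl['ARN'])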

We currently provide two versions of the AWS WAF API: this API and the prior versions, the classic AWS WAF APIs. This new API provides the same functionality as the older versions, with the following major improvements:

" diff --git a/botocore/data/wellarchitected/2020-03-31/service-2.json b/botocore/data/wellarchitected/2020-03-31/service-2.json index d24432ea..85712a93 100644 --- a/botocore/data/wellarchitected/2020-03-31/service-2.json +++ b/botocore/data/wellarchitected/2020-03-31/service-2.json @@ -355,6 +355,20 @@ ], "documentation":"

List the workload invitations.

" }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{WorkloadArn}" + }, + "input":{"shape":"ListTagsForResourceInput"}, + "output":{"shape":"ListTagsForResourceOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

List the tags for a resource.

" + }, "ListWorkloadShares":{ "name":"ListWorkloadShares", "http":{ @@ -388,6 +402,34 @@ ], "documentation":"

List workloads. Paginated.

" }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{WorkloadArn}" + }, + "input":{"shape":"TagResourceInput"}, + "output":{"shape":"TagResourceOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Adds one or more tags to the specified resource.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{WorkloadArn}" + }, + "input":{"shape":"UntagResourceInput"}, + "output":{"shape":"UntagResourceOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Deletes specified tags from a resource.
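Taken together, the three new tagging operations map to the following boto3 calls; the workload ARN and tag values are placeholders:

import boto3

wellarchitected = boto3.client('wellarchitected')
workload_arn = 'arn:aws:wellarchitected:us-east-1:123456789012:workload/exampleid'  # placeholder

wellarchitected.tag_resource(WorkloadArn=workload_arn, Tags={'environment': 'test'})
print(wellarchitected.list_tags_for_resource(WorkloadArn=workload_arn)['Tags'])
wellarchitected.untag_resource(WorkloadArn=workload_arn, TagKeys=['environment'])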

" + }, "UpdateAnswer":{ "name":"UpdateAnswer", "http":{ @@ -404,7 +446,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Update the answer.

" + "documentation":"

Update the answer to a specific question in a workload review.

" }, "UpdateLensReview":{ "name":"UpdateLensReview", @@ -685,6 +727,10 @@ "ClientRequestToken":{ "shape":"ClientRequestToken", "idempotencyToken":true + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags to be associated with the workload.

" } }, "documentation":"

Input for workload creation.

" @@ -1433,6 +1479,26 @@ }, "documentation":"

Input for List Share Invitations

" }, + "ListTagsForResourceInput":{ + "type":"structure", + "required":["WorkloadArn"], + "members":{ + "WorkloadArn":{ + "shape":"WorkloadArn", + "location":"uri", + "locationName":"WorkloadArn" + } + } + }, + "ListTagsForResourceOutput":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags for the resource.

" + } + } + }, "ListWorkloadSharesInput":{ "type":"structure", "required":["WorkloadId"], @@ -1804,6 +1870,52 @@ "type":"string", "max":100 }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":1 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":1 + }, + "TagResourceInput":{ + "type":"structure", + "required":[ + "WorkloadArn", + "Tags" + ], + "members":{ + "WorkloadArn":{ + "shape":"WorkloadArn", + "location":"uri", + "locationName":"WorkloadArn" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags for the resource.

" + } + } + }, + "TagResourceOutput":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, "ThrottlingException":{ "type":"structure", "required":["Message"], @@ -1820,6 +1932,31 @@ "type":"timestamp", "documentation":"

The date and time recorded.

" }, + "UntagResourceInput":{ + "type":"structure", + "required":[ + "WorkloadArn", + "TagKeys" + ], + "members":{ + "WorkloadArn":{ + "shape":"WorkloadArn", + "location":"uri", + "locationName":"WorkloadArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

The keys of the tags to be removed.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceOutput":{ + "type":"structure", + "members":{ + } + }, "UpdateAnswerInput":{ "type":"structure", "required":[ @@ -2085,6 +2222,10 @@ "ShareInvitationId":{ "shape":"ShareInvitationId", "documentation":"

The ID assigned to the share invitation.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags associated with the workload.

" } }, "documentation":"

A workload return object.

" diff --git a/botocore/data/workmail/2017-10-01/service-2.json b/botocore/data/workmail/2017-10-01/service-2.json index affbfdcb..2b2ce033 100644 --- a/botocore/data/workmail/2017-10-01/service-2.json +++ b/botocore/data/workmail/2017-10-01/service-2.json @@ -2155,7 +2155,7 @@ "IpRangeList":{ "type":"list", "member":{"shape":"IpRange"}, - "max":10, + "max":1024, "min":0 }, "Jobs":{ diff --git a/botocore/data/workmailmessageflow/2019-05-01/service-2.json b/botocore/data/workmailmessageflow/2019-05-01/service-2.json index 6d4d4fda..aa8aef07 100644 --- a/botocore/data/workmailmessageflow/2019-05-01/service-2.json +++ b/botocore/data/workmailmessageflow/2019-05-01/service-2.json @@ -22,7 +22,23 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"

Retrieves the raw content of an in-transit email message, in MIME format.

" + "documentation":"

Retrieves the raw content of an in-transit email message, in MIME format.

" + }, + "PutRawMessageContent":{ + "name":"PutRawMessageContent", + "http":{ + "method":"POST", + "requestUri":"/messages/{messageId}" + }, + "input":{"shape":"PutRawMessageContentRequest"}, + "output":{"shape":"PutRawMessageContentResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidContentLocation"}, + {"shape":"MessageRejected"}, + {"shape":"MessageFrozen"} + ], + "documentation":"

Updates the raw content of an in-transit email message, in MIME format.

This example describes how to update an in-transit email message. For more information and examples of using this API, see Updating message content with AWS Lambda.

Updates to an in-transit message only appear when you call PutRawMessageContent from an AWS Lambda function configured with a synchronous Run Lambda rule. If you call PutRawMessageContent on a delivered or sent message, the message remains unchanged, even though GetRawMessageContent returns an updated message.
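A minimal sketch of such an update, assuming it runs inside the synchronous Run Lambda rule that is handling the message; the message ID, bucket, and key are placeholders standing in for values taken from the Lambda event and from wherever the rewritten MIME content was uploaded:

import boto3

messageflow = boto3.client('workmailmessageflow')

messageflow.put_raw_message_content(
    messageId='00000000-0000-0000-0000-000000000000',   # from the Lambda event (placeholder)
    content={
        's3Reference': {
            'bucket': 'example-updated-mime-bucket',     # placeholder
            'key': 'updated-message.eml',                # placeholder
        }
    },
)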

" } }, "shapes":{ @@ -49,6 +65,65 @@ }, "payload":"messageContent" }, + "InvalidContentLocation":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "documentation":"

WorkMail could not access the updated email content. Possible reasons:

", + "exception":true + }, + "MessageFrozen":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "documentation":"

The requested email is not eligible for update. This is usually the case for a redirected email.

", + "exception":true + }, + "MessageRejected":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "documentation":"

The requested email could not be updated due to an error in the MIME content. Check the error message for more information about what caused the error.

", + "exception":true + }, + "PutRawMessageContentRequest":{ + "type":"structure", + "required":[ + "messageId", + "content" + ], + "members":{ + "messageId":{ + "shape":"messageIdType", + "documentation":"

The identifier of the email message being updated.

", + "location":"uri", + "locationName":"messageId" + }, + "content":{ + "shape":"RawMessageContent", + "documentation":"

Describes the raw message content of the updated email message.

" + } + } + }, + "PutRawMessageContentResponse":{ + "type":"structure", + "members":{ + } + }, + "RawMessageContent":{ + "type":"structure", + "required":["s3Reference"], + "members":{ + "s3Reference":{ + "shape":"S3Reference", + "documentation":"

The S3 reference of an email message.

" + } + }, + "documentation":"

Provides the MIME content of the updated email message as an S3 object. All MIME content must meet the following criteria:

" + }, "ResourceNotFoundException":{ "type":"structure", "members":{ @@ -58,6 +133,28 @@ "error":{"httpStatusCode":404}, "exception":true }, + "S3Reference":{ + "type":"structure", + "required":[ + "bucket", + "key" + ], + "members":{ + "bucket":{ + "shape":"s3BucketIdType", + "documentation":"

The S3 bucket name.

" + }, + "key":{ + "shape":"s3KeyIdType", + "documentation":"

The S3 object key name.

" + }, + "objectVersion":{ + "shape":"s3VersionType", + "documentation":"

If you enable versioning for the bucket, you can specify the object version.

" + } + }, + "documentation":"

Amazon S3 object representing the updated message content, in MIME format.

The region for the S3 bucket containing the S3 object must match the region used for WorkMail operations. Also, for WorkMail to process an S3 object, it must have permission to access that object. For more information, see Updating message content with AWS Lambda.

" + }, "errorMessage":{"type":"string"}, "messageContentBlob":{ "type":"blob", @@ -68,6 +165,24 @@ "max":120, "min":1, "pattern":"[a-z0-9\\-]*" + }, + "s3BucketIdType":{ + "type":"string", + "max":63, + "min":3, + "pattern":"^[a-z0-9][a-z0-9\\-]*" + }, + "s3KeyIdType":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"[a-zA-Z0-9\\-/]*" + }, + "s3VersionType":{ + "type":"string", + "max":1024, + "min":1, + "pattern":".+" } }, "documentation":"

The WorkMail Message Flow API provides access to email messages as they are being sent and received by a WorkMail organization.

" diff --git a/botocore/data/workspaces/2015-04-08/service-2.json b/botocore/data/workspaces/2015-04-08/service-2.json index 5577a1e8..01c39ef0 100644 --- a/botocore/data/workspaces/2015-04-08/service-2.json +++ b/botocore/data/workspaces/2015-04-08/service-2.json @@ -134,6 +134,24 @@ ], "documentation":"

Creates the specified tags for the specified WorkSpaces resource.

" }, + "CreateWorkspaceBundle":{ + "name":"CreateWorkspaceBundle", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateWorkspaceBundleRequest"}, + "output":{"shape":"CreateWorkspaceBundleResult"}, + "errors":[ + {"shape":"ResourceUnavailableException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"InvalidParameterValuesException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Creates the specified WorkSpace bundle. For more information about creating WorkSpace bundles, see Create a Custom WorkSpaces Image and Bundle.
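A hedged sketch of the new operation; the bundle name, image identifier, and volume sizes are placeholders, and ComputeType, UserStorage, and RootStorage follow the shapes defined later in this file:

import boto3

workspaces = boto3.client('workspaces')

bundle = workspaces.create_workspace_bundle(
    BundleName='example-custom-bundle',
    BundleDescription='Bundle built from a custom image',
    ImageId='wsi-examplexxxxxxxxx',          # placeholder image identifier
    ComputeType={'Name': 'STANDARD'},
    UserStorage={'Capacity': '50'},
    RootStorage={'Capacity': '80'},
)
print(bundle['WorkspaceBundle']['BundleId'])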

" + }, "CreateWorkspaces":{ "name":"CreateWorkspaces", "http":{ @@ -196,6 +214,22 @@ ], "documentation":"

Deletes the specified tags from the specified WorkSpaces resource.

" }, + "DeleteWorkspaceBundle":{ + "name":"DeleteWorkspaceBundle", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteWorkspaceBundleRequest"}, + "output":{"shape":"DeleteWorkspaceBundleResult"}, + "errors":[ + {"shape":"InvalidParameterValuesException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceAssociatedException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Deletes the specified WorkSpace bundle. For more information about deleting WorkSpace bundles, see Delete a Custom WorkSpaces Bundle or Image.

" + }, "DeleteWorkspaceImage":{ "name":"DeleteWorkspaceImage", "http":{ @@ -754,6 +788,22 @@ ], "documentation":"

Replaces the current rules of the specified IP access control group with the specified rules.

" }, + "UpdateWorkspaceBundle":{ + "name":"UpdateWorkspaceBundle", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateWorkspaceBundleRequest"}, + "output":{"shape":"UpdateWorkspaceBundleResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValuesException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceUnavailableException"} + ], + "documentation":"

Updates a WorkSpace bundle with a new image. For more information about updating WorkSpace bundles, see Update a Custom WorkSpaces Bundle.

Existing WorkSpaces aren't automatically updated when you update the bundle that they're based on. To update existing WorkSpaces that are based on a bundle that you've updated, you must either rebuild the WorkSpaces or delete and recreate them.
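And a corresponding sketch of pointing an existing bundle at a new image (both identifiers are placeholders); as noted above, WorkSpaces already launched from the bundle need a rebuild to pick up the change:

import boto3

workspaces = boto3.client('workspaces')
workspaces.update_workspace_bundle(
    BundleId='wsb-exampleidxx',       # placeholder
    ImageId='wsi-examplexxxxxxxxx',   # placeholder
)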

" + }, "UpdateWorkspaceImagePermission":{ "name":"UpdateWorkspaceImagePermission", "http":{ @@ -988,7 +1038,7 @@ "documentation":"

The compute type.

" } }, - "documentation":"

Describes the compute type.

" + "documentation":"

Describes the compute type of the bundle.

" }, "ComputerName":{"type":"string"}, "ConnectionAlias":{ @@ -1230,6 +1280,43 @@ "members":{ } }, + "CreateWorkspaceBundleRequest":{ + "type":"structure", + "required":[ + "BundleName", + "BundleDescription", + "ImageId", + "ComputeType", + "UserStorage" + ], + "members":{ + "BundleName":{ + "shape":"WorkspaceBundleName", + "documentation":"

The name of the bundle.

" + }, + "BundleDescription":{ + "shape":"WorkspaceBundleDescription", + "documentation":"

The description of the bundle.

" + }, + "ImageId":{ + "shape":"WorkspaceImageId", + "documentation":"

The identifier of the image that is used to create the bundle.

" + }, + "ComputeType":{"shape":"ComputeType"}, + "UserStorage":{"shape":"UserStorage"}, + "RootStorage":{"shape":"RootStorage"}, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags associated with the bundle.

To add tags at the same time that you're creating the bundle, you must create an IAM policy that grants your IAM user permissions to use workspaces:CreateTags.

" + } + } + }, + "CreateWorkspaceBundleResult":{ + "type":"structure", + "members":{ + "WorkspaceBundle":{"shape":"WorkspaceBundle"} + } + }, "CreateWorkspacesRequest":{ "type":"structure", "required":["Workspaces"], @@ -1363,6 +1450,20 @@ "members":{ } }, + "DeleteWorkspaceBundleRequest":{ + "type":"structure", + "members":{ + "BundleId":{ + "shape":"BundleId", + "documentation":"

The identifier of the bundle.

" + } + } + }, + "DeleteWorkspaceBundleResult":{ + "type":"structure", + "members":{ + } + }, "DeleteWorkspaceImageRequest":{ "type":"structure", "required":["ImageId"], @@ -1411,7 +1512,7 @@ }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

The token to use to retrieve the next set of results, or null if no more results are available.

" + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

" } } }, @@ -1483,7 +1584,7 @@ }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

The token to use to retrieve the next set of results, or null if no more results are available.

" + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

" } } }, @@ -1517,7 +1618,7 @@ }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

The token to use to retrieve the next set of results, or null if no more results are available.

" + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

" } } }, @@ -1547,7 +1648,7 @@ }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

The token to use to retrieve the next set of results, or null if no more results are available.

" + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

" } } }, @@ -1579,7 +1680,7 @@ }, "Owner":{ "shape":"BundleOwner", - "documentation":"

The owner of the bundles. You cannot combine this parameter with any other filter.

Specify AMAZON to describe the bundles provided by AWS or null to describe the bundles that belong to your account.

" + "documentation":"

The owner of the bundles. You cannot combine this parameter with any other filter.

To describe the bundles provided by AWS, specify AMAZON. To describe the bundles that belong to your account, don't specify a value.

" }, "NextToken":{ "shape":"PaginationToken", @@ -1596,7 +1697,7 @@ }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

The token to use to retrieve the next set of results, or null if there are no more results available. This token is valid for one day and must be used within that time frame.

" + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return. This token is valid for one day and must be used within that time frame.

" } } }, @@ -1626,7 +1727,7 @@ }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

The token to use to retrieve the next set of results, or null if no more results are available.

" + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

" } } }, @@ -1661,7 +1762,7 @@ }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

The token to use to retrieve the next set of results, or null if no more results are available.

" + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

" } } }, @@ -1695,7 +1796,7 @@ }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

The token to use to retrieve the next set of results, or null if no more results are available.

" + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

" } } }, @@ -1744,7 +1845,7 @@ }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

The token to use to retrieve the next set of results, or null if no more results are available.

" + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

" } } }, @@ -1786,7 +1887,7 @@ }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

The token to use to retrieve the next set of results, or null if no more results are available.

" + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

" } } }, @@ -2066,7 +2167,7 @@ }, "NextToken":{ "shape":"PaginationToken", - "documentation":"

The token to use to retrieve the next set of results, or null if no more results are available.

" + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

" } } }, @@ -2859,6 +2960,24 @@ "members":{ } }, + "UpdateWorkspaceBundleRequest":{ + "type":"structure", + "members":{ + "BundleId":{ + "shape":"BundleId", + "documentation":"

The identifier of the bundle.

" + }, + "ImageId":{ + "shape":"WorkspaceImageId", + "documentation":"

The identifier of the image.

" + } + } + }, + "UpdateWorkspaceBundleResult":{ + "type":"structure", + "members":{ + } + }, "UpdateWorkspaceImagePermissionRequest":{ "type":"structure", "required":[ @@ -2896,10 +3015,10 @@ "members":{ "Capacity":{ "shape":"NonEmptyString", - "documentation":"

The size of the user storage.

" + "documentation":"

The size of the user volume.

" } }, - "documentation":"

Describes the user storage for a WorkSpace bundle.

" + "documentation":"

Describes the user volume for a WorkSpace bundle.

" }, "UserVolumeSizeGib":{"type":"integer"}, "VolumeEncryptionKey":{"type":"string"}, @@ -3008,7 +3127,7 @@ "members":{ "BundleId":{ "shape":"BundleId", - "documentation":"

The bundle identifier.

" + "documentation":"

The identifier of the bundle.

" }, "Name":{ "shape":"NonEmptyString", @@ -3020,11 +3139,11 @@ }, "Description":{ "shape":"Description", - "documentation":"

A description.

" + "documentation":"

The description of the bundle.

" }, "ImageId":{ "shape":"WorkspaceImageId", - "documentation":"

The image identifier of the bundle.

" + "documentation":"

The identifier of the image that was used to create the bundle.

" }, "RootStorage":{ "shape":"RootStorage", @@ -3032,19 +3151,35 @@ }, "UserStorage":{ "shape":"UserStorage", - "documentation":"

The size of the user storage.

" + "documentation":"

The size of the user volume.

" }, "ComputeType":{ "shape":"ComputeType", - "documentation":"

The compute type. For more information, see Amazon WorkSpaces Bundles.

" + "documentation":"

The compute type of the bundle. For more information, see Amazon WorkSpaces Bundles.

" }, "LastUpdatedTime":{ "shape":"Timestamp", "documentation":"

The last time that the bundle was updated.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time when the bundle was created.

" } }, "documentation":"

Describes a WorkSpace bundle.

" }, + "WorkspaceBundleDescription":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z0-9_./() -]+$" + }, + "WorkspaceBundleName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9_./()\\\\-]+$" + }, "WorkspaceConnectionStatus":{ "type":"structure", "members":{ diff --git a/botocore/exceptions.py b/botocore/exceptions.py index 21091cea..76330491 100644 --- a/botocore/exceptions.py +++ b/botocore/exceptions.py @@ -500,12 +500,20 @@ class UnsupportedOutpostResourceError(BotoCoreError): ) +class UnsupportedS3ConfigurationError(BotoCoreError): + """Error when an unsupported configuration is used with access-points""" + fmt = ( + 'Unsupported configuration when using S3: {msg}' + ) + + class UnsupportedS3AccesspointConfigurationError(BotoCoreError): """Error when an unsupported configuration is used with access-points""" fmt = ( 'Unsupported configuration when using S3 access-points: {msg}' ) + class InvalidEndpointDiscoveryConfigurationError(BotoCoreError): """Error when invalid value supplied for endpoint_discovery_enabled""" fmt = ( @@ -513,6 +521,7 @@ class InvalidEndpointDiscoveryConfigurationError(BotoCoreError): 'Expected one of ("true", "false", "auto") but got {config_value}.' ) + class UnsupportedS3ControlConfigurationError(BotoCoreError): """Error when an unsupported configuration is used with S3 Control""" fmt = ( diff --git a/botocore/handlers.py b/botocore/handlers.py index 64cfec9f..59a45366 100644 --- a/botocore/handlers.py +++ b/botocore/handlers.py @@ -61,7 +61,7 @@ REGISTER_LAST = object() # (.), hyphens (-), and underscores (_). VALID_BUCKET = re.compile(r'^[a-zA-Z0-9.\-_]{1,255}$') _ACCESSPOINT_ARN = ( - r'^arn:(aws).*:s3:[a-z\-0-9]+:[0-9]{12}:accesspoint[/:]' + r'^arn:(aws).*:(s3|s3-object-lambda):[a-z\-0-9]+:[0-9]{12}:accesspoint[/:]' r'[a-zA-Z0-9\-]{1,63}$' ) _OUTPOST_ARN = ( @@ -80,6 +80,32 @@ def handle_service_name_alias(service_name, **kwargs): return SERVICE_NAME_ALIASES.get(service_name, service_name) +def encode_delete_objects_keys(params, **kwargs): + # Replace \r and \n with the escaped sequence over the whole XML document + # to avoid linebreak normalization modifying customer input when the + # document is parsed. Ideally, we would do this in ElementTree.tostring, + # but it doesn't allow us to override entity escaping for text fields. For + # this operation \r and \n can only appear in the XML document if they were + # passed as part of the customer input. + body = params['body'] + replaced = False + if b'\r' in body: + replaced = True + body = body.replace(b'\r', b' ') + if b'\n' in body: + replaced = True + body = body.replace(b'\n', b' ') + + if not replaced: + return + + params['body'] = body + if 'Content-MD5' in params['headers']: + # The Content-MD5 is now wrong, so we'll need to recalculate it + del params['headers']['Content-MD5'] + conditionally_calculate_md5(params, **kwargs) + + def check_for_200_error(response, **kwargs): # From: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html # There are two opportunities for a copy request to return an error. 
One @@ -957,6 +983,7 @@ BUILTIN_HANDLERS = [ ('before-call.apigateway', add_accept_header), ('before-call.s3.PutObject', conditionally_calculate_md5), ('before-call.s3.UploadPart', conditionally_calculate_md5), + ('before-call.s3.DeleteObjects', encode_delete_objects_keys), ('before-call.glacier.UploadArchive', add_glacier_checksums), ('before-call.glacier.UploadMultipartPart', add_glacier_checksums), ('before-call.ec2.CopySnapshot', inject_presigned_url_ec2), diff --git a/botocore/httpsession.py b/botocore/httpsession.py index 184b9a87..d9dd1cca 100644 --- a/botocore/httpsession.py +++ b/botocore/httpsession.py @@ -45,7 +45,10 @@ def get_cert_path(verify): if verify is not True: return verify - return where() + cert_path = where() + logger.debug("Certificate path: {0}".format(cert_path)) + + return cert_path def create_urllib3_context(ssl_version=None, cert_reqs=None, @@ -255,6 +258,9 @@ class URLLib3Session(object): context = self._get_ssl_context() try: + # urllib3 disables this by default but we need + # it for proper proxy tls negotiation. + context.check_hostname = True if proxy_ca_bundle is not None: context.load_verify_locations(cafile=proxy_ca_bundle) diff --git a/botocore/utils.py b/botocore/utils.py index 468b4ae6..37897224 100644 --- a/botocore/utils.py +++ b/botocore/utils.py @@ -43,7 +43,7 @@ from botocore.exceptions import ( UnsupportedS3AccesspointConfigurationError, SSOTokenLoadError, InvalidRegionError, InvalidIMDSEndpointError, UnsupportedOutpostResourceError, UnsupportedS3ControlConfigurationError, UnsupportedS3ControlArnError, - InvalidHostLabelError, HTTPClientError + InvalidHostLabelError, HTTPClientError, UnsupportedS3ConfigurationError, ) from urllib3.exceptions import LocationParseError @@ -939,7 +939,7 @@ class ArgumentGenerator(object): return '' elif shape.type_name in ['integer', 'long']: return 0 - elif shape.type_name == 'float': + elif shape.type_name in ['float', 'double']: return 0.0 elif shape.type_name == 'boolean': return True @@ -1524,6 +1524,33 @@ class S3EndpointSetter(object): def register(self, event_emitter): event_emitter.register('before-sign.s3', self.set_endpoint) + event_emitter.register( + 'before-call.s3.WriteGetObjectResponse', + self.update_endpoint_to_s3_object_lambda + ) + + def update_endpoint_to_s3_object_lambda(self, params, context, **kwargs): + if self._use_accelerate_endpoint: + raise UnsupportedS3ConfigurationError( + msg='S3 client does not support accelerate endpoints for S3 Object Lambda operations', + ) + + self._override_signing_name(context, 's3-object-lambda') + if self._endpoint_url: + # Only update the url if an explicit url was not provided + return + + resolver = self._endpoint_resolver + resolved = resolver.construct_endpoint('s3-object-lambda', self._region) + + # Ideally we would be able to replace the endpoint before + # serialization but there's no event to do that currently + new_endpoint = 'https://{host_prefix}{hostname}'.format( + host_prefix=params['host_prefix'], + hostname=resolved['hostname'], + ) + + params['url'] = _get_new_endpoint(params['url'], new_endpoint, False) def set_endpoint(self, request, **kwargs): if self._use_accesspoint_endpoint(request): @@ -1550,14 +1577,22 @@ class S3EndpointSetter(object): 'when an access-point ARN is specified.' 
) ) - request_partion = request.context['s3_accesspoint']['partition'] - if request_partion != self._partition: + request_partition = request.context['s3_accesspoint']['partition'] + if request_partition != self._partition: raise UnsupportedS3AccesspointConfigurationError( msg=( 'Client is configured for "%s" partition, but access-point' ' ARN provided is for "%s" partition. The client and ' ' access-point partition must be the same.' % ( - self._partition, request_partion) + self._partition, request_partition) + ) + ) + s3_service = request.context['s3_accesspoint'].get('service') + if s3_service == 's3-object-lambda' and self._s3_config.get('use_dualstack_endpoint'): + raise UnsupportedS3AccesspointConfigurationError( + msg=( + 'Client does not support s3 dualstack configuration ' + 'when an S3 Object Lambda access point ARN is specified.' ) ) outpost_name = request.context['s3_accesspoint'].get('outpost_name') @@ -1581,7 +1616,7 @@ class S3EndpointSetter(object): def _resolve_signing_name_for_accesspoint_endpoint(self, request): accesspoint_service = request.context['s3_accesspoint']['service'] - self._override_signing_name(request, accesspoint_service) + self._override_signing_name(request.context, accesspoint_service) def _switch_to_accesspoint_endpoint(self, request, region_name): original_components = urlsplit(request.url) @@ -1612,6 +1647,8 @@ class S3EndpointSetter(object): if outpost_name: outpost_host = [outpost_name, 's3-outposts'] accesspoint_netloc_components.extend(outpost_host) + elif s3_accesspoint['service'] == 's3-object-lambda': + accesspoint_netloc_components.append('s3-object-lambda') else: accesspoint_netloc_components.append('s3-accesspoint') if self._s3_config.get('use_dualstack_endpoint'): @@ -1650,14 +1687,14 @@ class S3EndpointSetter(object): signing_context['region'] = region_name request.context['signing'] = signing_context - def _override_signing_name(self, request, signing_name): - signing_context = request.context.get('signing', {}) + def _override_signing_name(self, context, signing_name): + signing_context = context.get('signing', {}) # S3SigV4Auth will use the context['signing']['signing_name'] value to # sign with if present. This is used by the Bucket redirector # as well but we should be fine because the redirector is never # used in combination with the accesspoint setting logic. signing_context['signing_name'] = signing_name - request.context['signing'] = signing_context + context['signing'] = signing_context @CachedProperty def _use_accelerate_endpoint(self): @@ -1729,6 +1766,7 @@ class S3EndpointSetter(object): return fix_s3_host + class S3ControlEndpointSetter(object): _DEFAULT_PARTITION = 'aws' _DEFAULT_DNS_SUFFIX = 'amazonaws.com' diff --git a/docs/source/conf.py b/docs/source/conf.py index b934a97d..7cf74518 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -52,9 +52,9 @@ copyright = u'2013, Mitch Garnaat' # built documents. # # The short X.Y version. -version = '1.20' +version = '1.20.' # The full version, including alpha/beta/rc tags. -release = '1.20.0' +release = '1.20.35' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
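To illustrate the endpoint and signing changes above, a hedged sketch of addressing an S3 Object Lambda access point from boto3; the account, region, and access point name are placeholders. The client signs with the s3-object-lambda service and routes to the <name>-<account>.s3-object-lambda.<region>.amazonaws.com host, as exercised by the functional tests below:

import boto3

s3 = boto3.client('s3', region_name='us-west-2')
object_lambda_arn = (
    'arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint/my-banner'  # placeholder
)
obj = s3.get_object(Bucket=object_lambda_arn, Key='example.txt')
print(obj['Body'].read())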
diff --git a/requirements.txt b/requirements.txt index d5296a83..494bcc6c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ tox>=2.5.0,<3.0.0 nose==1.3.7 mock==1.3.0 -wheel==0.24.0 +wheel==0.36.2 behave==1.2.5 jsonschema==2.5.1 diff --git a/setup.cfg b/setup.cfg index e680b271..4347b609 100644 --- a/setup.cfg +++ b/setup.cfg @@ -2,11 +2,14 @@ universal = 1 [metadata] -requires-dist = +requires_dist = python-dateutil>=2.1,<3.0.0 jmespath>=0.7.1,<1.0.0 urllib3>=1.25.4,<1.27 +[options.extras_require] +crt = awscrt==0.10.8 + [egg_info] tag_build = tag_date = 0 diff --git a/setup.py b/setup.py index 90d68b6c..b33a39ff 100644 --- a/setup.py +++ b/setup.py @@ -50,7 +50,6 @@ setup( 'botocore.vendored.requests': ['*.pem']}, include_package_data=True, install_requires=requires, - extras_require={}, license="Apache License 2.0", python_requires=">= 2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*", classifiers=[ diff --git a/tests/functional/test_s3.py b/tests/functional/test_s3.py index b545bea9..eceb1244 100644 --- a/tests/functional/test_s3.py +++ b/tests/functional/test_s3.py @@ -10,6 +10,7 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. +import base64 import re from tests import temporary_file @@ -18,9 +19,13 @@ from nose.tools import assert_equal import botocore.session from botocore.config import Config -from botocore.compat import datetime, urlsplit, parse_qs -from botocore.exceptions import ParamValidationError, ClientError -from botocore.exceptions import InvalidS3UsEast1RegionalEndpointConfigError +from botocore.compat import datetime, urlsplit, parse_qs, get_md5 +from botocore.exceptions import ( + ParamValidationError, ClientError, + UnsupportedS3ConfigurationError, + UnsupportedS3AccesspointConfigurationError, + InvalidS3UsEast1RegionalEndpointConfigError, +) from botocore.parsers import ResponseParserError from botocore import UNSIGNED @@ -44,10 +49,41 @@ class BaseS3OperationTest(BaseSessionTest): class BaseS3ClientConfigurationTest(BaseSessionTest): + _V4_AUTH_REGEX = re.compile( + r'AWS4-HMAC-SHA256 ' + r'Credential=\w+/\d+/' + r'(?P[a-z0-9-]+)/' + r'(?P[a-z0-9-]+)/' + ) + def setUp(self): super(BaseS3ClientConfigurationTest, self).setUp() self.region = 'us-west-2' + def assert_signing_region(self, request, expected_region): + auth_header = request.headers['Authorization'].decode('utf-8') + actual_region = None + match = self._V4_AUTH_REGEX.match(auth_header) + if match: + actual_region = match.group('signing_region') + self.assertEqual(expected_region, actual_region) + + def assert_signing_name(self, request, expected_name): + auth_header = request.headers['Authorization'].decode('utf-8') + actual_name = None + match = self._V4_AUTH_REGEX.match(auth_header) + if match: + actual_name = match.group('signing_name') + self.assertEqual(expected_name, actual_name) + + def assert_signing_region_in_url(self, url, expected_region): + qs_components = parse_qs(urlsplit(url).query) + self.assertIn(expected_region, qs_components['X-Amz-Credential'][0]) + + def assert_endpoint(self, request, expected_endpoint): + actual_endpoint = urlsplit(request.url).netloc + self.assertEqual(actual_endpoint, expected_endpoint) + def create_s3_client(self, **kwargs): client_kwargs = { 'region_name': self.region @@ -467,13 +503,6 @@ class TestS3Copy(BaseS3OperationTest): class 
TestAccesspointArn(BaseS3ClientConfigurationTest):
-    _V4_AUTH_REGEX = re.compile(
-        r'AWS4-HMAC-SHA256 '
-        r'Credential=\w+/\d+/'
-        r'(?P<signing_region>[a-z0-9-]+)/'
-        r'(?P<signing_name>[a-z0-9-]+)/'
-    )
-
     def setUp(self):
         super(TestAccesspointArn, self).setUp()
         self.client, self.http_stubber = self.create_stubbed_s3_client()
@@ -484,22 +513,6 @@ class TestAccesspointArn(BaseS3ClientConfigurationTest):
         http_stubber.start()
         return client, http_stubber

-    def assert_signing_region(self, request, expected_region):
-        auth_header = request.headers['Authorization'].decode('utf-8')
-        actual_region = self._V4_AUTH_REGEX.match(
-            auth_header).group('signing_region')
-        self.assertEqual(expected_region, actual_region)
-
-    def assert_signing_name(self, request, expected_name):
-        auth_header = request.headers['Authorization'].decode('utf-8')
-        actual_name = self._V4_AUTH_REGEX.match(
-            auth_header).group('signing_name')
-        self.assertEqual(expected_name, actual_name)
-
-    def assert_signing_region_in_url(self, url, expected_region):
-        qs_components = parse_qs(urlsplit(url).query)
-        self.assertIn(expected_region, qs_components['X-Amz-Credential'][0])
-
     def assert_expected_copy_source_header(self, http_stubber,
                                            expected_copy_source):
         request = self.http_stubber.requests[0]
@@ -512,10 +525,6 @@ class TestAccesspointArn(BaseS3ClientConfigurationTest):
             body=b''
         )

-    def assert_endpoint(self, request, expected_endpoint):
-        actual_endpoint = urlsplit(request.url).netloc
-        self.assertEqual(actual_endpoint, expected_endpoint)
-
     def test_missing_region_in_arn(self):
         accesspoint_arn = (
             'arn:aws:s3::123456789012:accesspoint:myendpoint'
@@ -782,9 +791,7 @@ class TestAccesspointArn(BaseS3ClientConfigurationTest):
         )
         self.client, _ = self.create_stubbed_s3_client(
             config=Config(s3={'use_accelerate_endpoint': True}))
-        with self.assertRaises(
-                botocore.exceptions.
-                UnsupportedS3AccesspointConfigurationError):
+        with self.assertRaises(UnsupportedS3AccesspointConfigurationError):
             self.client.list_objects(Bucket=outpost_arn)

     def test_outpost_arn_with_s3_dualstack(self):
@@ -794,9 +801,7 @@ class TestAccesspointArn(BaseS3ClientConfigurationTest):
         )
         self.client, _ = self.create_stubbed_s3_client(
             config=Config(s3={'use_dualstack_endpoint': True}))
-        with self.assertRaises(
-                botocore.exceptions.
-                UnsupportedS3AccesspointConfigurationError):
+        with self.assertRaises(UnsupportedS3AccesspointConfigurationError):
             self.client.list_objects(Bucket=outpost_arn)

     def test_incorrect_outpost_format(self):
@@ -837,6 +842,54 @@ class TestAccesspointArn(BaseS3ClientConfigurationTest):
         with self.assertRaises(botocore.exceptions.ParamValidationError):
             self.client.list_objects(Bucket=outpost_arn)

+    def test_s3_object_lambda_arn_with_s3_dualstack(self):
+        s3_object_lambda_arn = (
+            'arn:aws:s3-object-lambda:us-west-2:123456789012:'
+            'accesspoint/myBanner'
+        )
+        self.client, _ = self.create_stubbed_s3_client(
+            config=Config(s3={'use_dualstack_endpoint': True}))
+        with self.assertRaises(UnsupportedS3AccesspointConfigurationError):
+            self.client.list_objects(Bucket=s3_object_lambda_arn)
+
+    def test_s3_object_lambda_arn_with_us_east_1(self):
+        # test that us-east-1 region is not resolved
+        # into s3 global endpoint
+        s3_object_lambda_arn = (
+            'arn:aws:s3-object-lambda:us-east-1:123456789012:'
+            'accesspoint/myBanner'
+        )
+        self.client, self.http_stubber = self.create_stubbed_s3_client(
+            region_name='us-east-1',
+            config=Config(s3={'use_arn_region': False})
+        )
+        self.http_stubber.add_response()
+        self.client.list_objects(Bucket=s3_object_lambda_arn)
+        request = self.http_stubber.requests[0]
+        self.assert_signing_name(request, 's3-object-lambda')
+        self.assert_signing_region(request, 'us-east-1')
+        expected_endpoint = (
+            'myBanner-123456789012.s3-object-lambda.us-east-1.amazonaws.com'
+        )
+        self.assert_endpoint(request, expected_endpoint)
+
+    def test_basic_s3_object_lambda_arn(self):
+        s3_object_lambda_arn = (
+            'arn:aws:s3-object-lambda:us-west-2:123456789012:'
+            'accesspoint/myBanner'
+        )
+        self.client, self.http_stubber = self.create_stubbed_s3_client(
+            region_name='us-east-1')
+        self.http_stubber.add_response()
+        self.client.list_objects(Bucket=s3_object_lambda_arn)
+        request = self.http_stubber.requests[0]
+        self.assert_signing_name(request, 's3-object-lambda')
+        self.assert_signing_region(request, 'us-west-2')
+        expected_endpoint = (
+            'myBanner-123456789012.s3-object-lambda.us-west-2.amazonaws.com'
+        )
+        self.assert_endpoint(request, expected_endpoint)
+

 class TestOnlyAsciiCharsAllowed(BaseS3OperationTest):
     def test_validates_non_ascii_chars_trigger_validation_error(self):
@@ -933,6 +986,59 @@ class TestS3PutObject(BaseS3OperationTest):
         self.assertEqual(len(http_stubber.requests), 2)


+class TestWriteGetObjectResponse(BaseS3ClientConfigurationTest):
+    def create_stubbed_s3_client(self, **kwargs):
+        client = self.create_s3_client(**kwargs)
+        http_stubber = ClientHTTPStubber(client)
+        http_stubber.start()
+        return client, http_stubber
+
+    def test_endpoint_redirection(self):
+        regions = ['us-west-2', 'us-east-1']
+        for region in regions:
+            self.client, self.http_stubber = self.create_stubbed_s3_client(
+                region_name=region)
+            self.http_stubber.add_response()
+            self.client.write_get_object_response(
+                RequestRoute='endpoint-io.a1c1d5c7',
+                RequestToken='SecretToken',
+            )
+            request = self.http_stubber.requests[0]
+            self.assert_signing_name(request, 's3-object-lambda')
+            self.assert_signing_region(request, region)
+            expected_endpoint = (
+                'endpoint-io.a1c1d5c7.s3-object-lambda.'
+                '%s.amazonaws.com' % region
+            )
+            self.assert_endpoint(request, expected_endpoint)
+
+    def test_endpoint_redirection_fails_with_custom_endpoint(self):
+        self.client, self.http_stubber = self.create_stubbed_s3_client(
+            region_name='us-west-2', endpoint_url="https://example.com")
+        self.http_stubber.add_response()
+        self.client.write_get_object_response(
+            RequestRoute='endpoint-io.a1c1d5c7',
+            RequestToken='SecretToken',
+        )
+        request = self.http_stubber.requests[0]
+        self.assert_signing_name(request, 's3-object-lambda')
+        self.assert_signing_region(request, 'us-west-2')
+        self.assert_endpoint(request, 'endpoint-io.a1c1d5c7.example.com')
+
+    def test_endpoint_redirection_fails_with_accelerate_endpoint(self):
+        config = Config(s3={'use_accelerate_endpoint': True})
+        self.client, self.http_stubber = self.create_stubbed_s3_client(
+            region_name='us-west-2',
+            config=config,
+        )
+        self.http_stubber.add_response()
+        with self.assertRaises(UnsupportedS3ConfigurationError):
+            self.client.write_get_object_response(
+                RequestRoute='endpoint-io.a1c1d5c7',
+                RequestToken='SecretToken',
+            )
+
+
 class TestS3SigV4(BaseS3OperationTest):
     def setUp(self):
         super(TestS3SigV4, self).setUp()
@@ -1000,7 +1106,6 @@ class TestS3SigV4(BaseS3OperationTest):
         self.assertNotIn('content-md5', sent_headers)


-
 class TestCanSendIntegerHeaders(BaseSessionTest):

     def test_int_values_with_sigv4(self):
@@ -2246,3 +2351,21 @@ def _verify_presigned_url_addressing(region, bucket, key, s3_config,
     parts = urlsplit(url)
     actual = '%s://%s%s' % parts[:3]
     assert_equal(actual, expected_url)
+
+
+class TestS3DeleteObjects(BaseS3OperationTest):
+    def test_escape_keys_in_xml_payload(self):
+        self.http_stubber.add_response()
+        with self.http_stubber:
+            response = self.client.delete_objects(
+                Bucket='mybucket',
+                Delete={
+                    'Objects': [{'Key': 'some\r\n\rkey'}]
+                },
+            )
+        request = self.http_stubber.requests[0]
+        self.assertNotIn(b'\r\n\r', request.body)
+        self.assertIn(b'&#xD;&#xA;&#xD;', request.body)
+        content_md5_bytes = get_md5(request.body).digest()
+        content_md5 = base64.b64encode(content_md5_bytes)
+        self.assertEqual(content_md5, request.headers['Content-MD5'])
diff --git a/tests/unit/test_credentials.py b/tests/unit/test_credentials.py
index 62bbef93..9277494a 100644
--- a/tests/unit/test_credentials.py
+++ b/tests/unit/test_credentials.py
@@ -2695,6 +2695,22 @@ class TestJSONCache(unittest.TestCase):
         filename = os.path.join(self.tempdir, 'mykey.json')
         self.assertEqual(os.stat(filename).st_mode & 0xFFF, 0o600)

+    def test_cache_with_custom_dumps_func(self):
+        def _custom_serializer(obj):
+            return "custom foo"
+        def _custom_dumps(obj):
+            return json.dumps(obj, default=_custom_serializer)
+        custom_dir = os.path.join(self.tempdir, 'custom')
+        custom_cache = credentials.JSONFileCache(
+            custom_dir,
+            dumps_func=_custom_dumps
+        )
+        custom_cache['test'] = {'bar': object()}
+        self.assertEqual(
+            custom_cache['test'],
+            {'bar': 'custom foo'}
+        )
+

 class TestRefreshLogic(unittest.TestCase):
     def test_mandatory_refresh_needed(self):
diff --git a/tests/unit/test_http_session.py b/tests/unit/test_http_session.py
index b782ef56..1e836426 100644
--- a/tests/unit/test_http_session.py
+++ b/tests/unit/test_http_session.py
@@ -294,6 +294,20 @@ class TestURLLib3Session(unittest.TestCase):
         )
         self.assert_request_sent()

+    def test_proxy_ssl_context_uses_check_hostname(self):
+        cert = ('/some/cert', '/some/key')
+        proxies = {'https': 'https://proxy.com'}
+        proxies_config = {'proxy_client_cert': "path/to/cert"}
+        with patch('botocore.httpsession.create_urllib3_context'):
+            session = URLLib3Session(
+                proxies=proxies, client_cert=cert,
+                proxies_config=proxies_config
+            )
+            self.request.url = 'https://example.com/'
+            session.send(self.request.prepare())
+            last_call = self.proxy_manager_fun.call_args[-1]
+            self.assertIs(last_call['ssl_context'].check_hostname, True)
+
     def test_basic_request(self):
         session = URLLib3Session()
         session.send(self.request.prepare())
diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py
index 0d4a93ba..23c641dc 100644
--- a/tests/unit/test_utils.py
+++ b/tests/unit/test_utils.py
@@ -488,14 +488,16 @@ class TestArgumentGenerator(unittest.TestCase):
                 'B': {'type': 'integer'},
                 'C': {'type': 'float'},
                 'D': {'type': 'boolean'},
-                'E': {'type': 'timestamp'}
+                'E': {'type': 'timestamp'},
+                'F': {'type': 'double'},
             },
             generated_skeleton={
                 'A': '',
                 'B': 0,
                 'C': 0.0,
                 'D': True,
-                'E': datetime.datetime(1970, 1, 1, 0, 0, 0)
+                'E': datetime.datetime(1970, 1, 1, 0, 0, 0),
+                'F': 0.0,
             }
         )

@@ -1926,8 +1928,13 @@ class TestS3EndpointSetter(unittest.TestCase):
     def test_register(self):
         event_emitter = mock.Mock()
         self.endpoint_setter.register(event_emitter)
-        event_emitter.register.assert_called_with(
-            'before-sign.s3', self.endpoint_setter.set_endpoint)
+        event_emitter.register.assert_has_calls([
+            mock.call('before-sign.s3', self.endpoint_setter.set_endpoint),
+            mock.call(
+                'before-call.s3.WriteGetObjectResponse',
+                self.endpoint_setter.update_endpoint_to_s3_object_lambda,
+            )
+        ])

     def test_outpost_endpoint(self):
         request = self.get_s3_outpost_request()