From cb148423d15de65a4e777f6e63333b14f472446a Mon Sep 17 00:00:00 2001 From: Noah Meyerhans Date: Thu, 18 Jun 2020 12:07:50 -0700 Subject: [PATCH] New upstream version 1.17.5+repack --- PKG-INFO | 2 +- botocore.egg-info/PKG-INFO | 2 +- botocore.egg-info/SOURCES.txt | 4 + botocore/__init__.py | 2 +- botocore/credentials.py | 163 +- .../2017-11-09/service-2.json | 29 +- .../data/apigateway/2015-07-09/service-2.json | 26 +- .../data/appconfig/2019-10-09/service-2.json | 349 +- .../data/appmesh/2019-01-25/service-2.json | 170 +- .../data/athena/2017-05-18/paginators-1.json | 24 + .../data/athena/2017-05-18/service-2.json | 654 +++- .../autoscaling/2011-01-01/service-2.json | 256 +- botocore/data/chime/2018-05-01/service-2.json | 78 +- .../cloudformation/2010-05-15/service-2.json | 12 +- .../data/cloudfront/2019-03-26/service-2.json | 62 +- .../codeartifact/2018-09-22/paginators-1.json | 40 + .../2018-09-22/paginators-1.sdk-extras.json | 24 + .../codeartifact/2018-09-22/service-2.json | 2962 +++++++++++++++++ .../cognito-idp/2016-04-18/service-2.json | 6 +- .../2019-11-01/service-2.json | 440 ++- .../dataexchange/2017-07-25/service-2.json | 3 +- .../directconnect/2012-10-25/service-2.json | 204 +- botocore/data/dlm/2018-01-12/service-2.json | 2 +- botocore/data/ec2/2016-11-15/service-2.json | 76 +- botocore/data/ecs/2014-11-13/service-2.json | 73 +- .../elasticache/2015-02-02/service-2.json | 10 + .../2010-12-01/service-2.json | 74 +- botocore/data/emr/2009-03-31/service-2.json | 12 + botocore/data/endpoints.json | 121 +- botocore/data/es/2015-01-01/service-2.json | 470 +++ botocore/data/fsx/2018-03-01/service-2.json | 112 +- botocore/data/glue/2017-03-31/service-2.json | 44 +- .../data/guardduty/2017-11-28/service-2.json | 223 +- botocore/data/iam/2010-05-08/service-2.json | 56 +- .../imagebuilder/2019-12-02/service-2.json | 70 +- .../iot-data/2015-05-28/paginators-1.json | 3 + .../data/iot-data/2015-05-28/service-2.json | 118 +- 
botocore/data/iot/2015-05-28/service-2.json | 161 +- botocore/data/kms/2014-11-01/service-2.json | 91 +- .../data/lambda/2015-03-31/service-2.json | 99 +- .../data/lex-models/2017-04-19/service-2.json | 58 +- .../data/lightsail/2016-11-28/service-2.json | 56 +- .../data/macie2/2020-01-01/service-2.json | 183 +- .../mediaconvert/2017-08-29/service-2.json | 417 ++- .../2018-11-07/service-2.json | 147 +- .../2016-01-14/service-2.json | 2 +- .../2018-05-22/service-2.json | 6 +- .../personalize/2018-05-22/service-2.json | 240 ++ .../data/pinpoint/2016-12-01/service-2.json | 130 +- botocore/data/polly/2016-06-10/service-2.json | 1 + botocore/data/qldb/2019-01-02/service-2.json | 14 +- .../data/route53/2013-04-01/service-2.json | 3 +- .../2017-05-13/service-2.json | 19 +- .../data/sagemaker/2017-07-24/service-2.json | 78 +- .../servicecatalog/2015-12-10/service-2.json | 98 +- .../2017-03-14/service-2.json | 226 +- .../data/shield/2016-06-02/service-2.json | 146 +- .../data/snowball/2016-06-30/service-2.json | 56 +- botocore/data/ssm/2014-11-06/service-2.json | 21 + .../storagegateway/2013-06-30/service-2.json | 357 +- .../data/transfer/2018-11-05/service-2.json | 119 +- .../data/worklink/2018-09-25/service-2.json | 161 +- botocore/exceptions.py | 16 + botocore/handlers.py | 55 +- botocore/model.py | 4 + botocore/serialize.py | 13 + botocore/session.py | 2 +- botocore/signers.py | 2 + botocore/utils.py | 63 +- docs/source/conf.py | 4 +- tests/__init__.py | 4 +- tests/functional/retries/test_bucket.py | 4 +- tests/functional/test_credentials.py | 70 +- tests/functional/test_s3.py | 73 +- tests/unit/test_credentials.py | 199 +- tests/unit/test_handlers.py | 19 +- tests/unit/test_signers.py | 2 + tests/unit/test_utils.py | 30 + 78 files changed, 9419 insertions(+), 1006 deletions(-) create mode 100644 botocore/data/codeartifact/2018-09-22/paginators-1.json create mode 100644 botocore/data/codeartifact/2018-09-22/paginators-1.sdk-extras.json create mode 100644 
botocore/data/codeartifact/2018-09-22/service-2.json create mode 100644 botocore/data/iot-data/2015-05-28/paginators-1.json diff --git a/PKG-INFO b/PKG-INFO index aa498122..825e99be 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: botocore -Version: 1.16.19 +Version: 1.17.5 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services diff --git a/botocore.egg-info/PKG-INFO b/botocore.egg-info/PKG-INFO index aa498122..825e99be 100644 --- a/botocore.egg-info/PKG-INFO +++ b/botocore.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: botocore -Version: 1.16.19 +Version: 1.17.5 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services diff --git a/botocore.egg-info/SOURCES.txt b/botocore.egg-info/SOURCES.txt index 98524b2d..dd8e77d8 100644 --- a/botocore.egg-info/SOURCES.txt +++ b/botocore.egg-info/SOURCES.txt @@ -199,6 +199,9 @@ botocore/data/cloudwatch/2010-08-01/examples-1.json botocore/data/cloudwatch/2010-08-01/paginators-1.json botocore/data/cloudwatch/2010-08-01/service-2.json botocore/data/cloudwatch/2010-08-01/waiters-2.json +botocore/data/codeartifact/2018-09-22/paginators-1.json +botocore/data/codeartifact/2018-09-22/paginators-1.sdk-extras.json +botocore/data/codeartifact/2018-09-22/service-2.json botocore/data/codebuild/2016-10-06/examples-1.json botocore/data/codebuild/2016-10-06/paginators-1.json botocore/data/codebuild/2016-10-06/service-2.json @@ -416,6 +419,7 @@ botocore/data/inspector/2015-08-18/service-2.json botocore/data/inspector/2016-02-16/examples-1.json botocore/data/inspector/2016-02-16/paginators-1.json botocore/data/inspector/2016-02-16/service-2.json +botocore/data/iot-data/2015-05-28/paginators-1.json botocore/data/iot-data/2015-05-28/service-2.json botocore/data/iot-jobs-data/2017-09-29/examples-1.json botocore/data/iot-jobs-data/2017-09-29/paginators-1.json diff --git 
a/botocore/__init__.py b/botocore/__init__.py index 3040e500..c8e01cd7 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re import logging -__version__ = '1.16.19' +__version__ = '1.17.5' class NullHandler(logging.Handler): diff --git a/botocore/credentials.py b/botocore/credentials.py index 0a551e8e..d2da3d65 100644 --- a/botocore/credentials.py +++ b/botocore/credentials.py @@ -24,7 +24,7 @@ from copy import deepcopy from hashlib import sha1 from dateutil.parser import parse -from dateutil.tz import tzlocal +from dateutil.tz import tzlocal, tzutc import botocore.configloader import botocore.compat @@ -40,9 +40,11 @@ from botocore.exceptions import InfiniteLoopConfigError from botocore.exceptions import RefreshWithMFAUnsupportedError from botocore.exceptions import MetadataRetrievalError from botocore.exceptions import CredentialRetrievalError +from botocore.exceptions import UnauthorizedSSOTokenError from botocore.utils import InstanceMetadataFetcher, parse_key_val_file from botocore.utils import ContainerMetadataFetcher from botocore.utils import FileWebIdentityTokenLoader +from botocore.utils import SSOTokenLoader logger = logging.getLogger(__name__) @@ -138,16 +140,19 @@ class ProfileProviderBuilder(object): This is needed to enable sharing between the default credential chain and the source profile chain created by the assume role provider. 
""" - def __init__(self, session, cache=None, region_name=None): + def __init__(self, session, cache=None, region_name=None, + sso_token_cache=None): self._session = session self._cache = cache self._region_name = region_name + self._sso_token_cache = sso_token_cache def providers(self, profile_name, disable_env_vars=False): return [ self._create_web_identity_provider( profile_name, disable_env_vars, ), + self._create_sso_provider(profile_name), self._create_shared_credential_provider(profile_name), self._create_process_provider(profile_name), self._create_config_provider(profile_name), @@ -183,6 +188,15 @@ class ProfileProviderBuilder(object): disable_env_vars=disable_env_vars, ) + def _create_sso_provider(self, profile_name): + return SSOProvider( + load_config=lambda: self._session.full_config, + client_creator=self._session.create_client, + profile_name=profile_name, + cache=self._cache, + token_cache=self._sso_token_cache, + ) + def get_credentials(session): resolver = create_credential_resolver(session) @@ -1956,3 +1970,148 @@ class CredentialResolver(object): # +1 # -js return None + + +class SSOCredentialFetcher(CachedCredentialFetcher): + def __init__(self, start_url, sso_region, role_name, account_id, + client_creator, token_loader=None, cache=None, + expiry_window_seconds=None): + self._client_creator = client_creator + self._sso_region = sso_region + self._role_name = role_name + self._account_id = account_id + self._start_url = start_url + self._token_loader = token_loader + + super(SSOCredentialFetcher, self).__init__( + cache, expiry_window_seconds + ) + + def _create_cache_key(self): + """Create a predictable cache key for the current configuration. + + The cache key is intended to be compatible with file names. 
+ """ + args = { + 'startUrl': self._start_url, + 'roleName': self._role_name, + 'accountId': self._account_id, + } + # NOTE: It would be good to hoist this cache key construction logic + # into the CachedCredentialFetcher class as we should be consistent. + # Unfortunately, the current assume role fetchers that sub class don't + # pass separators resulting in non-minified JSON. In the long term, + # all fetchers should use the below caching scheme. + args = json.dumps(args, sort_keys=True, separators=(',', ':')) + argument_hash = sha1(args.encode('utf-8')).hexdigest() + return self._make_file_safe(argument_hash) + + def _parse_timestamp(self, timestamp_ms): + # fromtimestamp expects seconds so: milliseconds / 1000 = seconds + timestamp_seconds = timestamp_ms / 1000.0 + timestamp = datetime.datetime.fromtimestamp(timestamp_seconds, tzutc()) + return _serialize_if_needed(timestamp) + + def _get_credentials(self): + """Get credentials by calling SSO get role credentials.""" + config = Config( + signature_version=UNSIGNED, + region_name=self._sso_region, + ) + client = self._client_creator('sso', config=config) + + kwargs = { + 'roleName': self._role_name, + 'accountId': self._account_id, + 'accessToken': self._token_loader(self._start_url), + } + try: + response = client.get_role_credentials(**kwargs) + except client.exceptions.UnauthorizedException: + raise UnauthorizedSSOTokenError() + credentials = response['roleCredentials'] + + credentials = { + 'ProviderType': 'sso', + 'Credentials': { + 'AccessKeyId': credentials['accessKeyId'], + 'SecretAccessKey': credentials['secretAccessKey'], + 'SessionToken': credentials['sessionToken'], + 'Expiration': self._parse_timestamp(credentials['expiration']), + } + } + return credentials + + +class SSOProvider(CredentialProvider): + METHOD = 'sso' + + _SSO_TOKEN_CACHE_DIR = os.path.expanduser( + os.path.join('~', '.aws', 'sso', 'cache') + ) + _SSO_CONFIG_VARS = [ + 'sso_start_url', + 'sso_region', + 'sso_role_name', + 
'sso_account_id', + ] + + def __init__(self, load_config, client_creator, profile_name, + cache=None, token_cache=None): + if token_cache is None: + token_cache = JSONFileCache(self._SSO_TOKEN_CACHE_DIR) + self._token_cache = token_cache + if cache is None: + cache = {} + self.cache = cache + self._load_config = load_config + self._client_creator = client_creator + self._profile_name = profile_name + + def _load_sso_config(self): + loaded_config = self._load_config() + profiles = loaded_config.get('profiles', {}) + profile_name = self._profile_name + profile_config = profiles.get(self._profile_name, {}) + + if all(c not in profile_config for c in self._SSO_CONFIG_VARS): + return None + + config = {} + missing_config_vars = [] + for config_var in self._SSO_CONFIG_VARS: + if config_var in profile_config: + config[config_var] = profile_config[config_var] + else: + missing_config_vars.append(config_var) + + if missing_config_vars: + missing = ', '.join(missing_config_vars) + raise InvalidConfigError( + error_msg=( + 'The profile "%s" is configured to use SSO but is missing ' + 'required configuration: %s' % (profile_name, missing) + ) + ) + + return config + + def load(self): + sso_config = self._load_sso_config() + if not sso_config: + return None + + sso_fetcher = SSOCredentialFetcher( + sso_config['sso_start_url'], + sso_config['sso_region'], + sso_config['sso_role_name'], + sso_config['sso_account_id'], + self._client_creator, + token_loader=SSOTokenLoader(cache=self._token_cache), + cache=self.cache, + ) + + return DeferredRefreshableCredentials( + method=self.METHOD, + refresh_using=sso_fetcher.fetch_credentials, + ) diff --git a/botocore/data/alexaforbusiness/2017-11-09/service-2.json b/botocore/data/alexaforbusiness/2017-11-09/service-2.json index 868cf9aa..5a5eea08 100644 --- a/botocore/data/alexaforbusiness/2017-11-09/service-2.json +++ b/botocore/data/alexaforbusiness/2017-11-09/service-2.json @@ -1516,6 +1516,7 @@ }, "BusinessReportContentRange":{ 
"type":"structure", + "required":["Interval"], "members":{ "Interval":{ "shape":"BusinessReportInterval", @@ -1917,6 +1918,10 @@ "shape":"ClientRequestToken", "documentation":"

The client request token.

", "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags for the business report schedule.

" } } }, @@ -2227,6 +2232,10 @@ "MeetingRoomConfiguration":{ "shape":"CreateMeetingRoomConfiguration", "documentation":"

The meeting room settings of a room profile.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags for the profile.

" } } }, @@ -2271,7 +2280,7 @@ }, "ProfileArn":{ "shape":"Arn", - "documentation":"

The profile ARN for the room.

" + "documentation":"

The profile ARN for the room. This is required.

" }, "ProviderCalendarId":{ "shape":"ProviderCalendarId", @@ -2313,6 +2322,10 @@ "shape":"ClientRequestToken", "documentation":"

A unique, user-specified identifier for this request that ensures idempotency.

", "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags for the skill group.

" } } }, @@ -2726,7 +2739,7 @@ "documentation":"

The room ARN associated with a device.

" }, "RoomName":{ - "shape":"RoomName", + "shape":"DeviceRoomName", "documentation":"

The name of the room associated with a device.

" }, "DeviceStatusInfo":{ @@ -2813,6 +2826,12 @@ "documentation":"

The request failed because this device is no longer registered and therefore no longer managed by this account.

", "exception":true }, + "DeviceRoomName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*" + }, "DeviceSerialNumber":{ "type":"string", "pattern":"[a-zA-Z0-9]{1,200}" @@ -2862,7 +2881,9 @@ "INVALID_CERTIFICATE_AUTHORITY", "NETWORK_PROFILE_NOT_FOUND", "INVALID_PASSWORD_STATE", - "PASSWORD_NOT_FOUND" + "PASSWORD_NOT_FOUND", + "PASSWORD_MANAGER_ACCESS_DENIED", + "CERTIFICATE_AUTHORITY_ACCESS_DENIED" ] }, "DeviceStatusDetails":{ @@ -5098,7 +5119,7 @@ }, "Reviews":{ "shape":"Reviews", - "documentation":"

The list of reviews for the skill, including Key and Value pair.

" + "documentation":"

This member has been deprecated.

The list of reviews for the skill, including Key and Value pair.

" }, "DeveloperInfo":{ "shape":"DeveloperInfo", diff --git a/botocore/data/apigateway/2015-07-09/service-2.json b/botocore/data/apigateway/2015-07-09/service-2.json index 5a9879b3..f5724ee8 100644 --- a/botocore/data/apigateway/2015-07-09/service-2.json +++ b/botocore/data/apigateway/2015-07-09/service-2.json @@ -4957,7 +4957,7 @@ }, "body":{ "shape":"Blob", - "documentation":"

[Required] The POST request body containing external API definitions. Currently, only OpenAPI definition JSON/YAML files are supported. The maximum size of the API definition file is 2MB.

" + "documentation":"

[Required] The POST request body containing external API definitions. Currently, only OpenAPI definition JSON/YAML files are supported. The maximum size of the API definition file is 6MB.

" } }, "documentation":"

A POST request to import an API to API Gateway using an input of an API definition file.

", @@ -5013,7 +5013,7 @@ }, "cacheNamespace":{ "shape":"String", - "documentation":"

An API-specific tag group of related cached parameters. To be valid values for cacheKeyParameters, these parameters must also be specified for Method requestParameters.

" + "documentation":"

Specifies a group of related cached parameters. By default, API Gateway uses the resource ID as the cacheNamespace. You can specify the same cacheNamespace across resources to return the same cached data for requests to different resources.

" }, "cacheKeyParameters":{ "shape":"ListOfString", @@ -5022,6 +5022,10 @@ "integrationResponses":{ "shape":"MapOfIntegrationResponse", "documentation":"

Specifies the integration's responses.

Example: Get integration responses of a method

Request

GET /restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200 HTTP/1.1 Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com X-Amz-Date: 20160607T191449Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20160607/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash} 
Response

The successful response returns 200 OK status and a payload as follows:

{ \"_links\": { \"curies\": { \"href\": \"https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-response-{rel}.html\", \"name\": \"integrationresponse\", \"templated\": true }, \"self\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\", \"title\": \"200\" }, \"integrationresponse:delete\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\" }, \"integrationresponse:update\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\" } }, \"responseParameters\": { \"method.response.header.Content-Type\": \"'application/xml'\" }, \"responseTemplates\": { \"application/json\": \"$util.urlDecode(\\\"%3CkinesisStreams%3E#foreach($stream in $input.path('$.StreamNames'))%3Cstream%3E%3Cname%3E$stream%3C/name%3E%3C/stream%3E#end%3C/kinesisStreams%3E\\\")\\n\" }, \"statusCode\": \"200\" }

Creating an API
" + }, + "tlsConfig":{ + "shape":"TlsConfig", + "documentation":"

Specifies the TLS configuration for an integration.

" } }, "documentation":"

Represents an HTTP, HTTP_PROXY, AWS, AWS_PROXY, or Mock integration.

In the API Gateway console, the built-in Lambda integration is an AWS integration.
Creating an API
" @@ -5560,11 +5564,11 @@ }, "cacheNamespace":{ "shape":"String", - "documentation":"

A list of request parameters whose values are to be cached.

" + "documentation":"

Specifies a group of related cached parameters. By default, API Gateway uses the resource ID as the cacheNamespace. You can specify the same cacheNamespace across resources to return the same cached data for requests to different resources.

" }, "cacheKeyParameters":{ "shape":"ListOfString", - "documentation":"

An API-specific tag group of related cached parameters.

" + "documentation":"

A list of request parameters whose values API Gateway caches. To be valid values for cacheKeyParameters, these parameters must also be specified for Method requestParameters.

" }, "contentHandling":{ "shape":"ContentHandlingStrategy", @@ -5573,7 +5577,8 @@ "timeoutInMillis":{ "shape":"NullableInteger", "documentation":"

Custom timeout between 50 and 29,000 milliseconds. The default value is 29,000 milliseconds or 29 seconds.

" - } + }, + "tlsConfig":{"shape":"TlsConfig"} }, "documentation":"

Sets up a method's integration.

" }, @@ -5774,7 +5779,7 @@ }, "body":{ "shape":"Blob", - "documentation":"

[Required] The PUT request body containing external API definitions. Currently, only OpenAPI definition JSON/YAML files are supported. The maximum size of the API definition file is 2MB.

" + "documentation":"

[Required] The PUT request body containing external API definitions. Currently, only OpenAPI definition JSON/YAML files are supported. The maximum size of the API definition file is 6MB.

" } }, "documentation":"

A PUT request to update an existing API, with external API definitions specified as the request body.

", @@ -6368,6 +6373,15 @@ "documentation":"

The API request rate limits.

" }, "Timestamp":{"type":"timestamp"}, + "TlsConfig":{ + "type":"structure", + "members":{ + "insecureSkipVerification":{ + "shape":"Boolean", + "documentation":"

Specifies whether or not API Gateway skips verification that the certificate for an integration endpoint is issued by a supported certificate authority. This isn’t recommended, but it enables you to use certificates that are signed by private certificate authorities, or certificates that are self-signed. If enabled, API Gateway still performs basic certificate validation, which includes checking the certificate's expiration date, hostname, and presence of a root certificate authority. Supported only for HTTP and HTTP_PROXY integrations.

" + } + } + }, "TooManyRequestsException":{ "type":"structure", "members":{ diff --git a/botocore/data/appconfig/2019-10-09/service-2.json b/botocore/data/appconfig/2019-10-09/service-2.json index c040ac9b..a94258d3 100644 --- a/botocore/data/appconfig/2019-10-09/service-2.json +++ b/botocore/data/appconfig/2019-10-09/service-2.json @@ -75,6 +75,25 @@ ], "documentation":"

For each application, you define one or more environments. An environment is a logical deployment group of AppConfig targets, such as applications in a Beta or Production environment. You can also define environments for application subcomponents such as the Web, Mobile and Back-end components for your application. You can configure Amazon CloudWatch alarms for each environment. The system monitors alarms during a configuration deployment. If an alarm is triggered, the system rolls back the configuration.

" }, + "CreateHostedConfigurationVersion":{ + "name":"CreateHostedConfigurationVersion", + "http":{ + "method":"POST", + "requestUri":"/applications/{ApplicationId}/configurationprofiles/{ConfigurationProfileId}/hostedconfigurationversions", + "responseCode":201 + }, + "input":{"shape":"CreateHostedConfigurationVersionRequest"}, + "output":{"shape":"HostedConfigurationVersion"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"PayloadTooLargeException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Create a new configuration in the AppConfig configuration store.

" + }, "DeleteApplication":{ "name":"DeleteApplication", "http":{ @@ -137,6 +156,21 @@ ], "documentation":"

Delete an environment. Deleting an environment does not delete a configuration from a host.

" }, + "DeleteHostedConfigurationVersion":{ + "name":"DeleteHostedConfigurationVersion", + "http":{ + "method":"DELETE", + "requestUri":"/applications/{ApplicationId}/configurationprofiles/{ConfigurationProfileId}/hostedconfigurationversions/{VersionNumber}", + "responseCode":204 + }, + "input":{"shape":"DeleteHostedConfigurationVersionRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Delete a version of a configuration from the AppConfig configuration store.

" + }, "GetApplication":{ "name":"GetApplication", "http":{ @@ -165,7 +199,6 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ResourceNotFoundException"}, {"shape":"BadRequestException"} ], "documentation":"

Receive information about a configuration.

AWS AppConfig uses the value of the ClientConfigurationVersion parameter to identify the configuration version on your clients. If you don’t send ClientConfigurationVersion with each call to GetConfiguration, your clients receive the current configuration. You are charged each time your clients receive a configuration.

To avoid excess charges, we recommend that you include the ClientConfigurationVersion value with every call to GetConfiguration. This value must be saved on your client. Subsequent calls to GetConfiguration must pass this value by using the ClientConfigurationVersion parameter.

" @@ -234,6 +267,22 @@ ], "documentation":"

Retrieve information about an environment. An environment is a logical deployment group of AppConfig applications, such as applications in a Production environment or in an EU_Region environment. Each configuration deployment targets an environment. You can enable one or more Amazon CloudWatch alarms for an environment. If an alarm is triggered during a deployment, AppConfig roles back the configuration.

" }, + "GetHostedConfigurationVersion":{ + "name":"GetHostedConfigurationVersion", + "http":{ + "method":"GET", + "requestUri":"/applications/{ApplicationId}/configurationprofiles/{ConfigurationProfileId}/hostedconfigurationversions/{VersionNumber}", + "responseCode":200 + }, + "input":{"shape":"GetHostedConfigurationVersionRequest"}, + "output":{"shape":"HostedConfigurationVersion"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Get information about a specific configuration version.

" + }, "ListApplications":{ "name":"ListApplications", "http":{ @@ -312,6 +361,22 @@ ], "documentation":"

List the environments for an application.

" }, + "ListHostedConfigurationVersions":{ + "name":"ListHostedConfigurationVersions", + "http":{ + "method":"GET", + "requestUri":"/applications/{ApplicationId}/configurationprofiles/{ConfigurationProfileId}/hostedconfigurationversions", + "responseCode":200 + }, + "input":{"shape":"ListHostedConfigurationVersionsRequest"}, + "output":{"shape":"HostedConfigurationVersions"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

View a list of configurations stored in the AppConfig configuration store by version.

" + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -521,7 +586,14 @@ "error":{"httpStatusCode":400}, "exception":true }, - "Blob":{"type":"blob"}, + "Blob":{ + "type":"blob", + "sensitive":true + }, + "BytesMeasure":{ + "type":"string", + "enum":["KILOBYTES"] + }, "Configuration":{ "type":"structure", "members":{ @@ -568,7 +640,7 @@ "documentation":"

The URI location of the configuration.

" }, "RetrievalRoleArn":{ - "shape":"Arn", + "shape":"RoleArn", "documentation":"

The ARN of an IAM role with permission to access the configuration at the specified LocationUri.

" }, "Validators":{ @@ -652,8 +724,7 @@ "required":[ "ApplicationId", "Name", - "LocationUri", - "RetrievalRoleArn" + "LocationUri" ], "members":{ "ApplicationId":{ @@ -675,7 +746,7 @@ "documentation":"

A URI to locate the configuration. You can specify a Systems Manager (SSM) document, an SSM Parameter Store parameter, or an Amazon S3 object. For an SSM document, specify either the document name in the format ssm-document://<Document_name> or the Amazon Resource Name (ARN). For a parameter, specify either the parameter name in the format ssm-parameter://<Parameter_name> or the ARN. For an Amazon S3 object, specify the URI in the following format: s3://<bucket>/<objectKey> . Here is an example: s3://my-bucket/my-app/us-east-1/my-config.json

" }, "RetrievalRoleArn":{ - "shape":"Arn", + "shape":"RoleArn", "documentation":"

The ARN of an IAM role with permission to access the configuration at the specified LocationUri.

" }, "Validators":{ @@ -764,6 +835,53 @@ } } }, + "CreateHostedConfigurationVersionRequest":{ + "type":"structure", + "required":[ + "ApplicationId", + "ConfigurationProfileId", + "Content", + "ContentType" + ], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

The application ID.

", + "location":"uri", + "locationName":"ApplicationId" + }, + "ConfigurationProfileId":{ + "shape":"Id", + "documentation":"

The configuration profile ID.

", + "location":"uri", + "locationName":"ConfigurationProfileId" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of the configuration.

", + "location":"header", + "locationName":"Description" + }, + "Content":{ + "shape":"Blob", + "documentation":"

The content of the configuration or the configuration data.

" + }, + "ContentType":{ + "shape":"StringWithLengthBetween1And255", + "documentation":"

A standard MIME type describing the format of the configuration content. For more information, see Content-Type.

", + "location":"header", + "locationName":"Content-Type" + }, + "LatestVersionNumber":{ + "shape":"Integer", + "documentation":"

An optional locking token used to prevent race conditions from overwriting configuration updates when creating a new version. To ensure your data is not overwritten when creating multiple hosted configuration versions in rapid succession, specify the version of the latest hosted configuration version.

", + "box":true, + "location":"header", + "locationName":"Latest-Version-Number" + } + }, + "payload":"Content" + }, "DeleteApplicationRequest":{ "type":"structure", "required":["ApplicationId"], @@ -830,6 +948,34 @@ } } }, + "DeleteHostedConfigurationVersionRequest":{ + "type":"structure", + "required":[ + "ApplicationId", + "ConfigurationProfileId", + "VersionNumber" + ], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

The application ID.

", + "location":"uri", + "locationName":"ApplicationId" + }, + "ConfigurationProfileId":{ + "shape":"Id", + "documentation":"

The configuration profile ID.

", + "location":"uri", + "locationName":"ConfigurationProfileId" + }, + "VersionNumber":{ + "shape":"Integer", + "documentation":"

The versions number to delete.

", + "location":"uri", + "locationName":"VersionNumber" + } + } + }, "Deployment":{ "type":"structure", "members":{ @@ -1011,7 +1157,7 @@ }, "DeploymentStrategyId":{ "type":"string", - "pattern":"([a-z0-9]{4,7}|arn:aws.*)" + "pattern":"(^[a-z0-9]{4,7}$|^AppConfig\\.[A-Za-z0-9]{9,40}$)" }, "DeploymentStrategyList":{ "type":"list", @@ -1140,6 +1286,7 @@ } } }, + "Float":{"type":"float"}, "GetApplicationRequest":{ "type":"structure", "required":["ApplicationId"], @@ -1276,6 +1423,34 @@ } } }, + "GetHostedConfigurationVersionRequest":{ + "type":"structure", + "required":[ + "ApplicationId", + "ConfigurationProfileId", + "VersionNumber" + ], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

The application ID.

", + "location":"uri", + "locationName":"ApplicationId" + }, + "ConfigurationProfileId":{ + "shape":"Id", + "documentation":"

The configuration profile ID.

", + "location":"uri", + "locationName":"ConfigurationProfileId" + }, + "VersionNumber":{ + "shape":"Integer", + "documentation":"

The version.

", + "location":"uri", + "locationName":"VersionNumber" + } + } + }, "GrowthFactor":{ "type":"float", "max":100.0, @@ -1288,6 +1463,89 @@ "EXPONENTIAL" ] }, + "HostedConfigurationVersion":{ + "type":"structure", + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

The application ID.

", + "location":"header", + "locationName":"Application-Id" + }, + "ConfigurationProfileId":{ + "shape":"Id", + "documentation":"

The configuration profile ID.

", + "location":"header", + "locationName":"Configuration-Profile-Id" + }, + "VersionNumber":{ + "shape":"Integer", + "documentation":"

The configuration version.

", + "location":"header", + "locationName":"Version-Number" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of the configuration.

", + "location":"header", + "locationName":"Description" + }, + "Content":{ + "shape":"Blob", + "documentation":"

The content of the configuration or the configuration data.

" + }, + "ContentType":{ + "shape":"StringWithLengthBetween1And255", + "documentation":"

A standard MIME type describing the format of the configuration content. For more information, see Content-Type.

", + "location":"header", + "locationName":"Content-Type" + } + }, + "payload":"Content" + }, + "HostedConfigurationVersionSummary":{ + "type":"structure", + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

The application ID.

" + }, + "ConfigurationProfileId":{ + "shape":"Id", + "documentation":"

The configuration profile ID.

" + }, + "VersionNumber":{ + "shape":"Integer", + "documentation":"

The configuration version.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of the configuration.

" + }, + "ContentType":{ + "shape":"StringWithLengthBetween1And255", + "documentation":"

A standard MIME type describing the format of the configuration content. For more information, see Content-Type.

" + } + }, + "documentation":"

Information about the configuration.

" + }, + "HostedConfigurationVersionSummaryList":{ + "type":"list", + "member":{"shape":"HostedConfigurationVersionSummary"} + }, + "HostedConfigurationVersions":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"HostedConfigurationVersionSummaryList", + "documentation":"

The elements from this collection.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of items to return. Use this token to get the next set of results.

" + } + } + }, "Id":{ "type":"string", "pattern":"[a-z0-9]{4,7}" @@ -1427,6 +1685,40 @@ } } }, + "ListHostedConfigurationVersionsRequest":{ + "type":"structure", + "required":[ + "ApplicationId", + "ConfigurationProfileId" + ], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

The application ID.

", + "location":"uri", + "locationName":"ApplicationId" + }, + "ConfigurationProfileId":{ + "shape":"Id", + "documentation":"

The configuration profile ID.

", + "location":"uri", + "locationName":"ConfigurationProfileId" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.

", + "box":true, + "location":"querystring", + "locationName":"max_results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token to start the list. Use this token to get the next set of results.

", + "location":"querystring", + "locationName":"next_token" + } + } + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["ResourceArn"], @@ -1457,7 +1749,7 @@ "documentation":"

ARN of the Amazon CloudWatch alarm.

" }, "AlarmRoleArn":{ - "shape":"Arn", + "shape":"RoleArn", "documentation":"

ARN of an IAM role for AppConfig to monitor AlarmArn.

" } }, @@ -1479,6 +1771,18 @@ "max":2048, "min":1 }, + "PayloadTooLargeException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "Measure":{"shape":"BytesMeasure"}, + "Limit":{"shape":"Float"}, + "Size":{"shape":"Float"} + }, + "documentation":"

The configuration size is too large.

", + "error":{"httpStatusCode":413}, + "exception":true + }, "Percentage":{ "type":"float", "max":100.0, @@ -1510,6 +1814,21 @@ } } }, + "RoleArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^((arn):(aws|aws-cn|aws-iso|aws-iso-[a-z]{1}|aws-us-gov):(iam)::\\d{12}:role[/].*)$" + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The number of hosted configuration versions exceeds the limit for the AppConfig configuration store. Delete one or more versions and try again.

", + "error":{"httpStatusCode":402}, + "exception":true + }, "StartDeploymentRequest":{ "type":"structure", "required":[ @@ -1587,7 +1906,13 @@ "StringWithLengthBetween0And32768":{ "type":"string", "max":32768, - "min":0 + "min":0, + "sensitive":true + }, + "StringWithLengthBetween1And255":{ + "type":"string", + "max":255, + "min":1 }, "StringWithLengthBetween1And64":{ "type":"string", @@ -1713,7 +2038,7 @@ "documentation":"

A description of the configuration profile.

" }, "RetrievalRoleArn":{ - "shape":"Arn", + "shape":"RoleArn", "documentation":"

The ARN of an IAM role with permission to access the configuration at the specified LocationUri.

" }, "Validators":{ @@ -1862,9 +2187,9 @@ }, "Version":{ "type":"string", - "max":128, + "max":1024, "min":1 } }, - "documentation":"AWS AppConfig

Use AWS AppConfig, a capability of AWS Systems Manager, to create, manage, and quickly deploy application configurations. AppConfig supports controlled deployments to applications of any size and includes built-in validation checks and monitoring. You can use AppConfig with applications hosted on Amazon EC2 instances, AWS Lambda, containers, mobile applications, or IoT devices.

To prevent errors when deploying application configurations, especially for production systems where a simple typo could cause an unexpected outage, AppConfig includes validators. A validator provides a syntactic or semantic check to ensure that the configuration you want to deploy works as intended. To validate your application configuration data, you provide a schema or a Lambda function that runs against the configuration. The configuration deployment or update can only proceed when the configuration data is valid.

During a configuration deployment, AppConfig monitors the application to ensure that the deployment is successful. If the system encounters an error, AppConfig rolls back the change to minimize impact for your application users. You can configure a deployment strategy for each application or environment that includes deployment criteria, including velocity, bake time, and alarms to monitor. Similar to error monitoring, if a deployment triggers an alarm, AppConfig automatically rolls back to the previous version.

AppConfig supports multiple use cases. Here are some examples.

This reference is intended to be used with the AWS AppConfig User Guide.

" + "documentation":"AWS AppConfig

Use AWS AppConfig, a capability of AWS Systems Manager, to create, manage, and quickly deploy application configurations. AppConfig supports controlled deployments to applications of any size and includes built-in validation checks and monitoring. You can use AppConfig with applications hosted on Amazon EC2 instances, AWS Lambda, containers, mobile applications, or IoT devices.

To prevent errors when deploying application configurations, especially for production systems where a simple typo could cause an unexpected outage, AppConfig includes validators. A validator provides a syntactic or semantic check to ensure that the configuration you want to deploy works as intended. To validate your application configuration data, you provide a schema or a Lambda function that runs against the configuration. The configuration deployment or update can only proceed when the configuration data is valid.

During a configuration deployment, AppConfig monitors the application to ensure that the deployment is successful. If the system encounters an error, AppConfig rolls back the change to minimize impact for your application users. You can configure a deployment strategy for each application or environment that includes deployment criteria, including velocity, bake time, and alarms to monitor. Similar to error monitoring, if a deployment triggers an alarm, AppConfig automatically rolls back to the previous version.

AppConfig supports multiple use cases. Here are some examples.

This reference is intended to be used with the AWS AppConfig User Guide.

" } diff --git a/botocore/data/appmesh/2019-01-25/service-2.json b/botocore/data/appmesh/2019-01-25/service-2.json index 20e79166..a17b0c56 100644 --- a/botocore/data/appmesh/2019-01-25/service-2.json +++ b/botocore/data/appmesh/2019-01-25/service-2.json @@ -1230,11 +1230,11 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

" + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" }, "resourceOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with Shared Meshes.

" + "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" }, "uid": { "shape": "String", @@ -1505,7 +1505,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -1674,7 +1674,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then\n the account that you specify must share the mesh with your account before you can create \n the resource in the service mesh. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then\n the account that you specify must share the mesh with your account before you can create \n the resource in the service mesh. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -1881,6 +1881,23 @@ "senderFault": true } }, + "ListenerTimeout": { + "type": "structure", + "members": { + "grpc": { + "shape": "GrpcTimeout" + }, + "http": { + "shape": "HttpTimeout" + }, + "http2": { + "shape": "HttpTimeout" + }, + "tcp": { + "shape": "TcpTimeout" + } + } + }, "MeshList": { "type": "list", "member": { @@ -1984,10 +2001,12 @@ "documentation": "

The full Amazon Resource Name (ARN) for the route.

" }, "createdAt": { - "shape": "Timestamp" + "shape": "Timestamp", + "documentation": "

The Unix epoch timestamp in seconds for when the resource was created.

" }, "lastUpdatedAt": { - "shape": "Timestamp" + "shape": "Timestamp", + "documentation": "

The Unix epoch timestamp in seconds for when the resource was last updated.

" }, "meshName": { "shape": "ResourceName", @@ -1995,18 +2014,19 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

" + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" }, "resourceOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with Shared Meshes.

" + "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" }, "routeName": { "shape": "ResourceName", "documentation": "

The name of the route.

" }, "version": { - "shape": "Long" + "shape": "Long", + "documentation": "

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" }, "virtualRouterName": { "shape": "ResourceName", @@ -2030,7 +2050,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -2329,7 +2349,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then\n the account that you specify must share the mesh with your account before you can create \n the resource in the service mesh. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then\n the account that you specify must share the mesh with your account before you can create \n the resource in the service mesh. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -2432,6 +2452,17 @@ }, "documentation": "

An object that represents a virtual node service provider.

" }, + "HttpTimeout": { + "type": "structure", + "members": { + "idle": { + "shape": "Duration" + }, + "perRequest": { + "shape": "Duration" + } + } + }, "DeleteVirtualServiceInput": { "type": "structure", "required": [ @@ -2447,7 +2478,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -2528,7 +2559,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -2607,7 +2638,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then\n the account that you specify must share the mesh with your account before you can create \n the resource in the service mesh. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then\n the account that you specify must share the mesh with your account before you can create \n the resource in the service mesh. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -2650,7 +2681,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -2746,7 +2777,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -2775,7 +2806,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -2863,7 +2894,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -2948,7 +2979,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -2985,10 +3016,12 @@ "documentation": "

The full Amazon Resource Name (ARN) for the virtual service.

" }, "createdAt": { - "shape": "Timestamp" + "shape": "Timestamp", + "documentation": "

The Unix epoch timestamp in seconds for when the resource was created.

" }, "lastUpdatedAt": { - "shape": "Timestamp" + "shape": "Timestamp", + "documentation": "

The Unix epoch timestamp in seconds for when the resource was last updated.

" }, "meshName": { "shape": "ResourceName", @@ -2996,14 +3029,15 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

" + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" }, "resourceOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with Shared Meshes.

" + "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" }, "version": { - "shape": "Long" + "shape": "Long", + "documentation": "

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" }, "virtualServiceName": { "shape": "ServiceName", @@ -3012,6 +3046,17 @@ }, "documentation": "

An object that represents a virtual service returned by a list operation.

" }, + "GrpcTimeout": { + "type": "structure", + "members": { + "idle": { + "shape": "Duration" + }, + "perRequest": { + "shape": "Duration" + } + } + }, "VirtualNodeStatus": { "type": "structure", "required": [ @@ -3043,10 +3088,12 @@ "documentation": "

The full Amazon Resource Name (ARN) for the virtual router.

" }, "createdAt": { - "shape": "Timestamp" + "shape": "Timestamp", + "documentation": "

The Unix epoch timestamp in seconds for when the resource was created.

" }, "lastUpdatedAt": { - "shape": "Timestamp" + "shape": "Timestamp", + "documentation": "

The Unix epoch timestamp in seconds for when the resource was last updated.

" }, "meshName": { "shape": "ResourceName", @@ -3054,14 +3101,15 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

" + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" }, "resourceOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with Shared Meshes.

" + "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" }, "version": { - "shape": "Long" + "shape": "Long", + "documentation": "

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" }, "virtualRouterName": { "shape": "ResourceName", @@ -3152,10 +3200,12 @@ "documentation": "

The full Amazon Resource Name (ARN) for the virtual node.

" }, "createdAt": { - "shape": "Timestamp" + "shape": "Timestamp", + "documentation": "

The Unix epoch timestamp in seconds for when the resource was created.

" }, "lastUpdatedAt": { - "shape": "Timestamp" + "shape": "Timestamp", + "documentation": "

The Unix epoch timestamp in seconds for when the resource was last updated.

" }, "meshName": { "shape": "ResourceName", @@ -3163,14 +3213,15 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

" + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" }, "resourceOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with Shared Meshes.

" + "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" }, "version": { - "shape": "Long" + "shape": "Long", + "documentation": "

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" }, "virtualNodeName": { "shape": "ResourceName", @@ -3290,7 +3341,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -3385,6 +3436,9 @@ "action": { "shape": "TcpRouteAction", "documentation": "

The action to take if a match is determined.

" + }, + "timeout": { + "shape": "TcpTimeout" } }, "documentation": "

An object that represents a TCP route type.

" @@ -3415,7 +3469,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -3461,7 +3515,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -3504,7 +3558,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -3649,10 +3703,12 @@ "documentation": "

The full Amazon Resource Name (ARN) of the service mesh.

" }, "createdAt": { - "shape": "Timestamp" + "shape": "Timestamp", + "documentation": "

The Unix epoch timestamp in seconds for when the resource was created.

" }, "lastUpdatedAt": { - "shape": "Timestamp" + "shape": "Timestamp", + "documentation": "

The Unix epoch timestamp in seconds for when the resource was last updated.

" }, "meshName": { "shape": "ResourceName", @@ -3660,14 +3716,15 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

" + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" }, "resourceOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with Shared Meshes.

" + "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" }, "version": { - "shape": "Long" + "shape": "Long", + "documentation": "

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" } }, "documentation": "

An object that represents a service mesh returned by a list operation.

" @@ -3749,7 +3806,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -3789,6 +3846,9 @@ "shape": "PortMapping", "documentation": "

The port mapping information for the listener.

" }, + "timeout": { + "shape": "ListenerTimeout" + }, "tls": { "shape": "ListenerTls", "documentation": "

A reference to an object that represents the Transport Layer Security (TLS) properties for a listener.

" @@ -3814,6 +3874,9 @@ "retryPolicy": { "shape": "GrpcRetryPolicy", "documentation": "

An object that represents a retry policy.

" + }, + "timeout": { + "shape": "GrpcTimeout" } }, "documentation": "

An object that represents a gRPC route type.

" @@ -3942,7 +4005,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -4042,7 +4105,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then\n the account that you specify must share the mesh with your account before you can create \n the resource in the service mesh. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then\n the account that you specify must share the mesh with your account before you can create \n the resource in the service mesh. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -4089,6 +4152,14 @@ "String": { "type": "string" }, + "TcpTimeout": { + "type": "structure", + "members": { + "idle": { + "shape": "Duration" + } + } + }, "HttpScheme": { "type": "string", "enum": [ @@ -4118,7 +4189,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -4159,6 +4230,9 @@ "retryPolicy": { "shape": "HttpRetryPolicy", "documentation": "

An object that represents a retry policy.

" + }, + "timeout": { + "shape": "HttpTimeout" } }, "documentation": "

An object that represents an HTTP or HTTP/2 route type.

" @@ -4177,7 +4251,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" } diff --git a/botocore/data/athena/2017-05-18/paginators-1.json b/botocore/data/athena/2017-05-18/paginators-1.json index 41bbb2da..3b126bab 100644 --- a/botocore/data/athena/2017-05-18/paginators-1.json +++ b/botocore/data/athena/2017-05-18/paginators-1.json @@ -21,6 +21,30 @@ "ResultSet.ResultSetMetadata", "UpdateCount" ] + }, + "ListDataCatalogs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "DataCatalogsSummary" + }, + "ListDatabases": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "DatabaseList" + }, + "ListTableMetadata": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "TableMetadataList" + }, + "ListTagsForResource": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Tags" } } } diff --git a/botocore/data/athena/2017-05-18/service-2.json b/botocore/data/athena/2017-05-18/service-2.json index 35988536..9a2fdf81 100644 --- a/botocore/data/athena/2017-05-18/service-2.json +++ b/botocore/data/athena/2017-05-18/service-2.json @@ -40,6 +40,20 @@ ], "documentation":"

Returns the details of a single query execution or a list of up to 50 query executions, which you provide as an array of query execution ID strings. Requires you to have access to the workgroup in which the queries ran. To get a list of query execution IDs, use ListQueryExecutionsInput$WorkGroup. Query executions differ from named (saved) queries. Use BatchGetNamedQueryInput to get details about named queries.

" }, + "CreateDataCatalog":{ + "name":"CreateDataCatalog", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDataCatalogInput"}, + "output":{"shape":"CreateDataCatalogOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Creates (registers) a data catalog with the specified name and properties. Catalogs created are visible to all users of the same AWS account.

" + }, "CreateNamedQuery":{ "name":"CreateNamedQuery", "http":{ @@ -69,6 +83,20 @@ ], "documentation":"

Creates a workgroup with the specified name.

" }, + "DeleteDataCatalog":{ + "name":"DeleteDataCatalog", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDataCatalogInput"}, + "output":{"shape":"DeleteDataCatalogOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Deletes a data catalog.

" + }, "DeleteNamedQuery":{ "name":"DeleteNamedQuery", "http":{ @@ -99,6 +127,35 @@ "documentation":"

Deletes the workgroup with the specified name. The primary workgroup cannot be deleted.

", "idempotent":true }, + "GetDataCatalog":{ + "name":"GetDataCatalog", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDataCatalogInput"}, + "output":{"shape":"GetDataCatalogOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Returns the specified data catalog.

" + }, + "GetDatabase":{ + "name":"GetDatabase", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDatabaseInput"}, + "output":{"shape":"GetDatabaseOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"}, + {"shape":"MetadataException"} + ], + "documentation":"

Returns a database object for the specified database and data catalog.

" + }, "GetNamedQuery":{ "name":"GetNamedQuery", "http":{ @@ -141,6 +198,21 @@ ], "documentation":"

Streams the results of a single query execution specified by QueryExecutionId from the Athena query results location in Amazon S3. For more information, see Query Results in the Amazon Athena User Guide. This request does not execute the query but returns results. Use StartQueryExecution to run a query.

To stream query results successfully, the IAM principal with permission to call GetQueryResults also must have permissions to the Amazon S3 GetObject action for the Athena query results location.

IAM principals with permission to the Amazon S3 GetObject action for the query results location are able to retrieve query results from Amazon S3 even if permission to the GetQueryResults action is denied. To restrict user or role access, ensure that Amazon S3 permissions to the Athena query location are denied.

" }, + "GetTableMetadata":{ + "name":"GetTableMetadata", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTableMetadataInput"}, + "output":{"shape":"GetTableMetadataOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"}, + {"shape":"MetadataException"} + ], + "documentation":"

Returns table metadata for the specified catalog, database, and table.

" + }, "GetWorkGroup":{ "name":"GetWorkGroup", "http":{ @@ -155,6 +227,35 @@ ], "documentation":"

Returns information about the workgroup with the specified name.

" }, + "ListDataCatalogs":{ + "name":"ListDataCatalogs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDataCatalogsInput"}, + "output":{"shape":"ListDataCatalogsOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Lists the data catalogs in the current AWS account.

" + }, + "ListDatabases":{ + "name":"ListDatabases", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDatabasesInput"}, + "output":{"shape":"ListDatabasesOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"}, + {"shape":"MetadataException"} + ], + "documentation":"

Lists the databases in the specified data catalog.

" + }, "ListNamedQueries":{ "name":"ListNamedQueries", "http":{ @@ -167,7 +268,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Provides a list of available query IDs only for queries saved in the specified workgroup. Requires that you have access to the workgroup. If a workgroup is not specified, lists the saved queries for the primary workgroup.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

" + "documentation":"

Provides a list of available query IDs only for queries saved in the specified workgroup. Requires that you have access to the specified workgroup. If a workgroup is not specified, lists the saved queries for the primary workgroup.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

" }, "ListQueryExecutions":{ "name":"ListQueryExecutions", @@ -183,6 +284,21 @@ ], "documentation":"

Provides a list of available query execution IDs for the queries in the specified workgroup. If a workgroup is not specified, returns a list of query execution IDs for the primary workgroup. Requires you to have access to the workgroup in which the queries ran.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

" }, + "ListTableMetadata":{ + "name":"ListTableMetadata", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTableMetadataInput"}, + "output":{"shape":"ListTableMetadataOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"}, + {"shape":"MetadataException"} + ], + "documentation":"

Lists the metadata for the tables in the specified data catalog database.

" + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -196,7 +312,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Lists the tags associated with this workgroup.

" + "documentation":"

Lists the tags associated with an Athena workgroup or data catalog resource.

" }, "ListWorkGroups":{ "name":"ListWorkGroups", @@ -225,7 +341,7 @@ {"shape":"InvalidRequestException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Runs the SQL query statements contained in the Query. Requires you to have access to the workgroup in which the query ran.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

", + "documentation":"

Runs the SQL query statements contained in the Query. Requires you to have access to the workgroup in which the query ran. Running queries against an external catalog requires GetDataCatalog permission to the catalog. For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

", "idempotent":true }, "StopQueryExecution":{ @@ -256,7 +372,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Adds one or more tags to the resource, such as a workgroup. A tag is a label that you assign to an AWS Athena resource (a workgroup). Each tag consists of a key and an optional value, both of which you define. Tags enable you to categorize resources (workgroups) in Athena, for example, by purpose, owner, or environment. Use a consistent set of tag keys to make it easier to search and filter workgroups in your account. For best practices, see AWS Tagging Strategies. The key length is from 1 (minimum) to 128 (maximum) Unicode characters in UTF-8. The tag value length is from 0 (minimum) to 256 (maximum) Unicode characters in UTF-8. You can use letters and numbers representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Tag keys must be unique per resource. If you specify more than one, separate them by commas.

" + "documentation":"

Adds one or more tags to an Athena resource. A tag is a label that you assign to a resource. In Athena, a resource can be a workgroup or data catalog. Each tag consists of a key and an optional value, both of which you define. For example, you can use tags to categorize Athena workgroups or data catalogs by purpose, owner, or environment. Use a consistent set of tag keys to make it easier to search and filter workgroups or data catalogs in your account. For best practices, see Tagging Best Practices. Tag keys can be from 1 to 128 UTF-8 Unicode characters, and tag values can be from 0 to 256 UTF-8 Unicode characters. Tags can use letters and numbers representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Tag keys must be unique per resource. If you specify more than one tag, separate them by commas.

" }, "UntagResource":{ "name":"UntagResource", @@ -271,7 +387,21 @@ {"shape":"InvalidRequestException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Removes one or more tags from the workgroup resource. Takes as an input a list of TagKey Strings separated by commas, and removes their tags at the same time.

" + "documentation":"

Removes one or more tags from a data catalog or workgroup resource.

" + }, + "UpdateDataCatalog":{ + "name":"UpdateDataCatalog", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDataCatalogInput"}, + "output":{"shape":"UpdateDataCatalogOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Updates the data catalog that has the specified name.

" }, "UpdateWorkGroup":{ "name":"UpdateWorkGroup", @@ -346,6 +476,31 @@ "type":"long", "min":10000000 }, + "CatalogNameString":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, + "Column":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the column.

" + }, + "Type":{ + "shape":"TypeString", + "documentation":"

The data type of the column.

" + }, + "Comment":{ + "shape":"CommentString", + "documentation":"

Optional information about the column.

" + } + }, + "documentation":"

Contains metadata for a column in a table.

" + }, "ColumnInfo":{ "type":"structure", "required":[ @@ -400,6 +555,10 @@ "type":"list", "member":{"shape":"ColumnInfo"} }, + "ColumnList":{ + "type":"list", + "member":{"shape":"Column"} + }, "ColumnNullable":{ "type":"string", "enum":[ @@ -408,6 +567,46 @@ "UNKNOWN" ] }, + "CommentString":{ + "type":"string", + "max":255, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, + "CreateDataCatalogInput":{ + "type":"structure", + "required":[ + "Name", + "Type" + ], + "members":{ + "Name":{ + "shape":"CatalogNameString", + "documentation":"

The name of the data catalog to create. The catalog name must be unique for the AWS account and can use a maximum of 128 alphanumeric, underscore, at sign, or hyphen characters.

" + }, + "Type":{ + "shape":"DataCatalogType", + "documentation":"

The type of data catalog to create: LAMBDA for a federated catalog, GLUE for AWS Glue Catalog, or HIVE for an external hive metastore.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

A description of the data catalog to be created.

" + }, + "Parameters":{ + "shape":"ParametersMap", + "documentation":"

Specifies the Lambda function or functions to use for creating the data catalog. This is a mapping whose values depend on the catalog type.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of comma-separated tags to add to the data catalog that is created.

" + } + } + }, + "CreateDataCatalogOutput":{ + "type":"structure", + "members":{ + } + }, "CreateNamedQueryInput":{ "type":"structure", "required":[ @@ -470,7 +669,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

One or more tags, separated by commas, that you want to attach to the workgroup as you create it.

" + "documentation":"

A list of comma-separated tags to add to the workgroup that is created.

" } } }, @@ -479,6 +678,81 @@ "members":{ } }, + "DataCatalog":{ + "type":"structure", + "required":[ + "Name", + "Type" + ], + "members":{ + "Name":{ + "shape":"CatalogNameString", + "documentation":"

The name of the data catalog. The catalog name must be unique for the AWS account and can use a maximum of 128 alphanumeric, underscore, at sign, or hyphen characters.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

An optional description of the data catalog.

" + }, + "Type":{ + "shape":"DataCatalogType", + "documentation":"

The type of data catalog: LAMBDA for a federated catalog, GLUE for AWS Glue Catalog, or HIVE for an external hive metastore.

" + }, + "Parameters":{ + "shape":"ParametersMap", + "documentation":"

Specifies the Lambda function or functions to use for the data catalog. This is a mapping whose values depend on the catalog type.

" + } + }, + "documentation":"

Contains information about a data catalog in an AWS account.

" + }, + "DataCatalogSummary":{ + "type":"structure", + "members":{ + "CatalogName":{ + "shape":"CatalogNameString", + "documentation":"

The name of the data catalog.

" + }, + "Type":{ + "shape":"DataCatalogType", + "documentation":"

The data catalog type.

" + } + }, + "documentation":"

The summary information for the data catalog, which includes its name and type.

" + }, + "DataCatalogSummaryList":{ + "type":"list", + "member":{"shape":"DataCatalogSummary"} + }, + "DataCatalogType":{ + "type":"string", + "enum":[ + "LAMBDA", + "GLUE", + "HIVE" + ] + }, + "Database":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the database.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

An optional description of the database.

" + }, + "Parameters":{ + "shape":"ParametersMap", + "documentation":"

A set of custom key/value pairs.

" + } + }, + "documentation":"

Contains metadata information for a database in a data catalog.

" + }, + "DatabaseList":{ + "type":"list", + "member":{"shape":"Database"} + }, "DatabaseString":{ "type":"string", "max":255, @@ -495,6 +769,21 @@ }, "documentation":"

A piece of data (a field in the table).

" }, + "DeleteDataCatalogInput":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"CatalogNameString", + "documentation":"

The name of the data catalog to delete.

" + } + } + }, + "DeleteDataCatalogOutput":{ + "type":"structure", + "members":{ + } + }, "DeleteNamedQueryInput":{ "type":"structure", "required":["NamedQueryId"], @@ -565,6 +854,56 @@ "min":1 }, "ErrorMessage":{"type":"string"}, + "ExpressionString":{ + "type":"string", + "max":256, + "min":0 + }, + "GetDataCatalogInput":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"CatalogNameString", + "documentation":"

The name of the data catalog to return.

" + } + } + }, + "GetDataCatalogOutput":{ + "type":"structure", + "members":{ + "DataCatalog":{ + "shape":"DataCatalog", + "documentation":"

The data catalog returned.

" + } + } + }, + "GetDatabaseInput":{ + "type":"structure", + "required":[ + "CatalogName", + "DatabaseName" + ], + "members":{ + "CatalogName":{ + "shape":"CatalogNameString", + "documentation":"

The name of the data catalog that contains the database to return.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the database to return.

" + } + } + }, + "GetDatabaseOutput":{ + "type":"structure", + "members":{ + "Database":{ + "shape":"Database", + "documentation":"

The database returned.

" + } + } + }, "GetNamedQueryInput":{ "type":"structure", "required":["NamedQueryId"], @@ -613,7 +952,7 @@ }, "NextToken":{ "shape":"Token", - "documentation":"

The token that specifies where to start pagination if a previous request was truncated.

" + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" }, "MaxResults":{ "shape":"MaxQueryResults", @@ -634,7 +973,38 @@ }, "NextToken":{ "shape":"Token", - "documentation":"

A token to be used by the next request if this request is truncated.

" + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" + } + } + }, + "GetTableMetadataInput":{ + "type":"structure", + "required":[ + "CatalogName", + "DatabaseName", + "TableName" + ], + "members":{ + "CatalogName":{ + "shape":"CatalogNameString", + "documentation":"

The name of the data catalog that contains the database and table metadata to return.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the database that contains the table metadata to return.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the table for which metadata is returned.

" + } + } + }, + "GetTableMetadataOutput":{ + "type":"structure", + "members":{ + "TableMetadata":{ + "shape":"TableMetadata", + "documentation":"

An object that contains table metadata.

" } } }, @@ -681,12 +1051,75 @@ "documentation":"

Indicates that something is wrong with the input to the request. For example, a required parameter may be missing or out of range.

", "exception":true }, + "KeyString":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, + "ListDataCatalogsInput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" + }, + "MaxResults":{ + "shape":"MaxDataCatalogsCount", + "documentation":"

Specifies the maximum number of data catalogs to return.

" + } + } + }, + "ListDataCatalogsOutput":{ + "type":"structure", + "members":{ + "DataCatalogsSummary":{ + "shape":"DataCatalogSummaryList", + "documentation":"

A summary list of data catalogs.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" + } + } + }, + "ListDatabasesInput":{ + "type":"structure", + "required":["CatalogName"], + "members":{ + "CatalogName":{ + "shape":"CatalogNameString", + "documentation":"

The name of the data catalog that contains the databases to return.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" + }, + "MaxResults":{ + "shape":"MaxDatabasesCount", + "documentation":"

Specifies the maximum number of results to return.

" + } + } + }, + "ListDatabasesOutput":{ + "type":"structure", + "members":{ + "DatabaseList":{ + "shape":"DatabaseList", + "documentation":"

A list of databases from a data catalog.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" + } + } + }, "ListNamedQueriesInput":{ "type":"structure", "members":{ "NextToken":{ "shape":"Token", - "documentation":"

The token that specifies where to start pagination if a previous request was truncated.

" + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" }, "MaxResults":{ "shape":"MaxNamedQueriesCount", @@ -694,7 +1127,7 @@ }, "WorkGroup":{ "shape":"WorkGroupName", - "documentation":"

The name of the workgroup from which the named queries are returned. If a workgroup is not specified, the saved queries for the primary workgroup are returned.

" + "documentation":"

The name of the workgroup from which the named queries are being returned. If a workgroup is not specified, the saved queries for the primary workgroup are returned.

" } } }, @@ -707,7 +1140,7 @@ }, "NextToken":{ "shape":"Token", - "documentation":"

A token to be used by the next request if this request is truncated.

" + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" } } }, @@ -716,7 +1149,7 @@ "members":{ "NextToken":{ "shape":"Token", - "documentation":"

The token that specifies where to start pagination if a previous request was truncated.

" + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" }, "MaxResults":{ "shape":"MaxQueryExecutionsCount", @@ -724,7 +1157,7 @@ }, "WorkGroup":{ "shape":"WorkGroupName", - "documentation":"

The name of the workgroup from which queries are returned. If a workgroup is not specified, a list of available query execution IDs for the queries in the primary workgroup is returned.

" + "documentation":"

The name of the workgroup from which queries are being returned. If a workgroup is not specified, a list of available query execution IDs for the queries in the primary workgroup is returned.

" } } }, @@ -741,21 +1174,63 @@ } } }, + "ListTableMetadataInput":{ + "type":"structure", + "required":[ + "CatalogName", + "DatabaseName" + ], + "members":{ + "CatalogName":{ + "shape":"CatalogNameString", + "documentation":"

The name of the data catalog for which table metadata should be returned.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the database for which table metadata should be returned.

" + }, + "Expression":{ + "shape":"ExpressionString", + "documentation":"

A regex filter that pattern-matches table names. If no expression is supplied, metadata for all tables is listed.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" + }, + "MaxResults":{ + "shape":"MaxTableMetadataCount", + "documentation":"

Specifies the maximum number of results to return.

" + } + } + }, + "ListTableMetadataOutput":{ + "type":"structure", + "members":{ + "TableMetadataList":{ + "shape":"TableMetadataList", + "documentation":"

A list of table metadata.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" + } + } + }, "ListTagsForResourceInput":{ "type":"structure", "required":["ResourceARN"], "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

Lists the tags for the workgroup resource with the specified ARN.

" + "documentation":"

Lists the tags for the resource with the specified ARN.

" }, "NextToken":{ "shape":"Token", - "documentation":"

The token for the next set of results, or null if there are no additional results for this request, where the request lists the tags for the workgroup resource with the specified ARN.

" + "documentation":"

The token for the next set of results, or null if there are no additional results for this request, where the request lists the tags for the resource with the specified ARN.

" }, "MaxResults":{ "shape":"MaxTagsCount", - "documentation":"

The maximum number of results to be returned per request that lists the tags for the workgroup resource.

" + "documentation":"

The maximum number of results to be returned per request that lists the tags for the resource.

" } } }, @@ -764,7 +1239,7 @@ "members":{ "Tags":{ "shape":"TagList", - "documentation":"

The list of tags associated with this workgroup.

" + "documentation":"

The list of tags associated with the specified resource.

" }, "NextToken":{ "shape":"Token", @@ -777,7 +1252,7 @@ "members":{ "NextToken":{ "shape":"Token", - "documentation":"

A token to be used by the next request if this request is truncated.

" + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" }, "MaxResults":{ "shape":"MaxWorkGroupsCount", @@ -794,11 +1269,23 @@ }, "NextToken":{ "shape":"Token", - "documentation":"

A token to be used by the next request if this request is truncated.

" + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" } } }, "Long":{"type":"long"}, + "MaxDataCatalogsCount":{ + "type":"integer", + "box":true, + "max":50, + "min":2 + }, + "MaxDatabasesCount":{ + "type":"integer", + "box":true, + "max":50, + "min":1 + }, "MaxNamedQueriesCount":{ "type":"integer", "box":true, @@ -817,6 +1304,12 @@ "max":1000, "min":1 }, + "MaxTableMetadataCount":{ + "type":"integer", + "box":true, + "max":50, + "min":1 + }, "MaxTagsCount":{ "type":"integer", "box":true, @@ -828,6 +1321,14 @@ "max":50, "min":1 }, + "MetadataException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

An exception that Athena received when it called a custom metastore. Occurs if the error is not caused by user input (InvalidRequestException) or from the Athena platform (InternalServerException). For example, if a user-created Lambda function is missing permissions, the Lambda 4XX exception is returned in a MetadataException.

", + "exception":true + }, "NameString":{ "type":"string", "max":128, @@ -879,6 +1380,15 @@ "type":"list", "member":{"shape":"NamedQuery"} }, + "ParametersMap":{ + "type":"map", + "key":{"shape":"KeyString"}, + "value":{"shape":"ParametersMapValue"} + }, + "ParametersMapValue":{ + "type":"string", + "max":51200 + }, "QueryExecution":{ "type":"structure", "members":{ @@ -922,10 +1432,14 @@ "members":{ "Database":{ "shape":"DatabaseString", - "documentation":"

The name of the database.

" + "documentation":"

The name of the database used in the query execution.

" + }, + "Catalog":{ + "shape":"CatalogNameString", + "documentation":"

The name of the data catalog used in the query execution.

" } }, - "documentation":"

The database in which the query execution occurs.

" + "documentation":"

The database and data catalog context in which the query execution occurs.

" }, "QueryExecutionId":{"type":"string"}, "QueryExecutionIdList":{ @@ -987,7 +1501,7 @@ "members":{ "State":{ "shape":"QueryExecutionState", - "documentation":"

The state of query execution. QUEUED indicates that the query has been submitted to the service, and Athena will execute the query as soon as resources are available. RUNNING indicates that the query is in execution phase. SUCCEEDED indicates that the query completed without errors. FAILED indicates that the query experienced an error and did not complete processing. CANCELLED indicates that a user input interrupted query execution.

" + "documentation":"

The state of query execution. QUEUED indicates that the query has been submitted to the service, and Athena will execute the query as soon as resources are available. RUNNING indicates that the query is in execution phase. SUCCEEDED indicates that the query completed without errors. FAILED indicates that the query experienced an error and did not complete processing. CANCELLED indicates that a user input interrupted query execution.

Athena automatically retries your queries in cases of certain transient errors. As a result, you may see the query state transition from RUNNING or FAILED to QUEUED.

" }, "StateChangeReason":{ "shape":"String", @@ -1066,7 +1580,7 @@ "documentation":"

The metadata that describes the column structure and data types of a table of query results.

" } }, - "documentation":"

The metadata and rows that comprise a query result set. The metadata describes the column structure and data types.

" + "documentation":"

The metadata and rows that comprise a query result set. The metadata describes the column structure and data types. To return a ResultSet object, use GetQueryResults.

" }, "ResultSetMetadata":{ "type":"structure", @@ -1076,7 +1590,7 @@ "documentation":"

Information about the columns returned in a query result metadata.

" } }, - "documentation":"

The metadata that describes the column structure and data types of a table of query results.

" + "documentation":"

The metadata that describes the column structure and data types of a table of query results. To return a ResultSetMetadata object, use GetQueryResults.

" }, "Row":{ "type":"structure", @@ -1153,6 +1667,49 @@ } }, "String":{"type":"string"}, + "TableMetadata":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the table.

" + }, + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

The time that the table was created.

" + }, + "LastAccessTime":{ + "shape":"Timestamp", + "documentation":"

The last time the table was accessed.

" + }, + "TableType":{ + "shape":"TableTypeString", + "documentation":"

The type of table. In Athena, only EXTERNAL_TABLE is supported.

" + }, + "Columns":{ + "shape":"ColumnList", + "documentation":"

A list of the columns in the table.

" + }, + "PartitionKeys":{ + "shape":"ColumnList", + "documentation":"

A list of the partition keys in the table.

" + }, + "Parameters":{ + "shape":"ParametersMap", + "documentation":"

A set of custom key/value pairs for table properties.

" + } + }, + "documentation":"

Contains metadata for a table.

" + }, + "TableMetadataList":{ + "type":"list", + "member":{"shape":"TableMetadata"} + }, + "TableTypeString":{ + "type":"string", + "max":255 + }, "Tag":{ "type":"structure", "members":{ @@ -1165,7 +1722,7 @@ "documentation":"

A tag value. The tag value length is from 0 to 256 Unicode characters in UTF-8. You can use letters and numbers representable in UTF-8, and the following characters: + - = . _ : / @. Tag values are case-sensitive.

" } }, - "documentation":"

A tag that you can add to a resource. A tag is a label that you assign to an AWS Athena resource (a workgroup). Each tag consists of a key and an optional value, both of which you define. Tags enable you to categorize workgroups in Athena, for example, by purpose, owner, or environment. Use a consistent set of tag keys to make it easier to search and filter workgroups in your account. The maximum tag key length is 128 Unicode characters in UTF-8. The maximum tag value length is 256 Unicode characters in UTF-8. You can use letters and numbers representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Tag keys must be unique per resource.

" + "documentation":"

A label that you assign to a resource. In Athena, a resource can be a workgroup or data catalog. Each tag consists of a key and an optional value, both of which you define. For example, you can use tags to categorize Athena workgroups or data catalogs by purpose, owner, or environment. Use a consistent set of tag keys to make it easier to search and filter workgroups or data catalogs in your account. For best practices, see Tagging Best Practices. Tag keys can be from 1 to 128 UTF-8 Unicode characters, and tag values can be from 0 to 256 UTF-8 Unicode characters. Tags can use letters and numbers representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Tag keys must be unique per resource. If you specify more than one tag, separate them by commas.

" }, "TagKey":{ "type":"string", @@ -1189,11 +1746,11 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

Requests that one or more tags are added to the resource (such as a workgroup) for the specified ARN.

" + "documentation":"

Specifies the ARN of the Athena resource (workgroup or data catalog) to which tags are to be added.

" }, "Tags":{ "shape":"TagList", - "documentation":"

One or more tags, separated by commas, to be added to the resource, such as a workgroup.

" + "documentation":"

A collection of one or more tags, separated by commas, to be added to an Athena workgroup or data catalog resource.

" } } }, @@ -1212,6 +1769,7 @@ "documentation":"

The reason for the query throttling, for example, when it exceeds the concurrent query limit.

", "enum":["CONCURRENT_QUERY_LIMIT_EXCEEDED"] }, + "Timestamp":{"type":"timestamp"}, "Token":{ "type":"string", "max":1024, @@ -1226,6 +1784,12 @@ "documentation":"

Indicates that the request was throttled.

", "exception":true }, + "TypeString":{ + "type":"string", + "max":4096, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, "UnprocessedNamedQueryId":{ "type":"structure", "members":{ @@ -1279,11 +1843,11 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

Removes one or more tags from the workgroup resource for the specified ARN.

" + "documentation":"

Specifies the ARN of the resource from which tags are to be removed.

" }, "TagKeys":{ "shape":"TagKeyList", - "documentation":"

Removes the tags associated with one or more tag keys from the workgroup resource.

" + "documentation":"

A comma-separated list of one or more tag keys whose tags are to be removed from the specified resource.

" } } }, @@ -1292,6 +1856,36 @@ "members":{ } }, + "UpdateDataCatalogInput":{ + "type":"structure", + "required":[ + "Name", + "Type" + ], + "members":{ + "Name":{ + "shape":"CatalogNameString", + "documentation":"

The name of the data catalog to update. The catalog name must be unique for the AWS account and can use a maximum of 128 alphanumeric, underscore, at sign, or hyphen characters.

" + }, + "Type":{ + "shape":"DataCatalogType", + "documentation":"

Specifies the type of data catalog to update. Specify LAMBDA for a federated catalog, GLUE for AWS Glue Catalog, or HIVE for an external Hive metastore.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

New or modified text that describes the data catalog.

" + }, + "Parameters":{ + "shape":"ParametersMap", + "documentation":"

Specifies the Lambda function or functions to use for updating the data catalog. This is a mapping whose values depend on the catalog type.

" + } + } + }, + "UpdateDataCatalogOutput":{ + "type":"structure", + "members":{ + } + }, "UpdateWorkGroupInput":{ "type":"structure", "required":["WorkGroup"], @@ -1409,7 +2003,7 @@ }, "WorkGroupName":{ "type":"string", - "pattern":"[a-zA-z0-9._-]{1,128}" + "pattern":"[a-zA-Z0-9._-]{1,128}" }, "WorkGroupState":{ "type":"string", diff --git a/botocore/data/autoscaling/2011-01-01/service-2.json b/botocore/data/autoscaling/2011-01-01/service-2.json index 20d729d0..63e8b509 100644 --- a/botocore/data/autoscaling/2011-01-01/service-2.json +++ b/botocore/data/autoscaling/2011-01-01/service-2.json @@ -92,6 +92,24 @@ ], "documentation":"

Creates or updates one or more scheduled scaling actions for an Auto Scaling group. If you leave a parameter unspecified when updating a scheduled scaling action, the corresponding value remains unchanged.

" }, + "CancelInstanceRefresh":{ + "name":"CancelInstanceRefresh", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelInstanceRefreshType"}, + "output":{ + "shape":"CancelInstanceRefreshAnswer", + "resultWrapper":"CancelInstanceRefreshResult" + }, + "errors":[ + {"shape":"LimitExceededFault"}, + {"shape":"ResourceContentionFault"}, + {"shape":"ActiveInstanceRefreshNotFoundFault"} + ], + "documentation":"

Cancels an instance refresh operation in progress. Cancellation does not roll back any replacements that have already been completed, but it prevents new replacements from being started.

For more information, see Replacing Auto Scaling Instances Based on an Instance Refresh.

" + }, "CompleteLifecycleAction":{ "name":"CompleteLifecycleAction", "http":{ @@ -324,6 +342,23 @@ ], "documentation":"

Describes the notification types that are supported by Amazon EC2 Auto Scaling.

" }, + "DescribeInstanceRefreshes":{ + "name":"DescribeInstanceRefreshes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstanceRefreshesType"}, + "output":{ + "shape":"DescribeInstanceRefreshesAnswer", + "resultWrapper":"DescribeInstanceRefreshesResult" + }, + "errors":[ + {"shape":"InvalidNextToken"}, + {"shape":"ResourceContentionFault"} + ], + "documentation":"

Describes one or more instance refreshes.

You can determine the status of a request by looking at the Status parameter. The following are the possible statuses:

" + }, "DescribeLaunchConfigurations":{ "name":"DescribeLaunchConfigurations", "http":{ @@ -786,6 +821,24 @@ ], "documentation":"

Updates the instance protection settings of the specified instances.

For more information about preventing instances that are part of an Auto Scaling group from terminating on scale in, see Instance Protection in the Amazon EC2 Auto Scaling User Guide.

" }, + "StartInstanceRefresh":{ + "name":"StartInstanceRefresh", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartInstanceRefreshType"}, + "output":{ + "shape":"StartInstanceRefreshAnswer", + "resultWrapper":"StartInstanceRefreshResult" + }, + "errors":[ + {"shape":"LimitExceededFault"}, + {"shape":"ResourceContentionFault"}, + {"shape":"InstanceRefreshInProgressFault"} + ], + "documentation":"

Starts a new instance refresh operation, which triggers a rolling replacement of all previously launched instances in the Auto Scaling group with a new group of instances.

If successful, this call creates a new instance refresh request with a unique ID that you can use to track its progress. To query its status, call the DescribeInstanceRefreshes API. To describe the instance refreshes that have already run, call the DescribeInstanceRefreshes API. To cancel an active instance refresh operation, use the CancelInstanceRefresh API.

For more information, see Replacing Auto Scaling Instances Based on an Instance Refresh.

" + }, "SuspendProcesses":{ "name":"SuspendProcesses", "http":{ @@ -832,6 +885,19 @@ } }, "shapes":{ + "ActiveInstanceRefreshNotFoundFault":{ + "type":"structure", + "members":{ + "message":{"shape":"XmlStringMaxLen255"} + }, + "documentation":"

The request failed because an active instance refresh for the specified Auto Scaling group was not found.

", + "error":{ + "code":"ActiveInstanceRefreshNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "Activities":{ "type":"list", "member":{"shape":"Activity"} @@ -1367,6 +1433,25 @@ "type":"list", "member":{"shape":"BlockDeviceMapping"} }, + "CancelInstanceRefreshAnswer":{ + "type":"structure", + "members":{ + "InstanceRefreshId":{ + "shape":"XmlStringMaxLen255", + "documentation":"

The instance refresh ID.

" + } + } + }, + "CancelInstanceRefreshType":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "AutoScalingGroupName":{ + "shape":"XmlStringMaxLen255", + "documentation":"

The name of the Auto Scaling group.

" + } + } + }, "ClassicLinkVPCSecurityGroups":{ "type":"list", "member":{"shape":"XmlStringMaxLen255"} @@ -1774,6 +1859,41 @@ } } }, + "DescribeInstanceRefreshesAnswer":{ + "type":"structure", + "members":{ + "InstanceRefreshes":{ + "shape":"InstanceRefreshes", + "documentation":"

The instance refreshes for the specified group.

For more information, see Replacing Auto Scaling Instances Based on an Instance Refresh.

" + }, + "NextToken":{ + "shape":"XmlString", + "documentation":"

A string that indicates that the response contains more items than can be returned in a single response. To receive additional items, specify this string for the NextToken value when requesting the next set of items. This value is null when there are no more items to return.

" + } + } + }, + "DescribeInstanceRefreshesType":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "AutoScalingGroupName":{ + "shape":"XmlStringMaxLen255", + "documentation":"

The name of the Auto Scaling group.

" + }, + "InstanceRefreshIds":{ + "shape":"InstanceRefreshIds", + "documentation":"

One or more instance refresh IDs.

" + }, + "NextToken":{ + "shape":"XmlString", + "documentation":"

The token for the next set of items to return. (You received this token from a previous call.)

" + }, + "MaxRecords":{ + "shape":"MaxRecords", + "documentation":"

The maximum number of items to return with this call. The default value is 50 and the maximum value is 100.

" + } + } + }, "DescribeLifecycleHookTypesAnswer":{ "type":"structure", "members":{ @@ -2362,6 +2482,76 @@ "documentation":"

Describes whether detailed monitoring is enabled for the Auto Scaling instances.

" }, "InstanceProtected":{"type":"boolean"}, + "InstanceRefresh":{ + "type":"structure", + "members":{ + "InstanceRefreshId":{ + "shape":"XmlStringMaxLen255", + "documentation":"

The instance refresh ID.

" + }, + "AutoScalingGroupName":{ + "shape":"XmlStringMaxLen255", + "documentation":"

The name of the Auto Scaling group.

" + }, + "Status":{ + "shape":"InstanceRefreshStatus", + "documentation":"

The current status for the instance refresh operation:

" + }, + "StatusReason":{ + "shape":"XmlStringMaxLen1023", + "documentation":"

Provides more details about the current status of the instance refresh.

" + }, + "StartTime":{ + "shape":"TimestampType", + "documentation":"

The date and time at which the instance refresh began.

" + }, + "EndTime":{ + "shape":"TimestampType", + "documentation":"

The date and time at which the instance refresh ended.

" + }, + "PercentageComplete":{ + "shape":"IntPercent", + "documentation":"

The percentage of the instance refresh that is complete. For each instance replacement, Amazon EC2 Auto Scaling tracks the instance's health status and warm-up time. When the instance's health status changes to healthy and the specified warm-up time passes, the instance is considered updated and added to the percentage complete.

" + }, + "InstancesToUpdate":{ + "shape":"InstancesToUpdate", + "documentation":"

The number of instances remaining to update before the instance refresh is complete.

" + } + }, + "documentation":"

Describes an instance refresh for an Auto Scaling group.

" + }, + "InstanceRefreshIds":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen255"} + }, + "InstanceRefreshInProgressFault":{ + "type":"structure", + "members":{ + "message":{"shape":"XmlStringMaxLen255"} + }, + "documentation":"

The request failed because an active instance refresh operation already exists for the specified Auto Scaling group.

", + "error":{ + "code":"InstanceRefreshInProgress", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InstanceRefreshStatus":{ + "type":"string", + "enum":[ + "Pending", + "InProgress", + "Successful", + "Failed", + "Cancelling", + "Cancelled" + ] + }, + "InstanceRefreshes":{ + "type":"list", + "member":{"shape":"InstanceRefresh"} + }, "Instances":{ "type":"list", "member":{"shape":"Instance"} @@ -2394,7 +2584,16 @@ "documentation":"

The maximum price per unit hour that you are willing to pay for a Spot Instance. If you leave the value of this parameter blank (which is the default), the maximum Spot price is set at the On-Demand price.

To remove a value that you previously set, include the parameter but leave the value blank.

" } }, - "documentation":"

Describes an instances distribution for an Auto Scaling group with MixedInstancesPolicy.

The instances distribution specifies the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacity.

When you update SpotAllocationStrategy, SpotInstancePools, or SpotMaxPrice, this update action does not deploy any changes across the running Amazon EC2 instances in the group. Your existing Spot Instances continue to run as long as the maximum price for those instances is higher than the current Spot price. When scale out occurs, Amazon EC2 Auto Scaling launches instances based on the new settings. When scale in occurs, Amazon EC2 Auto Scaling terminates instances according to the group's termination policies.

" + "documentation":"

Describes an instances distribution for an Auto Scaling group with a MixedInstancesPolicy.

The instances distribution specifies the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacity.

When you update SpotAllocationStrategy, SpotInstancePools, or SpotMaxPrice, this update action does not deploy any changes across the running Amazon EC2 instances in the group. Your existing Spot Instances continue to run as long as the maximum price for those instances is higher than the current Spot price. When scale out occurs, Amazon EC2 Auto Scaling launches instances based on the new settings. When scale in occurs, Amazon EC2 Auto Scaling terminates instances according to the group's termination policies.

" + }, + "InstancesToUpdate":{ + "type":"integer", + "min":0 + }, + "IntPercent":{ + "type":"integer", + "max":100, + "min":0 }, "InvalidNextToken":{ "type":"structure", @@ -2966,7 +3165,7 @@ }, "ResourceLabel":{ "shape":"XmlStringMaxLen1023", - "documentation":"

Identifies the resource associated with the metric type. You can't specify a resource label unless the metric type is ALBRequestCountPerTarget and there is a target group attached to the Auto Scaling group.

The format is app/load-balancer-name/load-balancer-id/targetgroup/target-group-name/target-group-id , where

" + "documentation":"

Identifies the resource associated with the metric type. You can't specify a resource label unless the metric type is ALBRequestCountPerTarget and there is a target group attached to the Auto Scaling group.

Elastic Load Balancing sends data about your load balancers to Amazon CloudWatch. CloudWatch collects the data and specifies the format to use to access the data. The format is app/load-balancer-name/load-balancer-id/targetgroup/target-group-name/target-group-id , where

To find the ARN for an Application Load Balancer, use the DescribeLoadBalancers API operation. To find the ARN for the target group, use the DescribeTargetGroups API operation.

" } }, "documentation":"

Represents a predefined metric for a target tracking scaling policy to use with Amazon EC2 Auto Scaling.

" @@ -2981,7 +3180,7 @@ "members":{ "ProcessName":{ "shape":"XmlStringMaxLen255", - "documentation":"

One of the following processes:

" + "documentation":"

One of the following processes:

" } }, "documentation":"

Describes a process type.

For more information, see Scaling Processes in the Amazon EC2 Auto Scaling User Guide.

" @@ -3206,6 +3405,28 @@ } } }, + "RefreshInstanceWarmup":{ + "type":"integer", + "min":0 + }, + "RefreshPreferences":{ + "type":"structure", + "members":{ + "MinHealthyPercentage":{ + "shape":"IntPercent", + "documentation":"

The amount of capacity in the Auto Scaling group that must remain healthy during an instance refresh to allow the operation to continue, as a percentage of the desired capacity of the Auto Scaling group (rounded up to the nearest integer). The default is 90.

" + }, + "InstanceWarmup":{ + "shape":"RefreshInstanceWarmup", + "documentation":"

The number of seconds until a newly launched instance is configured and ready to use. During this time, Amazon EC2 Auto Scaling does not immediately move on to the next replacement. The default is to use the value specified for the health check grace period for the group.

Note: While warming up, a newly launched instance is not counted toward the aggregated metrics of the Auto Scaling group.

" + } + }, + "documentation":"

Describes information used to start an instance refresh.

" + }, + "RefreshStrategy":{ + "type":"string", + "enum":["Rolling"] + }, "ResourceContentionFault":{ "type":"structure", "members":{ @@ -3358,7 +3579,7 @@ }, "ScalingProcesses":{ "shape":"ProcessNames", - "documentation":"

One or more of the following processes. If you omit this parameter, all processes are specified.

" + "documentation":"

One or more of the following processes:

If you omit this parameter, all processes are specified.

" } } }, @@ -3562,6 +3783,33 @@ "max":255, "min":1 }, + "StartInstanceRefreshAnswer":{ + "type":"structure", + "members":{ + "InstanceRefreshId":{ + "shape":"XmlStringMaxLen255", + "documentation":"

A unique ID for tracking the progress of the request.

" + } + } + }, + "StartInstanceRefreshType":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "AutoScalingGroupName":{ + "shape":"XmlStringMaxLen255", + "documentation":"

The name of the Auto Scaling group.

" + }, + "Strategy":{ + "shape":"RefreshStrategy", + "documentation":"

The strategy to use for the instance refresh. The only valid value is Rolling.

A rolling update is an update that is applied to all instances in an Auto Scaling group until all instances have been updated. A rolling update can fail due to failed health checks or if instances are on standby or are protected from scale-in. If the rolling update process fails, any instances that were already replaced are not rolled back to their previous configuration.

" + }, + "Preferences":{ + "shape":"RefreshPreferences", + "documentation":"

Set of preferences associated with the instance refresh request.

" + } + } + }, "StepAdjustment":{ "type":"structure", "required":["ScalingAdjustment"], diff --git a/botocore/data/chime/2018-05-01/service-2.json b/botocore/data/chime/2018-05-01/service-2.json index 7b586bc4..cd845f55 100644 --- a/botocore/data/chime/2018-05-01/service-2.json +++ b/botocore/data/chime/2018-05-01/service-2.json @@ -314,7 +314,27 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Creates a new Amazon Chime SDK meeting in the specified media Region with no initial attendees. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

" + "documentation":"

Creates a new Amazon Chime SDK meeting in the specified media Region with no initial attendees. For more information about specifying media Regions, see Amazon Chime SDK Media Regions in the Amazon Chime Developer Guide. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

" + }, + "CreateMeetingWithAttendees":{ + "name":"CreateMeetingWithAttendees", + "http":{ + "method":"POST", + "requestUri":"/meetings?operation=create-attendees", + "responseCode":201 + }, + "input":{"shape":"CreateMeetingWithAttendeesRequest"}, + "output":{"shape":"CreateMeetingWithAttendeesResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Creates a new Amazon Chime SDK meeting in the specified media Region, with attendees. For more information about specifying media Regions, see Amazon Chime SDK Media Regions in the Amazon Chime Developer Guide. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

" }, "CreatePhoneNumberOrder":{ "name":"CreatePhoneNumberOrder", @@ -3021,7 +3041,7 @@ }, "MediaRegion":{ "shape":"String", - "documentation":"

The Region in which to create the meeting. Available values: ap-northeast-1, ap-southeast-1, ap-southeast-2, ca-central-1, eu-central-1, eu-north-1, eu-west-1, eu-west-2, eu-west-3, sa-east-1, us-east-1, us-east-2, us-west-1, us-west-2.

" + "documentation":"

The Region in which to create the meeting. Default: us-east-1.

Available values: ap-northeast-1, ap-southeast-1, ap-southeast-2, ca-central-1, eu-central-1, eu-north-1, eu-west-1, eu-west-2, eu-west-3, sa-east-1, us-east-1, us-east-2, us-west-1, us-west-2.

" }, "Tags":{ "shape":"MeetingTagList", @@ -3042,6 +3062,58 @@ } } }, + "CreateMeetingWithAttendeesRequest":{ + "type":"structure", + "required":["ClientRequestToken"], + "members":{ + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

The unique identifier for the client request. Use a different token for different meetings.

", + "idempotencyToken":true + }, + "ExternalMeetingId":{ + "shape":"ExternalMeetingIdType", + "documentation":"

The external meeting ID.

" + }, + "MeetingHostId":{ + "shape":"ExternalUserIdType", + "documentation":"

Reserved.

" + }, + "MediaRegion":{ + "shape":"String", + "documentation":"

The Region in which to create the meeting. Default: us-east-1.

Available values: ap-northeast-1, ap-southeast-1, ap-southeast-2, ca-central-1, eu-central-1, eu-north-1, eu-west-1, eu-west-2, eu-west-3, sa-east-1, us-east-1, us-east-2, us-west-1, us-west-2.

" + }, + "Tags":{ + "shape":"MeetingTagList", + "documentation":"

The tag key-value pairs.

" + }, + "NotificationsConfiguration":{"shape":"MeetingNotificationConfiguration"}, + "Attendees":{ + "shape":"CreateMeetingWithAttendeesRequestItemList", + "documentation":"

The request containing the attendees to create.

" + } + } + }, + "CreateMeetingWithAttendeesRequestItemList":{ + "type":"list", + "member":{"shape":"CreateAttendeeRequestItem"}, + "max":5, + "min":1 + }, + "CreateMeetingWithAttendeesResponse":{ + "type":"structure", + "members":{ + "Meeting":{"shape":"Meeting"}, + "Attendees":{ + "shape":"AttendeeList", + "documentation":"

The attendee information, including attendee IDs and join tokens.

" + }, + "Errors":{ + "shape":"BatchCreateAttendeeErrorList", + "documentation":"

If the action fails for one or more of the attendees in the request, a list of the attendees is returned, along with error codes and error messages.

" + } + } + }, "CreatePhoneNumberOrderRequest":{ "type":"structure", "required":[ @@ -5045,7 +5117,7 @@ "documentation":"

The SQS queue ARN.

" } }, - "documentation":"

The configuration for resource targets to receive notifications when Amazon Chime SDK meeting and attendee events occur.

" + "documentation":"

The configuration for resource targets to receive notifications when Amazon Chime SDK meeting and attendee events occur. The Amazon Chime SDK supports resource targets located in the US East (N. Virginia) AWS Region (us-east-1).

" }, "MeetingTagKeyList":{ "type":"list", diff --git a/botocore/data/cloudformation/2010-05-15/service-2.json b/botocore/data/cloudformation/2010-05-15/service-2.json index a6d054b0..d9db94ba 100644 --- a/botocore/data/cloudformation/2010-05-15/service-2.json +++ b/botocore/data/cloudformation/2010-05-15/service-2.json @@ -3365,7 +3365,7 @@ }, "ExecutionRoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM execution role to use to register the type. If your resource type calls AWS APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. CloudFormation then assumes that execution role to provide your resource type with the appropriate credentials.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role for CloudFormation to assume when invoking the resource provider. If your resource type calls AWS APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. When CloudFormation needs to invoke the resource provider handler, CloudFormation assumes this execution role to create a temporary session token, which it then passes to the resource provider handler, thereby supplying your resource provider with the appropriate credentials.

" }, "ClientRequestToken":{ "shape":"RequestToken", @@ -4026,7 +4026,7 @@ }, "OrganizationalUnitId":{ "shape":"OrganizationalUnitId", - "documentation":"

Reserved for internal use. No data returned.

" + "documentation":"

[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.

" }, "DriftStatus":{ "shape":"StackDriftStatus", @@ -4092,7 +4092,7 @@ }, "OrganizationalUnitId":{ "shape":"OrganizationalUnitId", - "documentation":"

Reserved for internal use. No data returned.

" + "documentation":"

[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.

" }, "DriftStatus":{ "shape":"StackDriftStatus", @@ -4450,7 +4450,7 @@ }, "OrganizationalUnitIds":{ "shape":"OrganizationalUnitIdList", - "documentation":"

Reserved for internal use. No data returned.

" + "documentation":"

[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.

" } }, "documentation":"

A structure that contains information about a stack set. A stack set enables you to provision stacks into AWS accounts and across Regions by using a single CloudFormation template. In the stack set, you specify the template to use, as well as any parameters and capabilities that the template requires.

" @@ -4622,7 +4622,7 @@ }, "MaxConcurrentCount":{ "shape":"MaxConcurrentCount", - "documentation":"

The maximum number of accounts in which to perform this operation at one time. This is dependent on the value of FailureToleranceCountMaxConcurrentCount is at most one more than the FailureToleranceCount .

Note that this setting lets you specify the maximum for operations. For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.

Conditional: You must specify either MaxConcurrentCount or MaxConcurrentPercentage, but not both.

" + "documentation":"

The maximum number of accounts in which to perform this operation at one time. This is dependent on the value of FailureToleranceCount. MaxConcurrentCount is at most one more than the FailureToleranceCount.

Note that this setting lets you specify the maximum for operations. For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.

Conditional: You must specify either MaxConcurrentCount or MaxConcurrentPercentage, but not both.

" }, "MaxConcurrentPercentage":{ "shape":"MaxConcurrentPercentage", @@ -4670,7 +4670,7 @@ }, "OrganizationalUnitId":{ "shape":"OrganizationalUnitId", - "documentation":"

Reserved for internal use. No data returned.

" + "documentation":"

[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.

" } }, "documentation":"

The structure that contains information about a specified operation's results for a given account in a given Region.

" diff --git a/botocore/data/cloudfront/2019-03-26/service-2.json b/botocore/data/cloudfront/2019-03-26/service-2.json index f93309e8..cff4cfc5 100644 --- a/botocore/data/cloudfront/2019-03-26/service-2.json +++ b/botocore/data/cloudfront/2019-03-26/service-2.json @@ -1018,7 +1018,7 @@ }, "TargetOriginId":{ "shape":"string", - "documentation":"

The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior in your distribution.

" + "documentation":"

The value of ID for the origin that you want CloudFront to route requests to when they match this cache behavior.

" }, "ForwardedValues":{ "shape":"ForwardedValues", @@ -1026,11 +1026,11 @@ }, "TrustedSigners":{ "shape":"TrustedSigners", - "documentation":"

A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content.

If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide.

If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items.

To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.

" + "documentation":"

A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content.

If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, see Serving Private Content with Signed URLs and Signed Cookies in the Amazon CloudFront Developer Guide.

If you don’t want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items.

To add, change, or remove one or more trusted signers, change Enabled to true (if it’s currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.

" }, "ViewerProtocolPolicy":{ "shape":"ViewerProtocolPolicy", - "documentation":"

The protocol that viewers can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. You can specify the following options:

For more information about requiring the HTTPS protocol, see Using an HTTPS Connection to Access Your Objects in the Amazon CloudFront Developer Guide.

The only way to guarantee that viewers retrieve an object that was fetched from the origin using HTTPS is never to use any other protocol to fetch the object. If you have recently changed from HTTP to HTTPS, we recommend that you clear your objects' cache because cached objects are protocol agnostic. That means that an edge location will return an object from the cache regardless of whether the current request protocol matches the protocol used previously. For more information, see Managing How Long Content Stays in an Edge Cache (Expiration) in the Amazon CloudFront Developer Guide.

" + "documentation":"

The protocol that viewers can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. You can specify the following options:

For more information about requiring the HTTPS protocol, see Requiring HTTPS Between Viewers and CloudFront in the Amazon CloudFront Developer Guide.

The only way to guarantee that viewers retrieve an object that was fetched from the origin using HTTPS is never to use any other protocol to fetch the object. If you have recently changed from HTTP to HTTPS, we recommend that you clear your objects’ cache because cached objects are protocol agnostic. That means that an edge location will return an object from the cache regardless of whether the current request protocol matches the protocol used previously. For more information, see Managing Cache Expiration in the Amazon CloudFront Developer Guide.

" }, "MinTTL":{ "shape":"long", @@ -1059,10 +1059,10 @@ }, "FieldLevelEncryptionId":{ "shape":"string", - "documentation":"

The value of ID for the field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data for a cache behavior or for the default cache behavior in your distribution.

" + "documentation":"

The value of ID for the field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data for this cache behavior.

" } }, - "documentation":"

A complex type that describes how CloudFront processes requests.

You must create at least as many cache behaviors (including the default cache behavior) as you have origins if you want CloudFront to distribute objects from all of the origins. Each cache behavior specifies the one origin from which you want CloudFront to get objects. If you have two origins and only the default cache behavior, the default cache behavior will cause CloudFront to get objects from one of the origins, but the other origin is never used.

For the current limit on the number of cache behaviors that you can add to a distribution, see Amazon CloudFront Limits in the AWS General Reference.

If you don't want to specify any cache behaviors, include only an empty CacheBehaviors element. Don't include an empty CacheBehavior element, or CloudFront returns a MalformedXML error.

To delete all cache behaviors in an existing distribution, update the distribution configuration and include only an empty CacheBehaviors element.

To add, change, or remove one or more cache behaviors, update the distribution configuration and specify all of the cache behaviors that you want to include in the updated distribution.

For more information about cache behaviors, see Cache Behaviors in the Amazon CloudFront Developer Guide.

" + "documentation":"

A complex type that describes how CloudFront processes requests.

You must create at least as many cache behaviors (including the default cache behavior) as you have origins if you want CloudFront to serve objects from all of the origins. Each cache behavior specifies the one origin from which you want CloudFront to get objects. If you have two origins and only the default cache behavior, the default cache behavior will cause CloudFront to get objects from one of the origins, but the other origin is never used.

For the current quota (formerly known as limit) on the number of cache behaviors that you can add to a distribution, see Quotas in the Amazon CloudFront Developer Guide.

If you don’t want to specify any cache behaviors, include only an empty CacheBehaviors element. Don’t include an empty CacheBehavior element because this is invalid.

To delete all cache behaviors in an existing distribution, update the distribution configuration and include only an empty CacheBehaviors element.

To add, change, or remove one or more cache behaviors, update the distribution configuration and specify all of the cache behaviors that you want to include in the updated distribution.

For more information about cache behaviors, see Cache Behavior Settings in the Amazon CloudFront Developer Guide.

" }, "CacheBehaviorList":{ "type":"list", @@ -1745,30 +1745,30 @@ "members":{ "HTTPPort":{ "shape":"integer", - "documentation":"

The HTTP port the custom origin listens on.

" + "documentation":"

The HTTP port that CloudFront uses to connect to the origin. Specify the HTTP port that the origin listens on.

" }, "HTTPSPort":{ "shape":"integer", - "documentation":"

The HTTPS port the custom origin listens on.

" + "documentation":"

The HTTPS port that CloudFront uses to connect to the origin. Specify the HTTPS port that the origin listens on.

" }, "OriginProtocolPolicy":{ "shape":"OriginProtocolPolicy", - "documentation":"

The origin protocol policy to apply to your origin.

" + "documentation":"

Specifies the protocol (HTTP or HTTPS) that CloudFront uses to connect to the origin. Valid values are:

" }, "OriginSslProtocols":{ "shape":"OriginSslProtocols", - "documentation":"

The SSL/TLS protocols that you want CloudFront to use when communicating with your origin over HTTPS.

" + "documentation":"

Specifies the minimum SSL/TLS protocol that CloudFront uses when connecting to your origin over HTTPS. Valid values include SSLv3, TLSv1, TLSv1.1, and TLSv1.2.

For more information, see Minimum Origin SSL Protocol in the Amazon CloudFront Developer Guide.

" }, "OriginReadTimeout":{ "shape":"integer", - "documentation":"

You can create a custom origin read timeout. All timeout units are in seconds. The default origin read timeout is 30 seconds, but you can configure custom timeout lengths using the CloudFront API. The minimum timeout length is 4 seconds; the maximum is 60 seconds.

If you need to increase the maximum time limit, contact the AWS Support Center.

" + "documentation":"

Specifies how long, in seconds, CloudFront waits for a response from the origin. This is also known as the origin response timeout. The minimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don’t specify otherwise) is 30 seconds.

For more information, see Origin Response Timeout in the Amazon CloudFront Developer Guide.

" }, "OriginKeepaliveTimeout":{ "shape":"integer", - "documentation":"

You can create a custom keep-alive timeout. All timeout units are in seconds. The default keep-alive timeout is 5 seconds, but you can configure custom timeout lengths using the CloudFront API. The minimum timeout length is 1 second; the maximum is 60 seconds.

If you need to increase the maximum time limit, contact the AWS Support Center.

" + "documentation":"

Specifies how long, in seconds, CloudFront persists its connection to the origin. The minimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don’t specify otherwise) is 5 seconds.

For more information, see Origin Keep-alive Timeout in the Amazon CloudFront Developer Guide.

" } }, - "documentation":"

A custom origin or an Amazon S3 bucket configured as a website endpoint.

" + "documentation":"

A custom origin. A custom origin is any origin that is not an Amazon S3 bucket, with one exception. An Amazon S3 bucket that is configured with static website hosting is a custom origin.

" }, "DefaultCacheBehavior":{ "type":"structure", @@ -1782,7 +1782,7 @@ "members":{ "TargetOriginId":{ "shape":"string", - "documentation":"

The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior in your distribution.

" + "documentation":"

The value of ID for the origin that you want CloudFront to route requests to when they use the default cache behavior.

" }, "ForwardedValues":{ "shape":"ForwardedValues", @@ -1790,11 +1790,11 @@ }, "TrustedSigners":{ "shape":"TrustedSigners", - "documentation":"

A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content.

If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide.

If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items.

To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.

" + "documentation":"

A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content.

If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, see Serving Private Content with Signed URLs and Signed Cookies in the Amazon CloudFront Developer Guide.

If you don’t want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items.

To add, change, or remove one or more trusted signers, change Enabled to true (if it’s currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.

" }, "ViewerProtocolPolicy":{ "shape":"ViewerProtocolPolicy", - "documentation":"

The protocol that viewers can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. You can specify the following options:

For more information about requiring the HTTPS protocol, see Using an HTTPS Connection to Access Your Objects in the Amazon CloudFront Developer Guide.

The only way to guarantee that viewers retrieve an object that was fetched from the origin using HTTPS is never to use any other protocol to fetch the object. If you have recently changed from HTTP to HTTPS, we recommend that you clear your objects' cache because cached objects are protocol agnostic. That means that an edge location will return an object from the cache regardless of whether the current request protocol matches the protocol used previously. For more information, see Managing How Long Content Stays in an Edge Cache (Expiration) in the Amazon CloudFront Developer Guide.

" + "documentation":"

The protocol that viewers can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. You can specify the following options:

For more information about requiring the HTTPS protocol, see Requiring HTTPS Between Viewers and CloudFront in the Amazon CloudFront Developer Guide.

The only way to guarantee that viewers retrieve an object that was fetched from the origin using HTTPS is never to use any other protocol to fetch the object. If you have recently changed from HTTP to HTTPS, we recommend that you clear your objects’ cache because cached objects are protocol agnostic. That means that an edge location will return an object from the cache regardless of whether the current request protocol matches the protocol used previously. For more information, see Managing Cache Expiration in the Amazon CloudFront Developer Guide.

" }, "MinTTL":{ "shape":"long", @@ -1823,10 +1823,10 @@ }, "FieldLevelEncryptionId":{ "shape":"string", - "documentation":"

The value of ID for the field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data for a cache behavior or for the default cache behavior in your distribution.

" + "documentation":"

The value of ID for the field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data for the default cache behavior.

" } }, - "documentation":"

A complex type that describes the default cache behavior if you don't specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements. You must create exactly one default cache behavior.

" + "documentation":"

A complex type that describes the default cache behavior if you don’t specify a CacheBehavior element or if request URLs don’t match any of the values of PathPattern in CacheBehavior elements. You must create exactly one default cache behavior.

" }, "DeleteCloudFrontOriginAccessIdentityRequest":{ "type":"structure", @@ -3112,7 +3112,7 @@ "members":{ "Message":{"shape":"string"} }, - "documentation":"

The argument is invalid.

", + "documentation":"

An argument is invalid.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -3929,30 +3929,38 @@ "members":{ "Id":{ "shape":"string", - "documentation":"

A unique identifier for the origin or origin group. The value of Id must be unique within the distribution.

When you specify the value of TargetOriginId for the default cache behavior or for another cache behavior, you indicate the origin to which you want the cache behavior to route requests by specifying the value of the Id element for that origin. When a request matches the path pattern for that cache behavior, CloudFront routes the request to the specified origin. For more information, see Cache Behavior Settings in the Amazon CloudFront Developer Guide.

" + "documentation":"

A unique identifier for the origin. This value must be unique within the distribution.

Use this value to specify the TargetOriginId in a CacheBehavior or DefaultCacheBehavior.

" }, "DomainName":{ "shape":"string", - "documentation":"

Amazon S3 origins: The DNS name of the Amazon S3 bucket from which you want CloudFront to get objects for this origin, for example, myawsbucket.s3.amazonaws.com. If you set up your bucket to be configured as a website endpoint, enter the Amazon S3 static website hosting endpoint for the bucket.

For more information about specifying this value for different types of origins, see Origin Domain Name in the Amazon CloudFront Developer Guide.

Constraints for Amazon S3 origins:

Custom Origins: The DNS domain name for the HTTP server from which you want CloudFront to get objects for this origin, for example, www.example.com.

Constraints for custom origins:

" + "documentation":"

The domain name for the origin.

For more information, see Origin Domain Name in the Amazon CloudFront Developer Guide.

" }, "OriginPath":{ "shape":"string", - "documentation":"

An optional element that causes CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin. When you include the OriginPath element, specify the directory name, beginning with a /. CloudFront appends the directory name to the value of DomainName, for example, example.com/production. Do not include a / at the end of the directory name.

For example, suppose you've specified the following values for your distribution:

When a user enters example.com/index.html in a browser, CloudFront sends a request to Amazon S3 for myawsbucket/production/index.html.

When a user enters example.com/acme/index.html in a browser, CloudFront sends a request to Amazon S3 for myawsbucket/production/acme/index.html.

" + "documentation":"

An optional path that CloudFront appends to the origin domain name when CloudFront requests content from the origin.

For more information, see Origin Path in the Amazon CloudFront Developer Guide.

" }, "CustomHeaders":{ "shape":"CustomHeaders", - "documentation":"

A complex type that contains names and values for the custom headers that you want.

" + "documentation":"

A list of HTTP header names and values that CloudFront adds to requests it sends to the origin.

For more information, see Adding Custom Headers to Origin Requests in the Amazon CloudFront Developer Guide.

" }, "S3OriginConfig":{ "shape":"S3OriginConfig", - "documentation":"

A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin, use the CustomOriginConfig element instead.

" + "documentation":"

Use this type to specify an origin that is an Amazon S3 bucket that is not configured with static website hosting. To specify any other type of origin, including an Amazon S3 bucket that is configured with static website hosting, use the CustomOriginConfig type instead.

" }, "CustomOriginConfig":{ "shape":"CustomOriginConfig", - "documentation":"

A complex type that contains information about a custom origin. If the origin is an Amazon S3 bucket, use the S3OriginConfig element instead.

" + "documentation":"

Use this type to specify an origin that is a content container or HTTP server, including an Amazon S3 bucket that is configured with static website hosting. To specify an Amazon S3 bucket that is not configured with static website hosting, use the S3OriginConfig type instead.

" + }, + "ConnectionAttempts":{ + "shape":"integer", + "documentation":"

The number of times that CloudFront attempts to connect to the origin. The minimum number is 1, the maximum is 3, and the default (if you don’t specify otherwise) is 3.

For a custom origin (including an Amazon S3 bucket that’s configured with static website hosting), this value also specifies the number of times that CloudFront attempts to get a response from the origin, in the case of an Origin Response Timeout.

For more information, see Origin Connection Attempts in the Amazon CloudFront Developer Guide.

" + }, + "ConnectionTimeout":{ + "shape":"integer", + "documentation":"

The number of seconds that CloudFront waits when trying to establish a connection to the origin. The minimum timeout is 1 second, the maximum is 10 seconds, and the default (if you don’t specify otherwise) is 10 seconds.

For more information, see Origin Connection Timeout in the Amazon CloudFront Developer Guide.

" } }, - "documentation":"

A complex type that describes the Amazon S3 bucket, HTTP server (for example, a web server), Amazon MediaStore, or other server from which CloudFront gets your files. This can also be an origin group, if you've created an origin group. You must specify at least one origin or origin group.

For the current limit on the number of origins or origin groups that you can specify for a distribution, see Amazon CloudFront Limits in the AWS General Reference.

" + "documentation":"

An origin.

An origin is the location where content is stored, and from which CloudFront gets content to serve to viewers. To specify an origin:

For the current maximum number of origins that you can specify per distribution, see General Quotas on Web Distributions in the Amazon CloudFront Developer Guide (quotas were formerly referred to as limits).

" }, "OriginCustomHeader":{ "type":"structure", @@ -4154,7 +4162,7 @@ "members":{ "Message":{"shape":"string"} }, - "documentation":"

The precondition given in one or more of the request-header fields evaluated to false.

", + "documentation":"

The precondition given in one or more of the request header fields evaluated to false.

", "error":{"httpStatusCode":412}, "exception":true }, @@ -4427,7 +4435,7 @@ "documentation":"

The CloudFront origin access identity to associate with the origin. Use an origin access identity to configure the origin so that viewers can only access objects in an Amazon S3 bucket through CloudFront. The format of the value is:

origin-access-identity/cloudfront/ID-of-origin-access-identity

where ID-of-origin-access-identity is the value that CloudFront returned in the ID element when you created the origin access identity.

If you want viewers to be able to access objects using either the CloudFront URL or the Amazon S3 URL, specify an empty OriginAccessIdentity element.

To delete the origin access identity from an existing distribution, update the distribution configuration and include an empty OriginAccessIdentity element.

To replace the origin access identity, update the distribution configuration and specify the new origin access identity.

For more information about the origin access identity, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide.

" } }, - "documentation":"

A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin, use the CustomOriginConfig element instead.

" + "documentation":"

A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin or an S3 bucket that is configured as a website endpoint, use the CustomOriginConfig element instead.

" }, "SSLSupportMethod":{ "type":"string", diff --git a/botocore/data/codeartifact/2018-09-22/paginators-1.json b/botocore/data/codeartifact/2018-09-22/paginators-1.json new file mode 100644 index 00000000..ef860284 --- /dev/null +++ b/botocore/data/codeartifact/2018-09-22/paginators-1.json @@ -0,0 +1,40 @@ +{ + "pagination": { + "ListDomains": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "domains" + }, + "ListPackageVersionAssets": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "assets" + }, + "ListPackageVersions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "versions" + }, + "ListPackages": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "packages" + }, + "ListRepositories": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "repositories" + }, + "ListRepositoriesInDomain": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "repositories" + } + } +} diff --git a/botocore/data/codeartifact/2018-09-22/paginators-1.sdk-extras.json b/botocore/data/codeartifact/2018-09-22/paginators-1.sdk-extras.json new file mode 100644 index 00000000..d58fb3f9 --- /dev/null +++ b/botocore/data/codeartifact/2018-09-22/paginators-1.sdk-extras.json @@ -0,0 +1,24 @@ +{ + "version": 1.0, + "merge": { + "pagination": { + "ListPackageVersionAssets": { + "non_aggregate_keys": [ + "package", + "format", + "namespace", + "version", + "versionRevision" + ] + }, + "ListPackageVersions": { + "non_aggregate_keys": [ + "defaultDisplayVersion", + "format", + "package", + "namespace" + ] + } + } + } +} diff --git a/botocore/data/codeartifact/2018-09-22/service-2.json b/botocore/data/codeartifact/2018-09-22/service-2.json new file mode 100644 
index 00000000..1504f3b5 --- /dev/null +++ b/botocore/data/codeartifact/2018-09-22/service-2.json @@ -0,0 +1,2962 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-09-22", + "endpointPrefix":"codeartifact", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"CodeArtifact", + "serviceId":"codeartifact", + "signatureVersion":"v4", + "signingName":"codeartifact", + "uid":"codeartifact-2018-09-22" + }, + "operations":{ + "AssociateExternalConnection":{ + "name":"AssociateExternalConnection", + "http":{ + "method":"POST", + "requestUri":"/v1/repository/external-connection" + }, + "input":{"shape":"AssociateExternalConnectionRequest"}, + "output":{"shape":"AssociateExternalConnectionResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Adds an existing external connection to a repository. One external connection is allowed per repository.

A repository can have one or more upstream repositories, or an external connection.

" + }, + "CopyPackageVersions":{ + "name":"CopyPackageVersions", + "http":{ + "method":"POST", + "requestUri":"/v1/package/versions/copy" + }, + "input":{"shape":"CopyPackageVersionsRequest"}, + "output":{"shape":"CopyPackageVersionsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Copies package versions from one repository to another repository in the same domain.

You must specify versions or versionRevisions. You cannot specify both.

" + }, + "CreateDomain":{ + "name":"CreateDomain", + "http":{ + "method":"POST", + "requestUri":"/v1/domain" + }, + "input":{"shape":"CreateDomainRequest"}, + "output":{"shape":"CreateDomainResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Creates a domain. CodeArtifact domains make it easier to manage multiple repositories across an organization. You can use a domain to apply permissions across many repositories owned by different AWS accounts. An asset is stored only once in a domain, even if it's in multiple repositories.

Although you can have multiple domains, we recommend a single production domain that contains all published artifacts so that your development teams can find and share packages. You can use a second pre-production domain to test changes to the production domain configuration.

" + }, + "CreateRepository":{ + "name":"CreateRepository", + "http":{ + "method":"POST", + "requestUri":"/v1/repository" + }, + "input":{"shape":"CreateRepositoryRequest"}, + "output":{"shape":"CreateRepositoryResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Creates a repository.

" + }, + "DeleteDomain":{ + "name":"DeleteDomain", + "http":{ + "method":"DELETE", + "requestUri":"/v1/domain" + }, + "input":{"shape":"DeleteDomainRequest"}, + "output":{"shape":"DeleteDomainResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes a domain. You cannot delete a domain that contains repositories. If you want to delete a domain with repositories, first delete its repositories.

" + }, + "DeleteDomainPermissionsPolicy":{ + "name":"DeleteDomainPermissionsPolicy", + "http":{ + "method":"DELETE", + "requestUri":"/v1/domain/permissions/policy" + }, + "input":{"shape":"DeleteDomainPermissionsPolicyRequest"}, + "output":{"shape":"DeleteDomainPermissionsPolicyResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes the resource policy set on a domain.

" + }, + "DeletePackageVersions":{ + "name":"DeletePackageVersions", + "http":{ + "method":"POST", + "requestUri":"/v1/package/versions/delete" + }, + "input":{"shape":"DeletePackageVersionsRequest"}, + "output":{"shape":"DeletePackageVersionsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes one or more versions of a package. A deleted package version cannot be restored in your repository. If you want to remove a package version from your repository and be able to restore it later, set its status to Archived. Archived packages cannot be downloaded from a repository and don't show up with list package APIs (for example, ListPackageVersions ), but you can restore them using UpdatePackageVersionsStatus .

" + }, + "DeleteRepository":{ + "name":"DeleteRepository", + "http":{ + "method":"DELETE", + "requestUri":"/v1/repository" + }, + "input":{"shape":"DeleteRepositoryRequest"}, + "output":{"shape":"DeleteRepositoryResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes a repository.

" + }, + "DeleteRepositoryPermissionsPolicy":{ + "name":"DeleteRepositoryPermissionsPolicy", + "http":{ + "method":"DELETE", + "requestUri":"/v1/repository/permissions/policies" + }, + "input":{"shape":"DeleteRepositoryPermissionsPolicyRequest"}, + "output":{"shape":"DeleteRepositoryPermissionsPolicyResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes the resource policy that is set on a repository. After a resource policy is deleted, the permissions allowed and denied by the deleted policy are removed. The effect of deleting a resource policy might not be immediate.

Use DeleteRepositoryPermissionsPolicy with caution. After a policy is deleted, AWS users, roles, and accounts lose permissions to perform the repository actions granted by the deleted policy.

" + }, + "DescribeDomain":{ + "name":"DescribeDomain", + "http":{ + "method":"GET", + "requestUri":"/v1/domain" + }, + "input":{"shape":"DescribeDomainRequest"}, + "output":{"shape":"DescribeDomainResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns a DomainDescription object that contains information about the requested domain.

" + }, + "DescribePackageVersion":{ + "name":"DescribePackageVersion", + "http":{ + "method":"GET", + "requestUri":"/v1/package/version" + }, + "input":{"shape":"DescribePackageVersionRequest"}, + "output":{"shape":"DescribePackageVersionResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns a PackageVersionDescription object that contains information about the requested package version.

" + }, + "DescribeRepository":{ + "name":"DescribeRepository", + "http":{ + "method":"GET", + "requestUri":"/v1/repository" + }, + "input":{"shape":"DescribeRepositoryRequest"}, + "output":{"shape":"DescribeRepositoryResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns a RepositoryDescription object that contains detailed information about the requested repository.

" + }, + "DisassociateExternalConnection":{ + "name":"DisassociateExternalConnection", + "http":{ + "method":"DELETE", + "requestUri":"/v1/repository/external-connection" + }, + "input":{"shape":"DisassociateExternalConnectionRequest"}, + "output":{"shape":"DisassociateExternalConnectionResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Removes an existing external connection from a repository.

" + }, + "DisposePackageVersions":{ + "name":"DisposePackageVersions", + "http":{ + "method":"POST", + "requestUri":"/v1/package/versions/dispose" + }, + "input":{"shape":"DisposePackageVersionsRequest"}, + "output":{"shape":"DisposePackageVersionsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes the assets in package versions and sets the package versions' status to Disposed. A disposed package version cannot be restored in your repository because its assets are deleted.

To view all disposed package versions in a repository, use ListPackageVersions and set the status parameter to Disposed.

To view information about a disposed package version, use ListPackageVersions and set the status parameter to Disposed.

" + }, + "GetAuthorizationToken":{ + "name":"GetAuthorizationToken", + "http":{ + "method":"POST", + "requestUri":"/v1/authorization-token" + }, + "input":{"shape":"GetAuthorizationTokenRequest"}, + "output":{"shape":"GetAuthorizationTokenResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Generates a temporary authentication token for accessing repositories in the domain. This API requires the codeartifact:GetAuthorizationToken and sts:GetServiceBearerToken permissions.

CodeArtifact authorization tokens are valid for a period of 12 hours when created with the login command. You can call login periodically to refresh the token. When you create an authorization token with the GetAuthorizationToken API, you can set a custom authorization period, up to a maximum of 12 hours, with the durationSeconds parameter.

The authorization period begins after login or GetAuthorizationToken is called. If login or GetAuthorizationToken is called while assuming a role, the token lifetime is independent of the maximum session duration of the role. For example, if you call sts assume-role and specify a session duration of 15 minutes, then generate a CodeArtifact authorization token, the token will be valid for the full authorization period even though this is longer than the 15-minute session duration.

See Using IAM Roles for more information on controlling session duration.

" + }, + "GetDomainPermissionsPolicy":{ + "name":"GetDomainPermissionsPolicy", + "http":{ + "method":"GET", + "requestUri":"/v1/domain/permissions/policy" + }, + "input":{"shape":"GetDomainPermissionsPolicyRequest"}, + "output":{"shape":"GetDomainPermissionsPolicyResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns the resource policy attached to the specified domain.

The policy is a resource-based policy, not an identity-based policy. For more information, see Identity-based policies and resource-based policies in the AWS Identity and Access Management User Guide.

" + }, + "GetPackageVersionAsset":{ + "name":"GetPackageVersionAsset", + "http":{ + "method":"GET", + "requestUri":"/v1/package/version/asset" + }, + "input":{"shape":"GetPackageVersionAssetRequest"}, + "output":{"shape":"GetPackageVersionAssetResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns an asset (or file) that is in a package. For example, for a Maven package version, use GetPackageVersionAsset to download a JAR file, a POM file, or any other assets in the package version.

" + }, + "GetPackageVersionReadme":{ + "name":"GetPackageVersionReadme", + "http":{ + "method":"GET", + "requestUri":"/v1/package/version/readme" + }, + "input":{"shape":"GetPackageVersionReadmeRequest"}, + "output":{"shape":"GetPackageVersionReadmeResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Gets the readme file or descriptive text for a package version. For packages that do not contain a readme file, CodeArtifact extracts a description from a metadata file. For example, from the <description> element in the pom.xml file of a Maven package.

The returned text might contain formatting. For example, it might contain formatting for Markdown or reStructuredText.

" + }, + "GetRepositoryEndpoint":{ + "name":"GetRepositoryEndpoint", + "http":{ + "method":"GET", + "requestUri":"/v1/repository/endpoint" + }, + "input":{"shape":"GetRepositoryEndpointRequest"}, + "output":{"shape":"GetRepositoryEndpointResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each package format:

" + }, + "GetRepositoryPermissionsPolicy":{ + "name":"GetRepositoryPermissionsPolicy", + "http":{ + "method":"GET", + "requestUri":"/v1/repository/permissions/policy" + }, + "input":{"shape":"GetRepositoryPermissionsPolicyRequest"}, + "output":{"shape":"GetRepositoryPermissionsPolicyResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns the resource policy that is set on a repository.

" + }, + "ListDomains":{ + "name":"ListDomains", + "http":{ + "method":"POST", + "requestUri":"/v1/domains" + }, + "input":{"shape":"ListDomainsRequest"}, + "output":{"shape":"ListDomainsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns a list of DomainSummary objects for all domains owned by the AWS account that makes this call. Each returned DomainSummary object contains information about a domain.

" + }, + "ListPackageVersionAssets":{ + "name":"ListPackageVersionAssets", + "http":{ + "method":"POST", + "requestUri":"/v1/package/version/assets" + }, + "input":{"shape":"ListPackageVersionAssetsRequest"}, + "output":{"shape":"ListPackageVersionAssetsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns a list of AssetSummary objects for assets in a package version.

" + }, + "ListPackageVersionDependencies":{ + "name":"ListPackageVersionDependencies", + "http":{ + "method":"POST", + "requestUri":"/v1/package/version/dependencies" + }, + "input":{"shape":"ListPackageVersionDependenciesRequest"}, + "output":{"shape":"ListPackageVersionDependenciesResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns the direct dependencies for a package version. The dependencies are returned as PackageDependency objects. CodeArtifact extracts the dependencies for a package version from the metadata file for the package format (for example, the package.json file for npm packages and the pom.xml file for Maven). Any package version dependencies that are not listed in the configuration file are not returned.

" + }, + "ListPackageVersions":{ + "name":"ListPackageVersions", + "http":{ + "method":"POST", + "requestUri":"/v1/package/versions" + }, + "input":{"shape":"ListPackageVersionsRequest"}, + "output":{"shape":"ListPackageVersionsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns a list of PackageVersionSummary objects for package versions in a repository that match the request parameters.

" + }, + "ListPackages":{ + "name":"ListPackages", + "http":{ + "method":"POST", + "requestUri":"/v1/packages" + }, + "input":{"shape":"ListPackagesRequest"}, + "output":{"shape":"ListPackagesResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns a list of PackageSummary objects for packages in a repository that match the request parameters.

" + }, + "ListRepositories":{ + "name":"ListRepositories", + "http":{ + "method":"POST", + "requestUri":"/v1/repositories" + }, + "input":{"shape":"ListRepositoriesRequest"}, + "output":{"shape":"ListRepositoriesResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns a list of RepositorySummary objects. Each RepositorySummary contains information about a repository in the specified AWS account and that matches the input parameters.

" + }, + "ListRepositoriesInDomain":{ + "name":"ListRepositoriesInDomain", + "http":{ + "method":"POST", + "requestUri":"/v1/domain/repositories" + }, + "input":{"shape":"ListRepositoriesInDomainRequest"}, + "output":{"shape":"ListRepositoriesInDomainResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns a list of RepositorySummary objects. Each RepositorySummary contains information about a repository in the specified domain and that matches the input parameters.

" + }, + "PutDomainPermissionsPolicy":{ + "name":"PutDomainPermissionsPolicy", + "http":{ + "method":"PUT", + "requestUri":"/v1/domain/permissions/policy" + }, + "input":{"shape":"PutDomainPermissionsPolicyRequest"}, + "output":{"shape":"PutDomainPermissionsPolicyResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Sets a resource policy on a domain that specifies permissions to access it.

" + }, + "PutRepositoryPermissionsPolicy":{ + "name":"PutRepositoryPermissionsPolicy", + "http":{ + "method":"PUT", + "requestUri":"/v1/repository/permissions/policy" + }, + "input":{"shape":"PutRepositoryPermissionsPolicyRequest"}, + "output":{"shape":"PutRepositoryPermissionsPolicyResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Sets the resource policy on a repository that specifies permissions to access it.

" + }, + "UpdatePackageVersionsStatus":{ + "name":"UpdatePackageVersionsStatus", + "http":{ + "method":"POST", + "requestUri":"/v1/package/versions/update_status" + }, + "input":{"shape":"UpdatePackageVersionsStatusRequest"}, + "output":{"shape":"UpdatePackageVersionsStatusResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Updates the status of one or more versions of a package.

" + }, + "UpdateRepository":{ + "name":"UpdateRepository", + "http":{ + "method":"PUT", + "requestUri":"/v1/repository" + }, + "input":{"shape":"UpdateRepositoryRequest"}, + "output":{"shape":"UpdateRepositoryResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Update the properties of a repository.

" + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The operation did not succeed because of an unauthorized access attempt.

", + "error":{"httpStatusCode":403}, + "exception":true + }, + "AccountId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"[0-9]{12}" + }, + "Arn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"\\S+" + }, + "Asset":{ + "type":"blob", + "streaming":true + }, + "AssetHashes":{ + "type":"map", + "key":{"shape":"HashAlgorithm"}, + "value":{"shape":"HashValue"} + }, + "AssetName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"\\P{C}+" + }, + "AssetSummary":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"AssetName", + "documentation":"

The name of the asset.

" + }, + "size":{ + "shape":"LongOptional", + "documentation":"

The size of the asset.

" + }, + "hashes":{ + "shape":"AssetHashes", + "documentation":"

The hashes of the asset.

" + } + }, + "documentation":"

Contains details about a package version asset.

" + }, + "AssetSummaryList":{ + "type":"list", + "member":{"shape":"AssetSummary"} + }, + "AssociateExternalConnectionRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "externalConnection" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the repository.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository to which the external connection is added.

", + "location":"querystring", + "locationName":"repository" + }, + "externalConnection":{ + "shape":"ExternalConnectionName", + "documentation":"

The name of the external connection to add to the repository. The following values are supported:

", + "location":"querystring", + "locationName":"external-connection" + } + } + }, + "AssociateExternalConnectionResult":{ + "type":"structure", + "members":{ + "repository":{ + "shape":"RepositoryDescription", + "documentation":"

Information about the connected repository after processing the request.

" + } + } + }, + "AuthorizationTokenDurationSeconds":{ + "type":"long", + "max":43200, + "min":0 + }, + "BooleanOptional":{"type":"boolean"}, + "ConflictException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

The ID of the resource.

" + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

The type of AWS resource.

" + } + }, + "documentation":"

The operation did not succeed because prerequisites are not met.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "CopyPackageVersionsRequest":{ + "type":"structure", + "required":[ + "domain", + "sourceRepository", + "destinationRepository", + "format", + "package" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the source and destination repositories.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "sourceRepository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository that contains the package versions to copy.

", + "location":"querystring", + "locationName":"source-repository" + }, + "destinationRepository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository into which package versions are copied.

", + "location":"querystring", + "locationName":"destination-repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

The format of the package that is copied. The valid package types are:

", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package that is copied.

", + "location":"querystring", + "locationName":"package" + }, + "versions":{ + "shape":"PackageVersionList", + "documentation":"

The versions of the package to copy.

You must specify versions or versionRevisions. You cannot specify both.

" + }, + "versionRevisions":{ + "shape":"PackageVersionRevisionMap", + "documentation":"

A list of key-value pairs. The keys are package versions and the values are package version revisions. A CopyPackageVersions operation succeeds if the specified versions in the source repository match the specified package version revision.

You must specify versions or versionRevisions. You cannot specify both.

" + }, + "allowOverwrite":{ + "shape":"BooleanOptional", + "documentation":"

Set to true to overwrite a package version that already exists in the destination repository. If set to false and the package version already exists in the destination repository, the package version is returned in the failedVersions field of the response with an ALREADY_EXISTS error code.

" + }, + "includeFromUpstream":{ + "shape":"BooleanOptional", + "documentation":"

Set to true to copy packages from repositories that are upstream from the source repository to the destination repository. The default setting is false. For more information, see Working with upstream repositories.

" + } + } + }, + "CopyPackageVersionsResult":{ + "type":"structure", + "members":{ + "successfulVersions":{ + "shape":"SuccessfulPackageVersionInfoMap", + "documentation":"

A list of the package versions that were successfully copied to your repository.

" + }, + "failedVersions":{ + "shape":"PackageVersionErrorMap", + "documentation":"

A map of package versions that failed to copy and their error codes. The possible error codes are in the PackageVersionError data type. They are:

" + } + } + }, + "CreateDomainRequest":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain to create. All domain names in an AWS Region that are in the same AWS account must be unique. The domain name is used as the prefix in DNS hostnames. Do not use sensitive information in a domain name because it is publicly discoverable.

", + "location":"querystring", + "locationName":"domain" + }, + "encryptionKey":{ + "shape":"Arn", + "documentation":"

The encryption key for the domain. This is used to encrypt content stored in a domain. An encryption key can be a key ID, a key Amazon Resource Name (ARN), a key alias, or a key alias ARN. To specify an encryptionKey, your IAM role must have kms:DescribeKey and kms:CreateGrant permissions on the encryption key that is used. For more information, see DescribeKey in the AWS Key Management Service API Reference and AWS KMS API Permissions Reference in the AWS Key Management Service Developer Guide.

CodeArtifact supports only symmetric CMKs. Do not associate an asymmetric CMK with your domain. For more information, see Using symmetric and asymmetric keys in the AWS Key Management Service Developer Guide.

" + } + } + }, + "CreateDomainResult":{ + "type":"structure", + "members":{ + "domain":{ + "shape":"DomainDescription", + "documentation":"

Contains information about the created domain after processing the request.

" + } + } + }, + "CreateRepositoryRequest":{ + "type":"structure", + "required":[ + "domain", + "repository" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The domain that contains the created repository.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository to create.

", + "location":"querystring", + "locationName":"repository" + }, + "description":{ + "shape":"Description", + "documentation":"

A description of the created repository.

" + }, + "upstreams":{ + "shape":"UpstreamRepositoryList", + "documentation":"

A list of upstream repositories to associate with the repository. The order of the upstream repositories in the list determines their priority order when AWS CodeArtifact looks for a requested package version. For more information, see Working with upstream repositories.

" + } + } + }, + "CreateRepositoryResult":{ + "type":"structure", + "members":{ + "repository":{ + "shape":"RepositoryDescription", + "documentation":"

Information about the created repository after processing the request.

" + } + } + }, + "DeleteDomainPermissionsPolicyRequest":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain associated with the resource policy to be deleted.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "policyRevision":{ + "shape":"PolicyRevision", + "documentation":"

The current revision of the resource policy to be deleted. This revision is used for optimistic locking, which prevents others from overwriting your changes to the domain's resource policy.

", + "location":"querystring", + "locationName":"policy-revision" + } + } + }, + "DeleteDomainPermissionsPolicyResult":{ + "type":"structure", + "members":{ + "policy":{ + "shape":"ResourcePolicy", + "documentation":"

Information about the deleted resource policy after processing the request.

" + } + } + }, + "DeleteDomainRequest":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain to delete.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + } + } + }, + "DeleteDomainResult":{ + "type":"structure", + "members":{ + "domain":{ + "shape":"DomainDescription", + "documentation":"

Contains information about the deleted domain after processing the request.

" + } + } + }, + "DeletePackageVersionsRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package", + "versions" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the package to delete.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository that contains the package versions to delete.

", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

The format of the package versions to delete. The valid values are:

", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package with the versions to delete.

", + "location":"querystring", + "locationName":"package" + }, + "versions":{ + "shape":"PackageVersionList", + "documentation":"

An array of strings that specify the versions of the package to delete.

" + }, + "expectedStatus":{ + "shape":"PackageVersionStatus", + "documentation":"

The expected status of the package version to delete. Valid values are:

" + } + } + }, + "DeletePackageVersionsResult":{ + "type":"structure", + "members":{ + "successfulVersions":{ + "shape":"SuccessfulPackageVersionInfoMap", + "documentation":"

A list of the package versions that were successfully deleted.

" + }, + "failedVersions":{ + "shape":"PackageVersionErrorMap", + "documentation":"

A PackageVersionError object that contains a map of errors codes for the deleted package that failed. The possible error codes are:

" + } + } + }, + "DeleteRepositoryPermissionsPolicyRequest":{ + "type":"structure", + "required":[ + "domain", + "repository" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the repository associated with the resource policy to be deleted.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository that is associated with the resource policy to be deleted

", + "location":"querystring", + "locationName":"repository" + }, + "policyRevision":{ + "shape":"PolicyRevision", + "documentation":"

The revision of the repository's resource policy to be deleted. This revision is used for optimistic locking, which prevents others from accidentally overwriting your changes to the repository's resource policy.

", + "location":"querystring", + "locationName":"policy-revision" + } + } + }, + "DeleteRepositoryPermissionsPolicyResult":{ + "type":"structure", + "members":{ + "policy":{ + "shape":"ResourcePolicy", + "documentation":"

Information about the deleted policy after processing the request.

" + } + } + }, + "DeleteRepositoryRequest":{ + "type":"structure", + "required":[ + "domain", + "repository" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the repository to delete.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository to delete.

", + "location":"querystring", + "locationName":"repository" + } + } + }, + "DeleteRepositoryResult":{ + "type":"structure", + "members":{ + "repository":{ + "shape":"RepositoryDescription", + "documentation":"

Information about the deleted repository after processing the request.

" + } + } + }, + "DescribeDomainRequest":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

A string that specifies the name of the requested domain.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + } + } + }, + "DescribeDomainResult":{ + "type":"structure", + "members":{ + "domain":{"shape":"DomainDescription"} + } + }, + "DescribePackageVersionRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package", + "packageVersion" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the repository that contains the package version.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository that contains the package version.

", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

A format that specifies the type of the requested package version. The valid values are:

", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the requested package version.

", + "location":"querystring", + "locationName":"package" + }, + "packageVersion":{ + "shape":"PackageVersion", + "documentation":"

A string that contains the package version (for example, 3.5.2).

", + "location":"querystring", + "locationName":"version" + } + } + }, + "DescribePackageVersionResult":{ + "type":"structure", + "required":["packageVersion"], + "members":{ + "packageVersion":{ + "shape":"PackageVersionDescription", + "documentation":"

A PackageVersionDescription object that contains information about the requested package version.

" + } + } + }, + "DescribeRepositoryRequest":{ + "type":"structure", + "required":[ + "domain", + "repository" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the repository to describe.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

A string that specifies the name of the requested repository.

", + "location":"querystring", + "locationName":"repository" + } + } + }, + "DescribeRepositoryResult":{ + "type":"structure", + "members":{ + "repository":{ + "shape":"RepositoryDescription", + "documentation":"

A RepositoryDescription object that contains the requested repository information.

" + } + } + }, + "Description":{ + "type":"string", + "max":1000, + "pattern":"\\P{C}+" + }, + "DisassociateExternalConnectionRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "externalConnection" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the repository from which to remove the external repository.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository from which the external connection will be removed.

", + "location":"querystring", + "locationName":"repository" + }, + "externalConnection":{ + "shape":"ExternalConnectionName", + "documentation":"

The name of the external connection to be removed from the repository.

", + "location":"querystring", + "locationName":"external-connection" + } + } + }, + "DisassociateExternalConnectionResult":{ + "type":"structure", + "members":{ + "repository":{ + "shape":"RepositoryDescription", + "documentation":"

The repository associated with the removed external connection.

" + } + } + }, + "DisposePackageVersionsRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package", + "versions" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the repository you want to dispose.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository that contains the package versions you want to dispose.

", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

A format that specifies the type of package versions you want to dispose. The valid values are:

", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package with the versions you want to dispose.

", + "location":"querystring", + "locationName":"package" + }, + "versions":{ + "shape":"PackageVersionList", + "documentation":"

The versions of the package you want to dispose.

" + }, + "versionRevisions":{ + "shape":"PackageVersionRevisionMap", + "documentation":"

The revisions of the package versions you want to dispose.

" + }, + "expectedStatus":{ + "shape":"PackageVersionStatus", + "documentation":"

The expected status of the package version to dispose. Valid values are:

" + } + } + }, + "DisposePackageVersionsResult":{ + "type":"structure", + "members":{ + "successfulVersions":{ + "shape":"SuccessfulPackageVersionInfoMap", + "documentation":"

A list of the package versions that were successfully disposed.

" + }, + "failedVersions":{ + "shape":"PackageVersionErrorMap", + "documentation":"

A PackageVersionError object that contains a map of errors codes for the disposed package versions that failed. The possible error codes are:

" + } + } + }, + "DomainDescription":{ + "type":"structure", + "members":{ + "name":{ + "shape":"DomainName", + "documentation":"

The name of the domain.

" + }, + "owner":{ + "shape":"AccountId", + "documentation":"

The AWS account ID that owns the domain.

" + }, + "arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the domain.

" + }, + "status":{ + "shape":"DomainStatus", + "documentation":"

The current status of a domain. The valid values are

" + }, + "createdTime":{ + "shape":"Timestamp", + "documentation":"

A timestamp that represents the date and time the domain was created.

" + }, + "encryptionKey":{ + "shape":"Arn", + "documentation":"

The ARN of an AWS Key Management Service (AWS KMS) key associated with a domain.

" + }, + "repositoryCount":{ + "shape":"Integer", + "documentation":"

The number of repositories in the domain.

" + }, + "assetSizeBytes":{ + "shape":"Long", + "documentation":"

The total size of all assets in the domain.

" + } + }, + "documentation":"

Information about a domain. A domain is a container for repositories. When you create a domain, it is empty until you add one or more repositories.

" + }, + "DomainName":{ + "type":"string", + "max":50, + "min":2, + "pattern":"[a-z][a-z0-9\\-]{0,48}[a-z0-9]" + }, + "DomainStatus":{ + "type":"string", + "enum":[ + "Active", + "Deleted" + ] + }, + "DomainSummary":{ + "type":"structure", + "members":{ + "name":{ + "shape":"DomainName", + "documentation":"

The name of the domain.

" + }, + "owner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

" + }, + "arn":{ + "shape":"Arn", + "documentation":"

The ARN of the domain.

" + }, + "status":{ + "shape":"DomainStatus", + "documentation":"

A string that contains the status of the domain. The valid values are:

" + }, + "createdTime":{ + "shape":"Timestamp", + "documentation":"

A timestamp that contains the date and time the domain was created.

" + }, + "encryptionKey":{ + "shape":"Arn", + "documentation":"

The key used to encrypt the domain.

" + } + }, + "documentation":"

Information about a domain, including its name, Amazon Resource Name (ARN), and status. The ListDomains operation returns a list of DomainSummary objects.

" + }, + "DomainSummaryList":{ + "type":"list", + "member":{"shape":"DomainSummary"} + }, + "ErrorMessage":{"type":"string"}, + "ExternalConnectionName":{ + "type":"string", + "pattern":"[A-Za-z0-9][A-Za-z0-9._\\-:]{1,99}" + }, + "ExternalConnectionStatus":{ + "type":"string", + "enum":["Available"] + }, + "GetAuthorizationTokenRequest":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that is in scope for the generated authorization token.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "durationSeconds":{ + "shape":"AuthorizationTokenDurationSeconds", + "documentation":"

The time, in seconds, that the generated authorization token is valid.

", + "location":"querystring", + "locationName":"duration" + } + } + }, + "GetAuthorizationTokenResult":{ + "type":"structure", + "members":{ + "authorizationToken":{ + "shape":"String", + "documentation":"

The returned authentication token.

" + }, + "expiration":{ + "shape":"Timestamp", + "documentation":"

A timestamp that specifies the date and time the authorization token expires.

" + } + } + }, + "GetDomainPermissionsPolicyRequest":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain to which the resource policy is attached.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + } + } + }, + "GetDomainPermissionsPolicyResult":{ + "type":"structure", + "members":{ + "policy":{ + "shape":"ResourcePolicy", + "documentation":"

The returned resource policy.

" + } + } + }, + "GetPackageVersionAssetRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package", + "packageVersion", + "asset" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The domain that contains the repository that contains the package version with the requested asset.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The repository that contains the package version with the requested asset.

", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

A format that specifies the type of the package version with the requested asset file. The valid values are:

", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package that contains the requested asset.

", + "location":"querystring", + "locationName":"package" + }, + "packageVersion":{ + "shape":"PackageVersion", + "documentation":"

A string that contains the package version (for example, 3.5.2).

", + "location":"querystring", + "locationName":"version" + }, + "asset":{ + "shape":"AssetName", + "documentation":"

The name of the requested asset.

", + "location":"querystring", + "locationName":"asset" + }, + "packageVersionRevision":{ + "shape":"PackageVersionRevision", + "documentation":"

The name of the package version revision that contains the requested asset.

", + "location":"querystring", + "locationName":"revision" + } + } + }, + "GetPackageVersionAssetResult":{ + "type":"structure", + "members":{ + "asset":{ + "shape":"Asset", + "documentation":"

The binary file, or asset, that is downloaded.

" + }, + "assetName":{ + "shape":"AssetName", + "documentation":"

The name of the asset that is downloaded.

", + "location":"header", + "locationName":"X-AssetName" + }, + "packageVersion":{ + "shape":"PackageVersion", + "documentation":"

A string that contains the package version (for example, 3.5.2).

", + "location":"header", + "locationName":"X-PackageVersion" + }, + "packageVersionRevision":{ + "shape":"PackageVersionRevision", + "documentation":"

The name of the package version revision that contains the downloaded asset.

", + "location":"header", + "locationName":"X-PackageVersionRevision" + } + }, + "payload":"asset" + }, + "GetPackageVersionReadmeRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package", + "packageVersion" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the repository that contains the package version with the requested readme file.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The repository that contains the package with the requested readme file.

", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

A format that specifies the type of the package version with the requested readme file. The valid values are:

", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package version that contains the requested readme file.

", + "location":"querystring", + "locationName":"package" + }, + "packageVersion":{ + "shape":"PackageVersion", + "documentation":"

A string that contains the package version (for example, 3.5.2).

", + "location":"querystring", + "locationName":"version" + } + } + }, + "GetPackageVersionReadmeResult":{ + "type":"structure", + "members":{ + "format":{ + "shape":"PackageFormat", + "documentation":"

The format of the package with the requested readme file. Valid format types are:

" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package that contains the returned readme file.

" + }, + "version":{ + "shape":"PackageVersion", + "documentation":"

The version of the package with the requested readme file.

" + }, + "versionRevision":{ + "shape":"PackageVersionRevision", + "documentation":"

The current revision associated with the package version.

" + }, + "readme":{ + "shape":"String", + "documentation":"

The text of the returned readme file.

" + } + } + }, + "GetRepositoryEndpointRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the repository.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain that contains the repository. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository.

", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

Returns which endpoint of a repository to return. A repository has one endpoint for each package format:

", + "location":"querystring", + "locationName":"format" + } + } + }, + "GetRepositoryEndpointResult":{ + "type":"structure", + "members":{ + "repositoryEndpoint":{ + "shape":"String", + "documentation":"

A string that specifies the URL of the returned endpoint.

" + } + } + }, + "GetRepositoryPermissionsPolicyRequest":{ + "type":"structure", + "required":[ + "domain", + "repository" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain containing the repository whose associated resource policy is to be retrieved.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository whose associated resource policy is to be retrieved.

", + "location":"querystring", + "locationName":"repository" + } + } + }, + "GetRepositoryPermissionsPolicyResult":{ + "type":"structure", + "members":{ + "policy":{ + "shape":"ResourcePolicy", + "documentation":"

The returned resource policy.

" + } + } + }, + "HashAlgorithm":{ + "type":"string", + "enum":[ + "MD5", + "SHA-1", + "SHA-256", + "SHA-512" + ] + }, + "HashValue":{ + "type":"string", + "max":512, + "min":32, + "pattern":"[0-9a-f]+" + }, + "Integer":{"type":"integer"}, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The operation did not succeed because of an error that occurred inside AWS CodeArtifact.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "LicenseInfo":{ + "type":"structure", + "members":{ + "name":{ + "shape":"String", + "documentation":"

Name of the license.

" + }, + "url":{ + "shape":"String", + "documentation":"

The URL for license data.

" + } + }, + "documentation":"

Details of the license data.

" + }, + "LicenseInfoList":{ + "type":"list", + "member":{"shape":"LicenseInfo"} + }, + "ListDomainsMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, + "ListDomainsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListDomainsMaxResults", + "documentation":"

The maximum number of results to return per page.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

" + } + } + }, + "ListDomainsResult":{ + "type":"structure", + "members":{ + "domains":{ + "shape":"DomainSummaryList", + "documentation":"

The returned list of DomainSummary objects.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

" + } + } + }, + "ListPackageVersionAssetsMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, + "ListPackageVersionAssetsRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package", + "packageVersion" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the repository associated with the package version assets.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository that contains the package that contains the returned package version assets.

", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

The format of the package that contains the returned package version assets. The valid package types are:

", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package that contains the returned package version assets.

", + "location":"querystring", + "locationName":"package" + }, + "packageVersion":{ + "shape":"PackageVersion", + "documentation":"

A string that contains the package version (for example, 3.5.2).

", + "location":"querystring", + "locationName":"version" + }, + "maxResults":{ + "shape":"ListPackageVersionAssetsMaxResults", + "documentation":"

The maximum number of results to return per page.

", + "location":"querystring", + "locationName":"max-results" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"next-token" + } + } + }, + "ListPackageVersionAssetsResult":{ + "type":"structure", + "members":{ + "format":{ + "shape":"PackageFormat", + "documentation":"

The format of the package that contains the returned package version assets.

" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package that contains the returned package version assets.

" + }, + "version":{ + "shape":"PackageVersion", + "documentation":"

The version of the package associated with the returned assets.

" + }, + "versionRevision":{ + "shape":"PackageVersionRevision", + "documentation":"

The current revision associated with the package version.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + }, + "assets":{ + "shape":"AssetSummaryList", + "documentation":"

The returned list of AssetSummary objects.

" + } + } + }, + "ListPackageVersionDependenciesRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package", + "packageVersion" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The domain that contains the repository that contains the requested package version dependencies.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository that contains the requested package version.

", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

The format of the package with the requested dependencies. The valid package types are:

", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package versions' package.

", + "location":"querystring", + "locationName":"package" + }, + "packageVersion":{ + "shape":"PackageVersion", + "documentation":"

A string that contains the package version (for example, 3.5.2).

", + "location":"querystring", + "locationName":"version" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"next-token" + } + } + }, + "ListPackageVersionDependenciesResult":{ + "type":"structure", + "members":{ + "format":{ + "shape":"PackageFormat", + "documentation":"

A format that specifies the type of the package that contains the returned dependencies. The valid values are:

" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package that contains the returned package versions dependencies.

" + }, + "version":{ + "shape":"PackageVersion", + "documentation":"

The version of the package that is specified in the request.

" + }, + "versionRevision":{ + "shape":"PackageVersionRevision", + "documentation":"

The current revision associated with the package version.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

" + }, + "dependencies":{ + "shape":"PackageDependencyList", + "documentation":"

The returned list of PackageDependency objects.

" + } + } + }, + "ListPackageVersionsMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, + "ListPackageVersionsRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the repository that contains the returned package versions.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository that contains the package.

", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

The format of the returned packages. The valid package types are:

", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package for which you want to return a list of package versions.

", + "location":"querystring", + "locationName":"package" + }, + "status":{ + "shape":"PackageVersionStatus", + "documentation":"

A string that specifies the status of the package versions to include in the returned list. It can be one of the following:

", + "location":"querystring", + "locationName":"status" + }, + "sortBy":{ + "shape":"PackageVersionSortType", + "documentation":"

How to sort the returned list of package versions.

", + "location":"querystring", + "locationName":"sortBy" + }, + "maxResults":{ + "shape":"ListPackageVersionsMaxResults", + "documentation":"

The maximum number of results to return per page.

", + "location":"querystring", + "locationName":"max-results" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"next-token" + } + } + }, + "ListPackageVersionsResult":{ + "type":"structure", + "members":{ + "defaultDisplayVersion":{ + "shape":"PackageVersion", + "documentation":"

The default package version to display. This depends on the package format:

" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

A format of the package. Valid package format values are:

" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package.

" + }, + "versions":{ + "shape":"PackageVersionSummaryList", + "documentation":"

The returned list of PackageVersionSummary objects.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + } + } + }, + "ListPackagesMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, + "ListPackagesRequest":{ + "type":"structure", + "required":[ + "domain", + "repository" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The domain that contains the repository that contains the requested list of packages.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository from which packages are to be listed.

", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

The format of the packages. The valid package types are:

", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", + "location":"querystring", + "locationName":"namespace" + }, + "packagePrefix":{ + "shape":"PackageName", + "documentation":"

A prefix used to filter returned packages. Only packages with names that start with packagePrefix are returned.

", + "location":"querystring", + "locationName":"package-prefix" + }, + "maxResults":{ + "shape":"ListPackagesMaxResults", + "documentation":"

The maximum number of results to return per page.

", + "location":"querystring", + "locationName":"max-results" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"next-token" + } + } + }, + "ListPackagesResult":{ + "type":"structure", + "members":{ + "packages":{ + "shape":"PackageSummaryList", + "documentation":"

The list of returned PackageSummary objects.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + } + } + }, + "ListRepositoriesInDomainMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, + "ListRepositoriesInDomainRequest":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the returned list of repositories.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "administratorAccount":{ + "shape":"AccountId", + "documentation":"

Filter the list of repositories to only include those that are managed by the AWS account ID.

", + "location":"querystring", + "locationName":"administrator-account" + }, + "repositoryPrefix":{ + "shape":"RepositoryName", + "documentation":"

A prefix used to filter returned repositories. Only repositories with names that start with repositoryPrefix are returned.

", + "location":"querystring", + "locationName":"repository-prefix" + }, + "maxResults":{ + "shape":"ListRepositoriesInDomainMaxResults", + "documentation":"

The maximum number of results to return per page.

", + "location":"querystring", + "locationName":"max-results" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"next-token" + } + } + }, + "ListRepositoriesInDomainResult":{ + "type":"structure", + "members":{ + "repositories":{ + "shape":"RepositorySummaryList", + "documentation":"

The returned list of repositories.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + } + } + }, + "ListRepositoriesMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, + "ListRepositoriesRequest":{ + "type":"structure", + "members":{ + "repositoryPrefix":{ + "shape":"RepositoryName", + "documentation":"

A prefix used to filter returned repositories. Only repositories with names that start with repositoryPrefix are returned.

", + "location":"querystring", + "locationName":"repository-prefix" + }, + "maxResults":{ + "shape":"ListRepositoriesMaxResults", + "documentation":"

The maximum number of results to return per page.

", + "location":"querystring", + "locationName":"max-results" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"next-token" + } + } + }, + "ListRepositoriesResult":{ + "type":"structure", + "members":{ + "repositories":{ + "shape":"RepositorySummaryList", + "documentation":"

The returned list of RepositorySummary objects.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + } + } + }, + "Long":{"type":"long"}, + "LongOptional":{"type":"long"}, + "PackageDependency":{ + "type":"structure", + "members":{ + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package that this package depends on.

" + }, + "dependencyType":{ + "shape":"String", + "documentation":"

The type of a package dependency. The possible values depend on the package type. Example types are compile, runtime, and test for Maven packages, and dev, prod, and optional for npm packages.

" + }, + "versionRequirement":{ + "shape":"String", + "documentation":"

The required version, or version range, of the package that this package depends on. The version format is specific to the package type. For example, the following are possible valid required versions: 1.2.3, ^2.3.4, or 4.x.

" + } + }, + "documentation":"

Details about a package dependency.

" + }, + "PackageDependencyList":{ + "type":"list", + "member":{"shape":"PackageDependency"} + }, + "PackageFormat":{ + "type":"string", + "enum":[ + "npm", + "pypi", + "maven" + ] + }, + "PackageName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[^!#/\\s]+" + }, + "PackageNamespace":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[^!#/\\s]+" + }, + "PackageSummary":{ + "type":"structure", + "members":{ + "format":{ + "shape":"PackageFormat", + "documentation":"

The format of the package. Valid values are:

" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package.

" + } + }, + "documentation":"

Details about a package, including its format, namespace, and name. The ListPackages operation returns a list of PackageSummary objects.

" + }, + "PackageSummaryList":{ + "type":"list", + "member":{"shape":"PackageSummary"} + }, + "PackageVersion":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[^!#/\\s]+" + }, + "PackageVersionDescription":{ + "type":"structure", + "members":{ + "format":{ + "shape":"PackageFormat", + "documentation":"

The format of the package version. The valid package formats are:

" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

" + }, + "packageName":{ + "shape":"PackageName", + "documentation":"

The name of the requested package.

" + }, + "displayName":{ + "shape":"String255", + "documentation":"

The name of the package that is displayed. The displayName varies depending on the package version's format. For example, if an npm package is named ui, is in the namespace vue, and has the format npm, then the displayName is @vue/ui.

" + }, + "version":{ + "shape":"PackageVersion", + "documentation":"

The version of the package.

" + }, + "summary":{ + "shape":"String", + "documentation":"

A summary of the package version. The summary is extracted from the package. The information in and detail level of the summary depends on the package version's format.

" + }, + "homePage":{ + "shape":"String", + "documentation":"

The homepage associated with the package.

" + }, + "sourceCodeRepository":{ + "shape":"String", + "documentation":"

The repository for the source code in the package version, or the source code used to build it.

" + }, + "publishedTime":{ + "shape":"Timestamp", + "documentation":"

A timestamp that contains the date and time the package version was published.

" + }, + "licenses":{ + "shape":"LicenseInfoList", + "documentation":"

Information about licenses associated with the package version.

" + }, + "revision":{ + "shape":"PackageVersionRevision", + "documentation":"

The revision of the package version.

" + }, + "status":{ + "shape":"PackageVersionStatus", + "documentation":"

A string that contains the status of the package version. It can be one of the following:

" + } + }, + "documentation":"

Details about a package version.

" + }, + "PackageVersionError":{ + "type":"structure", + "members":{ + "errorCode":{ + "shape":"PackageVersionErrorCode", + "documentation":"

The error code associated with the error. Valid error codes are:

" + }, + "errorMessage":{ + "shape":"ErrorMessage", + "documentation":"

The error message associated with the error.

" + } + }, + "documentation":"

An error associated with package.

" + }, + "PackageVersionErrorCode":{ + "type":"string", + "enum":[ + "ALREADY_EXISTS", + "MISMATCHED_REVISION", + "MISMATCHED_STATUS", + "NOT_ALLOWED", + "NOT_FOUND", + "SKIPPED" + ] + }, + "PackageVersionErrorMap":{ + "type":"map", + "key":{"shape":"PackageVersion"}, + "value":{"shape":"PackageVersionError"} + }, + "PackageVersionList":{ + "type":"list", + "member":{"shape":"PackageVersion"} + }, + "PackageVersionRevision":{ + "type":"string", + "max":50, + "min":1, + "pattern":"\\S+" + }, + "PackageVersionRevisionMap":{ + "type":"map", + "key":{"shape":"PackageVersion"}, + "value":{"shape":"PackageVersionRevision"} + }, + "PackageVersionSortType":{ + "type":"string", + "enum":["PUBLISHED_TIME"] + }, + "PackageVersionStatus":{ + "type":"string", + "enum":[ + "Published", + "Unfinished", + "Unlisted", + "Archived", + "Disposed", + "Deleted" + ] + }, + "PackageVersionSummary":{ + "type":"structure", + "required":[ + "version", + "status" + ], + "members":{ + "version":{ + "shape":"PackageVersion", + "documentation":"

Information about a package version.

" + }, + "revision":{ + "shape":"PackageVersionRevision", + "documentation":"

The revision associated with a package version.

" + }, + "status":{ + "shape":"PackageVersionStatus", + "documentation":"

A string that contains the status of the package version. It can be one of the following:

" + } + }, + "documentation":"

Details about a package version, including its status, version, and revision. The ListPackageVersions operation returns a list of PackageVersionSummary objects.

" + }, + "PackageVersionSummaryList":{ + "type":"list", + "member":{"shape":"PackageVersionSummary"} + }, + "PaginationToken":{ + "type":"string", + "max":2000, + "min":1, + "pattern":"\\S+" + }, + "PolicyDocument":{ + "type":"string", + "max":5120, + "min":1 + }, + "PolicyRevision":{ + "type":"string", + "max":100, + "min":1, + "pattern":"\\S+" + }, + "PutDomainPermissionsPolicyRequest":{ + "type":"structure", + "required":[ + "domain", + "policyDocument" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain on which to set the resource policy.

" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

" + }, + "policyRevision":{ + "shape":"PolicyRevision", + "documentation":"

The current revision of the resource policy to be set. This revision is used for optimistic locking, which prevents others from overwriting your changes to the domain's resource policy.

" + }, + "policyDocument":{ + "shape":"PolicyDocument", + "documentation":"

A valid displayable JSON Aspen policy string to be set as the access control resource policy on the provided domain.

" + } + } + }, + "PutDomainPermissionsPolicyResult":{ + "type":"structure", + "members":{ + "policy":{ + "shape":"ResourcePolicy", + "documentation":"

The resource policy that was set after processing the request.

" + } + } + }, + "PutRepositoryPermissionsPolicyRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "policyDocument" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain containing the repository to set the resource policy on.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository to set the resource policy on.

", + "location":"querystring", + "locationName":"repository" + }, + "policyRevision":{ + "shape":"PolicyRevision", + "documentation":"

Sets the revision of the resource policy that specifies permissions to access the repository. This revision is used for optimistic locking, which prevents others from overwriting your changes to the repository's resource policy.

" + }, + "policyDocument":{ + "shape":"PolicyDocument", + "documentation":"

A valid displayable JSON Aspen policy string to be set as the access control resource policy on the provided repository.

" + } + } + }, + "PutRepositoryPermissionsPolicyResult":{ + "type":"structure", + "members":{ + "policy":{ + "shape":"ResourcePolicy", + "documentation":"

The resource policy that was set after processing the request.

" + } + } + }, + "RepositoryDescription":{ + "type":"structure", + "members":{ + "name":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository.

" + }, + "administratorAccount":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that manages the repository.

" + }, + "domainName":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the repository.

" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain that contains the repository. It does not include dashes or spaces.

" + }, + "arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the repository.

" + }, + "description":{ + "shape":"Description", + "documentation":"

A text description of the repository.

" + }, + "upstreams":{ + "shape":"UpstreamRepositoryInfoList", + "documentation":"

A list of upstream repositories to associate with the repository. The order of the upstream repositories in the list determines their priority order when AWS CodeArtifact looks for a requested package version. For more information, see Working with upstream repositories.

" + }, + "externalConnections":{ + "shape":"RepositoryExternalConnectionInfoList", + "documentation":"

An array of external connections associated with the repository.

" + } + }, + "documentation":"

The details of a repository stored in AWS CodeArtifact. A CodeArtifact repository contains a set of package versions, each of which maps to a set of assets. Repositories are polyglot—a single repository can contain packages of any supported type. Each repository exposes endpoints for fetching and publishing packages using tools like the npm CLI, the Maven CLI (mvn), and pip. You can create up to 100 repositories per AWS account.

" + }, + "RepositoryExternalConnectionInfo":{ + "type":"structure", + "members":{ + "externalConnectionName":{ + "shape":"ExternalConnectionName", + "documentation":"

The name of the external connection associated with a repository.

" + }, + "packageFormat":{ + "shape":"PackageFormat", + "documentation":"

The package format associated with a repository's external connection. The valid package formats are:

" + }, + "status":{ + "shape":"ExternalConnectionStatus", + "documentation":"

The status of the external connection of a repository. There is one valid value, Available.

" + } + }, + "documentation":"

Contains information about the external connection of a repository.

" + }, + "RepositoryExternalConnectionInfoList":{ + "type":"list", + "member":{"shape":"RepositoryExternalConnectionInfo"} + }, + "RepositoryName":{ + "type":"string", + "max":100, + "min":2, + "pattern":"[A-Za-z0-9][A-Za-z0-9._\\-]{1,99}" + }, + "RepositorySummary":{ + "type":"structure", + "members":{ + "name":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository.

" + }, + "administratorAccount":{ + "shape":"AccountId", + "documentation":"

The AWS account ID that manages the repository.

" + }, + "domainName":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the repository.

" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

" + }, + "arn":{ + "shape":"Arn", + "documentation":"

The ARN of the repository.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the repository.

" + } + }, + "documentation":"

Details about a repository, including its Amazon Resource Name (ARN), description, and domain information. The ListRepositories operation returns a list of RepositorySummary objects.

" + }, + "RepositorySummaryList":{ + "type":"list", + "member":{"shape":"RepositorySummary"} + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

The ID of the resource.

" + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

The type of AWS resource.

" + } + }, + "documentation":"

The operation did not succeed because the resource requested is not found in the service.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ResourcePolicy":{ + "type":"structure", + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the resource associated with the resource policy.

" + }, + "revision":{ + "shape":"PolicyRevision", + "documentation":"

The current revision of the resource policy.

" + }, + "document":{ + "shape":"PolicyDocument", + "documentation":"

The resource policy formatted in JSON.

" + } + }, + "documentation":"

An AWS CodeArtifact resource policy that contains a resource ARN, document details, and a revision.

" + }, + "ResourceType":{ + "type":"string", + "enum":[ + "domain", + "repository", + "package", + "package-version", + "asset" + ] + }, + "RetryAfterSeconds":{"type":"integer"}, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

The ID of the resource.

" + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

The type of AWS resource.

" + } + }, + "documentation":"

The operation did not succeed because it would have exceeded a service limit for your account.

", + "error":{"httpStatusCode":402}, + "exception":true + }, + "String":{"type":"string"}, + "String255":{ + "type":"string", + "max":255, + "min":1 + }, + "SuccessfulPackageVersionInfo":{ + "type":"structure", + "members":{ + "revision":{ + "shape":"String", + "documentation":"

The revision of a package version.

" + }, + "status":{ + "shape":"PackageVersionStatus", + "documentation":"

The status of a package version. Valid statuses are:

" + } + }, + "documentation":"

Contains the revision and status of a package version.

" + }, + "SuccessfulPackageVersionInfoMap":{ + "type":"map", + "key":{"shape":"PackageVersion"}, + "value":{"shape":"SuccessfulPackageVersionInfo"} + }, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "retryAfterSeconds":{ + "shape":"RetryAfterSeconds", + "documentation":"

The time period, in seconds, to wait before retrying the request.

", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

The operation did not succeed because too many requests are sent to the service.

", + "error":{"httpStatusCode":429}, + "exception":true + }, + "Timestamp":{"type":"timestamp"}, + "UpdatePackageVersionsStatusRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package", + "versions", + "targetStatus" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The domain that contains the repository that contains the package versions with a status to be updated.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The repository that contains the package versions with the status you want to update.

", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

A format that specifies the type of the package with the statuses to update. The valid values are:

", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package with the version statuses to update.

", + "location":"querystring", + "locationName":"package" + }, + "versions":{ + "shape":"PackageVersionList", + "documentation":"

An array of strings that specify the versions of the package with the statuses to update.

" + }, + "versionRevisions":{ + "shape":"PackageVersionRevisionMap", + "documentation":"

A map of package versions and package version revisions. The map key is the package version (for example, 3.5.2), and the map value is the package version revision.

" + }, + "expectedStatus":{ + "shape":"PackageVersionStatus", + "documentation":"

The package version’s expected status before it is updated. If expectedStatus is provided, the package version's status is updated only if its status at the time UpdatePackageVersionsStatus is called matches expectedStatus.

" + }, + "targetStatus":{ + "shape":"PackageVersionStatus", + "documentation":"

The status you want to change the package version status to.

" + } + } + }, + "UpdatePackageVersionsStatusResult":{ + "type":"structure", + "members":{ + "successfulVersions":{ + "shape":"SuccessfulPackageVersionInfoMap", + "documentation":"

A list of SuccessfulPackageVersionInfo objects, one for each package version with a status that successfully updated.

" + }, + "failedVersions":{ + "shape":"PackageVersionErrorMap", + "documentation":"

A list of PackageVersionError objects, one for each package version with a status that failed to update.

" + } + } + }, + "UpdateRepositoryRequest":{ + "type":"structure", + "required":[ + "domain", + "repository" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain associated with the repository to update.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository to update.

", + "location":"querystring", + "locationName":"repository" + }, + "description":{ + "shape":"Description", + "documentation":"

An updated repository description.

" + }, + "upstreams":{ + "shape":"UpstreamRepositoryList", + "documentation":"

A list of upstream repositories to associate with the repository. The order of the upstream repositories in the list determines their priority order when AWS CodeArtifact looks for a requested package version. For more information, see Working with upstream repositories.

" + } + } + }, + "UpdateRepositoryResult":{ + "type":"structure", + "members":{ + "repository":{ + "shape":"RepositoryDescription", + "documentation":"

The updated repository.

" + } + } + }, + "UpstreamRepository":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of an upstream repository.

" + } + }, + "documentation":"

Information about an upstream repository. A list of UpstreamRepository objects is an input parameter to CreateRepository and UpdateRepository .

" + }, + "UpstreamRepositoryInfo":{ + "type":"structure", + "members":{ + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of an upstream repository.

" + } + }, + "documentation":"

Information about an upstream repository.

" + }, + "UpstreamRepositoryInfoList":{ + "type":"list", + "member":{"shape":"UpstreamRepositoryInfo"} + }, + "UpstreamRepositoryList":{ + "type":"list", + "member":{"shape":"UpstreamRepository"} + }, + "ValidationException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

" + } + }, + "documentation":"

The operation did not succeed because a parameter in the request was sent with an invalid value.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "CANNOT_PARSE", + "ENCRYPTION_KEY_ERROR", + "FIELD_VALIDATION_FAILED", + "UNKNOWN_OPERATION", + "OTHER" + ] + } + }, + "documentation":"

AWS CodeArtifact is a fully managed artifact repository compatible with language-native package managers and build tools such as npm, Apache Maven, and pip. You can use CodeArtifact to share packages with development teams and pull packages. Packages can be pulled from both public and CodeArtifact repositories. You can also create an upstream relationship between a CodeArtifact repository and another repository, which effectively merges their contents from the point of view of a package manager client.

AWS CodeArtifact Components

Use the information in this guide to help you work with the following CodeArtifact components:

CodeArtifact supports these operations:

" +} diff --git a/botocore/data/cognito-idp/2016-04-18/service-2.json b/botocore/data/cognito-idp/2016-04-18/service-2.json index 13af0ec5..48de0a16 100644 --- a/botocore/data/cognito-idp/2016-04-18/service-2.json +++ b/botocore/data/cognito-idp/2016-04-18/service-2.json @@ -284,6 +284,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"UserNotFoundException"}, {"shape":"AliasExistsException"}, + {"shape":"LimitExceededException"}, {"shape":"InternalErrorException"} ], "documentation":"

Links an existing user account in a user pool (DestinationUser) to an identity from an external identity provider (SourceUser) based on a specified attribute name and value from the external identity provider. This allows you to create a link from the existing user account to an external federated user identity that has not yet been used to sign in, so that the federated user identity can be used to sign in as the existing user account.

For example, if there is an existing user with a username and password, this API links that user to a federated user identity, so that when the federated user identity is used, the user signs in as the existing user account.

Because this API allows a user with an external federated identity to sign in as an existing user in the user pool, it is critical that it only be used with external identity providers and provider attributes that have been trusted by the application owner.

See also .

This action is enabled only for admin access and requires developer credentials.

" @@ -3187,10 +3188,7 @@ "AuthParametersType":{ "type":"map", "key":{"shape":"StringType"}, - "value":{"shape":"AuthParametersValueType"} - }, - "AuthParametersValueType":{ - "type":"string", + "value":{"shape":"StringType"}, "sensitive":true }, "AuthenticationResultType":{ diff --git a/botocore/data/compute-optimizer/2019-11-01/service-2.json b/botocore/data/compute-optimizer/2019-11-01/service-2.json index 7a83d941..9b447ed9 100644 --- a/botocore/data/compute-optimizer/2019-11-01/service-2.json +++ b/botocore/data/compute-optimizer/2019-11-01/service-2.json @@ -13,6 +13,66 @@ "uid":"compute-optimizer-2019-11-01" }, "operations":{ + "DescribeRecommendationExportJobs":{ + "name":"DescribeRecommendationExportJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRecommendationExportJobsRequest"}, + "output":{"shape":"DescribeRecommendationExportJobsResponse"}, + "errors":[ + {"shape":"OptInRequiredException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"MissingAuthenticationToken"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Describes recommendation export jobs created in the last seven days.

Use the ExportAutoScalingGroupRecommendations or ExportEC2InstanceRecommendations actions to request an export of your recommendations. Then use the DescribeRecommendationExportJobs action to view your export jobs.

" + }, + "ExportAutoScalingGroupRecommendations":{ + "name":"ExportAutoScalingGroupRecommendations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExportAutoScalingGroupRecommendationsRequest"}, + "output":{"shape":"ExportAutoScalingGroupRecommendationsResponse"}, + "errors":[ + {"shape":"OptInRequiredException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingAuthenticationToken"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Exports optimization recommendations for Auto Scaling groups.

Recommendations are exported in a comma-separated values (.csv) file, and its metadata in a JavaScript Object Notation (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting Recommendations in the Compute Optimizer User Guide.

You can have only one Auto Scaling group export job in progress per AWS Region.

" + }, + "ExportEC2InstanceRecommendations":{ + "name":"ExportEC2InstanceRecommendations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExportEC2InstanceRecommendationsRequest"}, + "output":{"shape":"ExportEC2InstanceRecommendationsResponse"}, + "errors":[ + {"shape":"OptInRequiredException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingAuthenticationToken"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Exports optimization recommendations for Amazon EC2 instances.

Recommendations are exported in a comma-separated values (.csv) file, and its metadata in a JavaScript Object Notation (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting Recommendations in the Compute Optimizer User Guide.

You can have only one Amazon EC2 instance export job in progress per AWS Region.

" + }, "GetAutoScalingGroupRecommendations":{ "name":"GetAutoScalingGroupRecommendations", "http":{ @@ -89,7 +149,7 @@ {"shape":"MissingAuthenticationToken"}, {"shape":"ThrottlingException"} ], - "documentation":"

Returns the enrollment (opt in) status of an account to the AWS Compute Optimizer service.

If the account is a master account of an organization, this operation also confirms the enrollment status of member accounts within the organization.

" + "documentation":"

Returns the enrollment (opt in) status of an account to the AWS Compute Optimizer service.

If the account is the master account of an organization, this action also confirms the enrollment status of member accounts within the organization.

" }, "GetRecommendationSummaries":{ "name":"GetRecommendationSummaries", @@ -126,7 +186,7 @@ {"shape":"MissingAuthenticationToken"}, {"shape":"ThrottlingException"} ], - "documentation":"

Updates the enrollment (opt in) status of an account to the AWS Compute Optimizer service.

If the account is a master account of an organization, this operation can also enroll member accounts within the organization.

" + "documentation":"

Updates the enrollment (opt in) status of an account to the AWS Compute Optimizer service.

If the account is a master account of an organization, this action can also be used to enroll member accounts within the organization.

" } }, "shapes":{ @@ -245,19 +305,238 @@ "member":{"shape":"AutoScalingGroupRecommendation"} }, "Code":{"type":"string"}, + "CreationTimestamp":{"type":"timestamp"}, "CurrentInstanceType":{"type":"string"}, + "DescribeRecommendationExportJobsRequest":{ + "type":"structure", + "members":{ + "jobIds":{ + "shape":"JobIds", + "documentation":"

The identification numbers of the export jobs to return.

An export job ID is returned when you create an export using the ExportAutoScalingGroupRecommendations or ExportEC2InstanceRecommendations actions.

All export jobs created in the last seven days are returned if this parameter is omitted.

" + }, + "filters":{ + "shape":"JobFilters", + "documentation":"

An array of objects that describe a filter to return a more specific list of export jobs.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to advance to the next page of export jobs.

" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of export jobs to return with a single request.

To retrieve the remaining results, make another request with the returned NextToken value.

" + } + } + }, + "DescribeRecommendationExportJobsResponse":{ + "type":"structure", + "members":{ + "recommendationExportJobs":{ + "shape":"RecommendationExportJobs", + "documentation":"

An array of objects that describe recommendation export jobs.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to advance to the next page of export jobs.

This value is null when there are no more pages of export jobs to return.

" + } + } + }, "DesiredCapacity":{"type":"integer"}, + "DestinationBucket":{"type":"string"}, + "DestinationKey":{"type":"string"}, + "DestinationKeyPrefix":{"type":"string"}, "ErrorMessage":{"type":"string"}, + "ExportAutoScalingGroupRecommendationsRequest":{ + "type":"structure", + "required":["s3DestinationConfig"], + "members":{ + "accountIds":{ + "shape":"AccountIds", + "documentation":"

The IDs of the AWS accounts for which to export Auto Scaling group recommendations.

If your account is the master account of an organization, use this parameter to specify the member accounts for which you want to export recommendations.

This parameter cannot be specified together with the include member accounts parameter. The parameters are mutually exclusive.

Recommendations for member accounts are not included in the export if this parameter, or the include member accounts parameter, is omitted.

You can specify multiple account IDs per request.

" + }, + "filters":{ + "shape":"Filters", + "documentation":"

An array of objects that describe a filter to export a more specific set of Auto Scaling group recommendations.

" + }, + "fieldsToExport":{ + "shape":"ExportableAutoScalingGroupFields", + "documentation":"

The recommendations data to include in the export file.

" + }, + "s3DestinationConfig":{ + "shape":"S3DestinationConfig", + "documentation":"

An object to specify the destination Amazon Simple Storage Service (Amazon S3) bucket name and key prefix for the export job.

You must create the destination Amazon S3 bucket for your recommendations export before you create the export job. Compute Optimizer does not create the S3 bucket for you. After you create the S3 bucket, ensure that it has the required permission policy to allow Compute Optimizer to write the export file to it. If you plan to specify an object prefix when you create the export job, you must include the object prefix in the policy that you add to the S3 bucket. For more information, see Amazon S3 Bucket Policy for Compute Optimizer in the Compute Optimizer user guide.

" + }, + "fileFormat":{ + "shape":"FileFormat", + "documentation":"

The format of the export file.

The only export file format currently supported is Csv.

" + }, + "includeMemberAccounts":{ + "shape":"IncludeMemberAccounts", + "documentation":"

Indicates whether to include recommendations for resources in all member accounts of the organization if your account is the master account of an organization.

The member accounts must also be opted in to Compute Optimizer.

Recommendations for member accounts of the organization are not included in the export file if this parameter is omitted.

This parameter cannot be specified together with the account IDs parameter. The parameters are mutually exclusive.

Recommendations for member accounts are not included in the export if this parameter, or the account IDs parameter, is omitted.

" + } + } + }, + "ExportAutoScalingGroupRecommendationsResponse":{ + "type":"structure", + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"

The identification number of the export job.

Use the DescribeRecommendationExportJobs action, and specify the job ID to view the status of an export job.

" + }, + "s3Destination":{ + "shape":"S3Destination", + "documentation":"

An object that describes the destination Amazon S3 bucket of a recommendations export file.

" + } + } + }, + "ExportDestination":{ + "type":"structure", + "members":{ + "s3":{ + "shape":"S3Destination", + "documentation":"

An object that describes the destination Amazon Simple Storage Service (Amazon S3) bucket name and object keys of a recommendations export file, and its associated metadata file.

" + } + }, + "documentation":"

Describes the destination of the recommendations export and metadata files.

" + }, + "ExportEC2InstanceRecommendationsRequest":{ + "type":"structure", + "required":["s3DestinationConfig"], + "members":{ + "accountIds":{ + "shape":"AccountIds", + "documentation":"

The IDs of the AWS accounts for which to export instance recommendations.

If your account is the master account of an organization, use this parameter to specify the member accounts for which you want to export recommendations.

This parameter cannot be specified together with the include member accounts parameter. The parameters are mutually exclusive.

Recommendations for member accounts are not included in the export if this parameter, or the include member accounts parameter, is omitted.

You can specify multiple account IDs per request.

" + }, + "filters":{ + "shape":"Filters", + "documentation":"

An array of objects that describe a filter to export a more specific set of instance recommendations.

" + }, + "fieldsToExport":{ + "shape":"ExportableInstanceFields", + "documentation":"

The recommendations data to include in the export file.

" + }, + "s3DestinationConfig":{ + "shape":"S3DestinationConfig", + "documentation":"

An object to specify the destination Amazon Simple Storage Service (Amazon S3) bucket name and key prefix for the export job.

You must create the destination Amazon S3 bucket for your recommendations export before you create the export job. Compute Optimizer does not create the S3 bucket for you. After you create the S3 bucket, ensure that it has the required permission policy to allow Compute Optimizer to write the export file to it. If you plan to specify an object prefix when you create the export job, you must include the object prefix in the policy that you add to the S3 bucket. For more information, see Amazon S3 Bucket Policy for Compute Optimizer in the Compute Optimizer user guide.

" + }, + "fileFormat":{ + "shape":"FileFormat", + "documentation":"

The format of the export file.

The only export file format currently supported is Csv.

" + }, + "includeMemberAccounts":{ + "shape":"IncludeMemberAccounts", + "documentation":"

Indicates whether to include recommendations for resources in all member accounts of the organization if your account is the master account of an organization.

The member accounts must also be opted in to Compute Optimizer.

Recommendations for member accounts of the organization are not included in the export file if this parameter is omitted.

Recommendations for member accounts are not included in the export if this parameter, or the account IDs parameter, is omitted.

" + } + } + }, + "ExportEC2InstanceRecommendationsResponse":{ + "type":"structure", + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"

The identification number of the export job.

Use the DescribeRecommendationExportJobs action, and specify the job ID to view the status of an export job.

" + }, + "s3Destination":{ + "shape":"S3Destination", + "documentation":"

An object that describes the destination Amazon S3 bucket of a recommendations export file.

" + } + } + }, + "ExportableAutoScalingGroupField":{ + "type":"string", + "enum":[ + "AccountId", + "AutoScalingGroupArn", + "AutoScalingGroupName", + "Finding", + "UtilizationMetricsCpuMaximum", + "UtilizationMetricsMemoryMaximum", + "LookbackPeriodInDays", + "CurrentConfigurationInstanceType", + "CurrentConfigurationDesiredCapacity", + "CurrentConfigurationMinSize", + "CurrentConfigurationMaxSize", + "CurrentOnDemandPrice", + "CurrentStandardOneYearNoUpfrontReservedPrice", + "CurrentStandardThreeYearNoUpfrontReservedPrice", + "CurrentVCpus", + "CurrentMemory", + "CurrentStorage", + "CurrentNetwork", + "RecommendationOptionsConfigurationInstanceType", + "RecommendationOptionsConfigurationDesiredCapacity", + "RecommendationOptionsConfigurationMinSize", + "RecommendationOptionsConfigurationMaxSize", + "RecommendationOptionsProjectedUtilizationMetricsCpuMaximum", + "RecommendationOptionsProjectedUtilizationMetricsMemoryMaximum", + "RecommendationOptionsPerformanceRisk", + "RecommendationOptionsOnDemandPrice", + "RecommendationOptionsStandardOneYearNoUpfrontReservedPrice", + "RecommendationOptionsStandardThreeYearNoUpfrontReservedPrice", + "RecommendationOptionsVcpus", + "RecommendationOptionsMemory", + "RecommendationOptionsStorage", + "RecommendationOptionsNetwork", + "LastRefreshTimestamp" + ] + }, + "ExportableAutoScalingGroupFields":{ + "type":"list", + "member":{"shape":"ExportableAutoScalingGroupField"} + }, + "ExportableInstanceField":{ + "type":"string", + "enum":[ + "AccountId", + "InstanceArn", + "InstanceName", + "Finding", + "LookbackPeriodInDays", + "CurrentInstanceType", + "UtilizationMetricsCpuMaximum", + "UtilizationMetricsMemoryMaximum", + "CurrentOnDemandPrice", + "CurrentStandardOneYearNoUpfrontReservedPrice", + "CurrentStandardThreeYearNoUpfrontReservedPrice", + "CurrentVCpus", + "CurrentMemory", + "CurrentStorage", + "CurrentNetwork", + "RecommendationOptionsInstanceType", + "RecommendationOptionsProjectedUtilizationMetricsCpuMaximum", + 
"RecommendationOptionsProjectedUtilizationMetricsMemoryMaximum", + "RecommendationOptionsPerformanceRisk", + "RecommendationOptionsVcpus", + "RecommendationOptionsMemory", + "RecommendationOptionsStorage", + "RecommendationOptionsNetwork", + "RecommendationOptionsOnDemandPrice", + "RecommendationOptionsStandardOneYearNoUpfrontReservedPrice", + "RecommendationOptionsStandardThreeYearNoUpfrontReservedPrice", + "RecommendationsSourcesRecommendationSourceArn", + "RecommendationsSourcesRecommendationSourceType", + "LastRefreshTimestamp" + ] + }, + "ExportableInstanceFields":{ + "type":"list", + "member":{"shape":"ExportableInstanceField"} + }, + "FailureReason":{"type":"string"}, + "FileFormat":{ + "type":"string", + "enum":["Csv"] + }, "Filter":{ "type":"structure", "members":{ "name":{ "shape":"FilterName", - "documentation":"

The name of the filter.

Specify Finding to filter the results to a specific findings classification.

Specify RecommendationSourceType to filter the results to a specific resource type.

" + "documentation":"

The name of the filter.

Specify Finding to return recommendations with a specific findings classification (e.g., Overprovisioned).

Specify RecommendationSourceType to return recommendations of a specific resource type (e.g., AutoScalingGroup).

" }, "values":{ "shape":"FilterValues", - "documentation":"

The value of the filter.

If you specify the name parameter as Finding, and you're recommendations for an instance, then the valid values are Underprovisioned, Overprovisioned, NotOptimized, or Optimized.

If you specify the name parameter as Finding, and you're recommendations for an Auto Scaling group, then the valid values are Optimized, or NotOptimized.

If you specify the name parameter as RecommendationSourceType, then the valid values are EC2Instance, or AutoScalingGroup.

" + "documentation":"

The value of the filter.

If you specify the name parameter as Finding, and you request recommendations for an instance, then the valid values are Underprovisioned, Overprovisioned, NotOptimized, or Optimized.

If you specify the name parameter as Finding, and you request recommendations for an Auto Scaling group, then the valid values are Optimized, or NotOptimized.

If you specify the name parameter as RecommendationSourceType, then the valid values are Ec2Instance, or AutoScalingGroup.

" } }, "documentation":"

Describes a filter that returns a more specific list of recommendations.

" @@ -292,7 +571,7 @@ "members":{ "accountIds":{ "shape":"AccountIds", - "documentation":"

The AWS account IDs for which to return Auto Scaling group recommendations.

Only one account ID can be specified per request.

" + "documentation":"

The IDs of the AWS accounts for which to return Auto Scaling group recommendations.

If your account is the master account of an organization, use this parameter to specify the member accounts for which you want to return Auto Scaling group recommendations.

Only one account ID can be specified per request.

" }, "autoScalingGroupArns":{ "shape":"AutoScalingGroupArns", @@ -304,7 +583,7 @@ }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of Auto Scaling group recommendations to return with a single call.

To retrieve the remaining results, make another call with the returned NextToken value.

" + "documentation":"

The maximum number of Auto Scaling group recommendations to return with a single request.

To retrieve the remaining results, make another request with the returned NextToken value.

" }, "filters":{ "shape":"Filters", @@ -342,7 +621,7 @@ }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of instance recommendations to return with a single call.

To retrieve the remaining results, make another call with the returned NextToken value.

" + "documentation":"

The maximum number of instance recommendations to return with a single request.

To retrieve the remaining results, make another request with the returned NextToken value.

" }, "filters":{ "shape":"Filters", @@ -350,7 +629,7 @@ }, "accountIds":{ "shape":"AccountIds", - "documentation":"

The AWS account IDs for which to return instance recommendations.

Only one account ID can be specified per request.

" + "documentation":"

The IDs of the AWS accounts for which to return instance recommendations.

If your account is the master account of an organization, use this parameter to specify the member accounts for which you want to return instance recommendations.

Only one account ID can be specified per request.

" } } }, @@ -461,7 +740,7 @@ "members":{ "accountIds":{ "shape":"AccountIds", - "documentation":"

The AWS account IDs for which to return recommendation summaries.

Only one account ID can be specified per request.

" + "documentation":"

The IDs of the AWS accounts for which to return recommendation summaries.

If your account is the master account of an organization, use this parameter to specify the member accounts for which you want to return recommendation summaries.

Only one account ID can be specified per request.

" }, "nextToken":{ "shape":"NextToken", @@ -469,7 +748,7 @@ }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of recommendation summaries to return with a single call.

To retrieve the remaining results, make another call with the returned NextToken value.

" + "documentation":"

The maximum number of recommendation summaries to return with a single request.

To retrieve the remaining results, make another request with the returned NextToken value.

" } } }, @@ -503,7 +782,7 @@ }, "accountId":{ "shape":"AccountId", - "documentation":"

The AWS account ID of the instance recommendation.

" + "documentation":"

The AWS account ID of the instance.

" }, "instanceName":{ "shape":"InstanceName", @@ -572,7 +851,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

The request processing has failed because of an unknown error, exception, or failure.

", + "documentation":"

An internal error has occurred. Try your call again.

", "exception":true, "fault":true }, @@ -585,7 +864,56 @@ "exception":true, "synthetic":true }, + "JobFilter":{ + "type":"structure", + "members":{ + "name":{ + "shape":"JobFilterName", + "documentation":"

The name of the filter.

Specify ResourceType to return export jobs of a specific resource type (e.g., Ec2Instance).

Specify JobStatus to return export jobs with a specific status (e.g., Complete).

" + }, + "values":{ + "shape":"FilterValues", + "documentation":"

The value of the filter.

If you specify the name parameter as ResourceType, the valid values are Ec2Instance or AutoScalingGroup.

If you specify the name parameter as JobStatus, the valid values are Queued, InProgress, Complete, or Failed.

" + } + }, + "documentation":"

Describes a filter that returns a more specific list of recommendation export jobs.

This filter is used with the DescribeRecommendationExportJobs action.

" + }, + "JobFilterName":{ + "type":"string", + "enum":[ + "ResourceType", + "JobStatus" + ] + }, + "JobFilters":{ + "type":"list", + "member":{"shape":"JobFilter"} + }, + "JobId":{"type":"string"}, + "JobIds":{ + "type":"list", + "member":{"shape":"JobId"} + }, + "JobStatus":{ + "type":"string", + "enum":[ + "Queued", + "InProgress", + "Complete", + "Failed" + ] + }, "LastRefreshTimestamp":{"type":"timestamp"}, + "LastUpdatedTimestamp":{"type":"timestamp"}, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The request exceeds a limit of the service.

", + "exception":true, + "synthetic":true + }, "LookBackPeriodInDays":{"type":"double"}, "MaxResults":{ "type":"integer", @@ -594,6 +922,7 @@ "MaxSize":{"type":"integer"}, "MemberAccountsEnrolled":{"type":"boolean"}, "Message":{"type":"string"}, + "MetadataKey":{"type":"string"}, "MetricName":{ "type":"string", "enum":[ @@ -629,7 +958,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

You must opt in to the service to perform this action.

", + "documentation":"

The account is not opted in to AWS Compute Optimizer.

", "exception":true, "synthetic":true }, @@ -666,6 +995,44 @@ "member":{"shape":"UtilizationMetric"} }, "Rank":{"type":"integer"}, + "RecommendationExportJob":{ + "type":"structure", + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"

The identification number of the export job.

" + }, + "destination":{ + "shape":"ExportDestination", + "documentation":"

An object that describes the destination of the export file.

" + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

The resource type of the exported recommendations.

" + }, + "status":{ + "shape":"JobStatus", + "documentation":"

The status of the export job.

" + }, + "creationTimestamp":{ + "shape":"CreationTimestamp", + "documentation":"

The timestamp of when the export job was created.

" + }, + "lastUpdatedTimestamp":{ + "shape":"LastUpdatedTimestamp", + "documentation":"

The timestamp of when the export job was last updated.

" + }, + "failureReason":{ + "shape":"FailureReason", + "documentation":"

The reason for an export job failure.

" + } + }, + "documentation":"

Describes a recommendation export job.

Use the DescribeRecommendationExportJobs action to view your recommendation export jobs.

Use the ExportAutoScalingGroupRecommendations or ExportEC2InstanceRecommendations actions to request an export of your recommendations.

" + }, + "RecommendationExportJobs":{ + "type":"list", + "member":{"shape":"RecommendationExportJob"} + }, "RecommendationOptions":{ "type":"list", "member":{"shape":"InstanceRecommendationOption"} @@ -746,10 +1113,49 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

The specified resource was not found.

", + "documentation":"

A resource that is required for the action doesn't exist.

", "exception":true, "synthetic":true }, + "ResourceType":{ + "type":"string", + "enum":[ + "Ec2Instance", + "AutoScalingGroup" + ] + }, + "S3Destination":{ + "type":"structure", + "members":{ + "bucket":{ + "shape":"DestinationBucket", + "documentation":"

The name of the Amazon S3 bucket used as the destination of an export file.

" + }, + "key":{ + "shape":"DestinationKey", + "documentation":"

The Amazon S3 bucket key of an export file.

The key uniquely identifies the object, or export file, in the S3 bucket.

" + }, + "metadataKey":{ + "shape":"MetadataKey", + "documentation":"

The Amazon S3 bucket key of a metadata file.

The key uniquely identifies the object, or metadata file, in the S3 bucket.

" + } + }, + "documentation":"

Describes the destination Amazon Simple Storage Service (Amazon S3) bucket name and object keys of a recommendations export file, and its associated metadata file.

" + }, + "S3DestinationConfig":{ + "type":"structure", + "members":{ + "bucket":{ + "shape":"DestinationBucket", + "documentation":"

The name of the Amazon S3 bucket to use as the destination for an export job.

" + }, + "keyPrefix":{ + "shape":"DestinationKeyPrefix", + "documentation":"

The Amazon S3 bucket prefix for an export job.

" + } + }, + "documentation":"

Describes the destination Amazon Simple Storage Service (Amazon S3) bucket name and key prefix for a recommendations export job.

You must create the destination Amazon S3 bucket for your recommendations export before you create the export job. Compute Optimizer does not create the S3 bucket for you. After you create the S3 bucket, ensure that it has the required permission policy to allow Compute Optimizer to write the export file to it. If you plan to specify an object prefix when you create the export job, you must include the object prefix in the policy that you add to the S3 bucket. For more information, see Amazon S3 Bucket Policy for Compute Optimizer in the Compute Optimizer user guide.

" + }, "ServiceUnavailableException":{ "type":"structure", "members":{ @@ -794,7 +1200,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

The limit on the number of requests per second was exceeded.

", + "documentation":"

The request was denied due to request throttling.

", "exception":true, "synthetic":true }, @@ -813,7 +1219,7 @@ }, "includeMemberAccounts":{ "shape":"IncludeMemberAccounts", - "documentation":"

Indicates whether to enroll member accounts within the organization, if the account is a master account of an organization.

" + "documentation":"

Indicates whether to enroll member accounts of the organization if your account is the master account of an organization.

" } } }, @@ -853,5 +1259,5 @@ "member":{"shape":"UtilizationMetric"} } }, - "documentation":"

AWS Compute Optimizer is a service that analyzes the configuration and utilization metrics of your AWS resources, such as EC2 instances and Auto Scaling groups. It reports whether your resources are optimal, and generates optimization recommendations to reduce the cost and improve the performance of your workloads. Compute Optimizer also provides recent utilization metric data, as well as projected utilization metric data for the recommendations, which you can use to evaluate which recommendation provides the best price-performance trade-off. The analysis of your usage patterns can help you decide when to move or resize your running resources, and still meet your performance and capacity requirements. For more information about Compute Optimizer, see the AWS Compute Optimizer User Guide.

" + "documentation":"

AWS Compute Optimizer is a service that analyzes the configuration and utilization metrics of your AWS resources, such as EC2 instances and Auto Scaling groups. It reports whether your resources are optimal, and generates optimization recommendations to reduce the cost and improve the performance of your workloads. Compute Optimizer also provides recent utilization metric data, as well as projected utilization metric data for the recommendations, which you can use to evaluate which recommendation provides the best price-performance trade-off. The analysis of your usage patterns can help you decide when to move or resize your running resources, and still meet your performance and capacity requirements. For more information about Compute Optimizer, including the required permissions to use the service, see the AWS Compute Optimizer User Guide.

" } diff --git a/botocore/data/dataexchange/2017-07-25/service-2.json b/botocore/data/dataexchange/2017-07-25/service-2.json index 10e7d628..7074c6b0 100644 --- a/botocore/data/dataexchange/2017-07-25/service-2.json +++ b/botocore/data/dataexchange/2017-07-25/service-2.json @@ -1393,8 +1393,7 @@ }, "documentation": "

Encryption configuration of the export job. Includes the encryption type as well as the AWS KMS key. The KMS key is only necessary if you chose the KMS encryption type.

", "required": [ - "Type", - "KmsKeyArn" + "Type" ] }, "GetAssetRequest": { diff --git a/botocore/data/directconnect/2012-10-25/service-2.json b/botocore/data/directconnect/2012-10-25/service-2.json index 05dfa537..5aef9e5a 100644 --- a/botocore/data/directconnect/2012-10-25/service-2.json +++ b/botocore/data/directconnect/2012-10-25/service-2.json @@ -321,7 +321,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Creates a private virtual interface. A virtual interface is the VLAN that transports AWS Direct Connect traffic. A private virtual interface can be connected to either a Direct Connect gateway or a Virtual Private Gateway (VGW). Connecting the private virtual interface to a Direct Connect gateway enables the possibility for connecting to multiple VPCs, including VPCs in different AWS Regions. Connecting the private virtual interface to a VGW only provides access to a single VPC within the same Region.

" + "documentation":"

Creates a private virtual interface. A virtual interface is the VLAN that transports AWS Direct Connect traffic. A private virtual interface can be connected to either a Direct Connect gateway or a Virtual Private Gateway (VGW). Connecting the private virtual interface to a Direct Connect gateway enables the possibility for connecting to multiple VPCs, including VPCs in different AWS Regions. Connecting the private virtual interface to a VGW only provides access to a single VPC within the same Region.

Setting the MTU of a virtual interface to 9001 (jumbo frames) can cause an update to the underlying physical connection if it wasn't updated to support jumbo frames. Updating the connection disrupts network connectivity for all virtual interfaces associated with the connection for up to 30 seconds. To check whether your connection supports jumbo frames, call DescribeConnections. To check whether your virtual interface supports jumbo frames, call DescribeVirtualInterfaces.

" }, "CreatePublicVirtualInterface":{ "name":"CreatePublicVirtualInterface", @@ -353,7 +353,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Creates a transit virtual interface. A transit virtual interface should be used to access one or more transit gateways associated with Direct Connect gateways. A transit virtual interface enables the connection of multiple VPCs attached to a transit gateway to a Direct Connect gateway.

If you associate your transit gateway with one or more Direct Connect gateways, the Autonomous System Number (ASN) used by the transit gateway and the Direct Connect gateway must be different. For example, if you use the default ASN 64512 for both your the transit gateway and Direct Connect gateway, the association request fails.

" + "documentation":"

Creates a transit virtual interface. A transit virtual interface should be used to access one or more transit gateways associated with Direct Connect gateways. A transit virtual interface enables the connection of multiple VPCs attached to a transit gateway to a Direct Connect gateway.

If you associate your transit gateway with one or more Direct Connect gateways, the Autonomous System Number (ASN) used by the transit gateway and the Direct Connect gateway must be different. For example, if you use the default ASN 64512 for both your the transit gateway and Direct Connect gateway, the association request fails.

Setting the MTU of a virtual interface to 8500 (jumbo frames) can cause an update to the underlying physical connection if it wasn't updated to support jumbo frames. Updating the connection disrupts network connectivity for all virtual interfaces associated with the connection for up to 30 seconds. To check whether your connection supports jumbo frames, call DescribeConnections. To check whether your virtual interface supports jumbo frames, call DescribeVirtualInterfaces.

" }, "DeleteBGPPeer":{ "name":"DeleteBGPPeer", @@ -706,6 +706,48 @@ ], "documentation":"

Disassociates a connection from a link aggregation group (LAG). The connection is interrupted and re-established as a standalone connection (the connection is not deleted; to delete the connection, use the DeleteConnection request). If the LAG has associated virtual interfaces or hosted connections, they remain associated with the LAG. A disassociated connection owned by an AWS Direct Connect Partner is automatically converted to an interconnect.

If disassociating the connection would cause the LAG to fall below its setting for minimum number of operational connections, the request fails, except when it's the last member of the LAG. If all connections are disassociated, the LAG continues to exist as an empty LAG with no physical connections.

" }, + "ListVirtualInterfaceTestHistory":{ + "name":"ListVirtualInterfaceTestHistory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListVirtualInterfaceTestHistoryRequest"}, + "output":{"shape":"ListVirtualInterfaceTestHistoryResponse"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ], + "documentation":"

Lists the virtual interface failover test history.

" + }, + "StartBgpFailoverTest":{ + "name":"StartBgpFailoverTest", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartBgpFailoverTestRequest"}, + "output":{"shape":"StartBgpFailoverTestResponse"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ], + "documentation":"

Starts the virtual interface failover test that verifies your configuration meets your resiliency requirements by placing the BGP peering session in the DOWN state. You can then send traffic to verify that there are no outages.

You can run the test on public, private, transit, and hosted virtual interfaces.

You can use ListVirtualInterfaceTestHistory to view the virtual interface test history.

If you need to stop the test before the test interval completes, use StopBgpFailoverTest.

" + }, + "StopBgpFailoverTest":{ + "name":"StopBgpFailoverTest", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopBgpFailoverTestRequest"}, + "output":{"shape":"StopBgpFailoverTestResponse"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ], + "documentation":"

Stops the virtual interface failover test.

" + }, "TagResource":{ "name":"TagResource", "http":{ @@ -776,7 +818,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Updates the specified attributes of the specified virtual private interface.

Setting the MTU of a virtual interface to 9001 (jumbo frames) can cause an update to the underlying physical connection if it wasn't updated to support jumbo frames. Updating the connection disrupts network connectivity for all virtual interfaces associated with the connection for up to 30 seconds. To check whether your connection supports jumbo frames, call DescribeConnections. To check whether your virtual interface supports jumbo frames, call DescribeVirtualInterfaces.

" + "documentation":"

Updates the specified attributes of the specified virtual private interface.

Setting the MTU of a virtual interface to 9001 (jumbo frames) can cause an update to the underlying physical connection if it wasn't updated to support jumbo frames. Updating the connection disrupts network connectivity for all virtual interfaces associated with the connection for up to 30 seconds. To check whether your connection supports jumbo frames, call DescribeConnections. To check whether your virtual interface supports jumbo frames, call DescribeVirtualInterfaces.

" } }, "shapes":{ @@ -1088,6 +1130,10 @@ "documentation":"

Information about a BGP peer.

" }, "BGPPeerId":{"type":"string"}, + "BGPPeerIdList":{ + "type":"list", + "member":{"shape":"BGPPeerId"} + }, "BGPPeerList":{ "type":"list", "member":{"shape":"BGPPeer"} @@ -2286,7 +2332,9 @@ "documentation":"

A tag key was specified more than once.

", "exception":true }, + "EndTime":{"type":"timestamp"}, "ErrorMessage":{"type":"string"}, + "FailureTestHistoryStatus":{"type":"string"}, "GatewayIdToAssociate":{"type":"string"}, "GatewayIdentifier":{"type":"string"}, "GatewayType":{ @@ -2495,6 +2543,48 @@ } } }, + "ListVirtualInterfaceTestHistoryRequest":{ + "type":"structure", + "members":{ + "testId":{ + "shape":"TestId", + "documentation":"

The ID of the virtual interface failover test.

" + }, + "virtualInterfaceId":{ + "shape":"VirtualInterfaceId", + "documentation":"

The ID of the virtual interface that was tested.

" + }, + "bgpPeers":{ + "shape":"BGPPeerIdList", + "documentation":"

The BGP peers that were placed in the DOWN state during the virtual interface failover test.

" + }, + "status":{ + "shape":"FailureTestHistoryStatus", + "documentation":"

The status of the virtual interface failover test.

" + }, + "maxResults":{ + "shape":"MaxResultSetSize", + "documentation":"

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

If MaxResults is given a value larger than 100, only 100 results are returned.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token for the next page of results.

" + } + } + }, + "ListVirtualInterfaceTestHistoryResponse":{ + "type":"structure", + "members":{ + "virtualInterfaceTestHistory":{ + "shape":"VirtualInterfaceTestHistoryList", + "documentation":"

Information about the virtual interface failover test history.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

" + } + } + }, "Loa":{ "type":"structure", "members":{ @@ -2598,7 +2688,7 @@ "members":{ "virtualInterfaceName":{ "shape":"VirtualInterfaceName", - "documentation":"

The name of the virtual interface assigned by the customer network.

" + "documentation":"

The name of the virtual interface assigned by the customer network. The name has a maximum of 100 characters. The following are valid characters: a-z, 0-9 and a hyphen (-).

" }, "vlan":{ "shape":"VLAN", @@ -2653,7 +2743,7 @@ "members":{ "virtualInterfaceName":{ "shape":"VirtualInterfaceName", - "documentation":"

The name of the virtual interface assigned by the customer network.

" + "documentation":"

The name of the virtual interface assigned by the customer network. The name has a maximum of 100 characters. The following are valid characters: a-z, 0-9 and a hyphen (-).

" }, "vlan":{ "shape":"VLAN", @@ -2700,7 +2790,7 @@ "members":{ "virtualInterfaceName":{ "shape":"VirtualInterfaceName", - "documentation":"

The name of the virtual interface assigned by the customer network.

" + "documentation":"

The name of the virtual interface assigned by the customer network. The name has a maximum of 100 characters. The following are valid characters: a-z, 0-9 and a hyphen (-).

" }, "vlan":{ "shape":"VLAN", @@ -2747,7 +2837,7 @@ "members":{ "virtualInterfaceName":{ "shape":"VirtualInterfaceName", - "documentation":"

The name of the virtual interface assigned by the customer network.

" + "documentation":"

The name of the virtual interface assigned by the customer network. The name has a maximum of 100 characters. The following are valid characters: a-z, 0-9 and a hyphen (-).

" }, "vlan":{ "shape":"VLAN", @@ -2789,7 +2879,7 @@ "members":{ "virtualInterfaceName":{ "shape":"VirtualInterfaceName", - "documentation":"

The name of the virtual interface assigned by the customer network.

" + "documentation":"

The name of the virtual interface assigned by the customer network. The name has a maximum of 100 characters. The following are valid characters: a-z, 0-9 and a hyphen (-).

" }, "vlan":{ "shape":"VLAN", @@ -2835,7 +2925,7 @@ "members":{ "virtualInterfaceName":{ "shape":"VirtualInterfaceName", - "documentation":"

The name of the virtual interface assigned by the customer network.

" + "documentation":"

The name of the virtual interface assigned by the customer network. The name has a maximum of 100 characters. The following are valid characters: a-z, 0-9 and a hyphen (-).

" }, "vlan":{ "shape":"VLAN", @@ -2920,7 +3010,54 @@ "member":{"shape":"RouteFilterPrefix"} }, "RouterConfig":{"type":"string"}, + "StartBgpFailoverTestRequest":{ + "type":"structure", + "required":["virtualInterfaceId"], + "members":{ + "virtualInterfaceId":{ + "shape":"VirtualInterfaceId", + "documentation":"

The ID of the virtual interface you want to test.

" + }, + "bgpPeers":{ + "shape":"BGPPeerIdList", + "documentation":"

The BGP peers to place in the DOWN state.

" + }, + "testDurationInMinutes":{ + "shape":"TestDuration", + "documentation":"

The time in minutes that the virtual interface failover test will last.

Maximum value: 180 minutes (3 hours).

Default: 180 minutes (3 hours).

" + } + } + }, + "StartBgpFailoverTestResponse":{ + "type":"structure", + "members":{ + "virtualInterfaceTest":{ + "shape":"VirtualInterfaceTestHistory", + "documentation":"

Information about the virtual interface failover test.

" + } + } + }, + "StartTime":{"type":"timestamp"}, "StateChangeError":{"type":"string"}, + "StopBgpFailoverTestRequest":{ + "type":"structure", + "required":["virtualInterfaceId"], + "members":{ + "virtualInterfaceId":{ + "shape":"VirtualInterfaceId", + "documentation":"

The ID of the virtual interface you no longer want to test.

" + } + } + }, + "StopBgpFailoverTestResponse":{ + "type":"structure", + "members":{ + "virtualInterfaceTest":{ + "shape":"VirtualInterfaceTestHistory", + "documentation":"

Information about the virtual interface failover test.

" + } + } + }, "Tag":{ "type":"structure", "required":["key"], @@ -2979,6 +3116,11 @@ "min":0, "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" }, + "TestDuration":{ + "type":"integer", + "box":true + }, + "TestId":{"type":"string"}, "TooManyTagsException":{ "type":"structure", "members":{ @@ -3122,7 +3264,7 @@ }, "virtualInterfaceName":{ "shape":"VirtualInterfaceName", - "documentation":"

The name of the virtual interface assigned by the customer network.

" + "documentation":"

The name of the virtual interface assigned by the customer network. The name has a maximum of 100 characters. The following are valid characters: a-z, 0-9 and a hyphen (-).

" }, "vlan":{ "shape":"VLAN", @@ -3220,6 +3362,48 @@ "unknown" ] }, + "VirtualInterfaceTestHistory":{ + "type":"structure", + "members":{ + "testId":{ + "shape":"TestId", + "documentation":"

The ID of the virtual interface failover test.

" + }, + "virtualInterfaceId":{ + "shape":"VirtualInterfaceId", + "documentation":"

The ID of the tested virtual interface.

" + }, + "bgpPeers":{ + "shape":"BGPPeerIdList", + "documentation":"

The BGP peers that were put in the DOWN state as part of the virtual interface failover test.

" + }, + "status":{ + "shape":"FailureTestHistoryStatus", + "documentation":"

The status of the virtual interface failover test.

" + }, + "ownerAccount":{ + "shape":"OwnerAccount", + "documentation":"

The owner ID of the tested virtual interface.

" + }, + "testDurationInMinutes":{ + "shape":"TestDuration", + "documentation":"

The time that the virtual interface failover test ran in minutes.

" + }, + "startTime":{ + "shape":"StartTime", + "documentation":"

The time that the virtual interface moves to the DOWN state.

" + }, + "endTime":{ + "shape":"EndTime", + "documentation":"

The time that the virtual interface moves out of the DOWN state.

" + } + }, + "documentation":"

Information about the virtual interface failover test.

" + }, + "VirtualInterfaceTestHistoryList":{ + "type":"list", + "member":{"shape":"VirtualInterfaceTestHistory"} + }, "VirtualInterfaceType":{"type":"string"}, "VirtualInterfaces":{ "type":"structure", diff --git a/botocore/data/dlm/2018-01-12/service-2.json b/botocore/data/dlm/2018-01-12/service-2.json index 86149184..22948588 100644 --- a/botocore/data/dlm/2018-01-12/service-2.json +++ b/botocore/data/dlm/2018-01-12/service-2.json @@ -717,7 +717,7 @@ }, "ScheduleName":{ "type":"string", - "max":500, + "max":120, "min":0, "pattern":"[\\p{all}]*" }, diff --git a/botocore/data/ec2/2016-11-15/service-2.json b/botocore/data/ec2/2016-11-15/service-2.json index b7936c3a..da139ac6 100644 --- a/botocore/data/ec2/2016-11-15/service-2.json +++ b/botocore/data/ec2/2016-11-15/service-2.json @@ -2220,7 +2220,7 @@ }, "input":{"shape":"DescribeSnapshotsRequest"}, "output":{"shape":"DescribeSnapshotsResult"}, - "documentation":"

Describes the specified EBS snapshots available to you or all of the EBS snapshots available to you.

The snapshots available to you include public snapshots, private snapshots that you own, and private snapshots owned by other AWS accounts for which you have explicit create volume permissions.

The create volume permissions fall into the following categories:

The list of snapshots returned can be filtered by specifying snapshot IDs, snapshot owners, or AWS accounts with create volume permissions. If no options are specified, Amazon EC2 returns all snapshots for which you have create volume permissions.

If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned. If you specify an invalid snapshot ID, an error is returned. If you specify a snapshot ID for which you do not have access, it is not included in the returned results.

If you specify one or more snapshot owners using the OwnerIds option, only snapshots from the specified owners and for which you have access are returned. The results can include the AWS account IDs of the specified owners, amazon for snapshots owned by Amazon, or self for snapshots that you own.

If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are returned. You can specify AWS account IDs (if you own the snapshots), self for snapshots for which you own or have explicit permissions, or all for public snapshots.

If you are describing a long list of snapshots, you can paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeSnapshots request to retrieve the remaining results.

To get the state of fast snapshot restores for a snapshot, use DescribeFastSnapshotRestores.

For more information about EBS snapshots, see Amazon EBS Snapshots in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the specified EBS snapshots available to you or all of the EBS snapshots available to you.

The snapshots available to you include public snapshots, private snapshots that you own, and private snapshots owned by other AWS accounts for which you have explicit create volume permissions.

The create volume permissions fall into the following categories:

The list of snapshots returned can be filtered by specifying snapshot IDs, snapshot owners, or AWS accounts with create volume permissions. If no options are specified, Amazon EC2 returns all snapshots for which you have create volume permissions.

If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned. If you specify an invalid snapshot ID, an error is returned. If you specify a snapshot ID for which you do not have access, it is not included in the returned results.

If you specify one or more snapshot owners using the OwnerIds option, only snapshots from the specified owners and for which you have access are returned. The results can include the AWS account IDs of the specified owners, amazon for snapshots owned by Amazon, or self for snapshots that you own.

If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are returned. You can specify AWS account IDs (if you own the snapshots), self for snapshots for which you own or have explicit permissions, or all for public snapshots.

If you are describing a long list of snapshots, we recommend that you paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeSnapshots request to retrieve the remaining results.

To get the state of fast snapshot restores for a snapshot, use DescribeFastSnapshotRestores.

For more information about EBS snapshots, see Amazon EBS Snapshots in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeSpotDatafeedSubscription":{ "name":"DescribeSpotDatafeedSubscription", @@ -2430,7 +2430,7 @@ }, "input":{"shape":"DescribeVolumesRequest"}, "output":{"shape":"DescribeVolumesResult"}, - "documentation":"

Describes the specified EBS volumes or all of your EBS volumes.

If you are describing a long list of volumes, you can paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeVolumes request to retrieve the remaining results.

For more information about EBS volumes, see Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the specified EBS volumes or all of your EBS volumes.

If you are describing a long list of volumes, we recommend that you paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeVolumes request to retrieve the remaining results.

For more information about EBS volumes, see Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeVolumesModifications":{ "name":"DescribeVolumesModifications", @@ -2440,7 +2440,7 @@ }, "input":{"shape":"DescribeVolumesModificationsRequest"}, "output":{"shape":"DescribeVolumesModificationsResult"}, - "documentation":"

Reports the current modification status of EBS volumes.

Current-generation EBS volumes support modification of attributes including type, size, and (for io1 volumes) IOPS provisioning while either attached to or detached from an instance. Following an action from the API or the console to modify a volume, the status of the modification may be modifying, optimizing, completed, or failed. If a volume has never been modified, then certain elements of the returned VolumeModification objects are null.

You can also use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. For more information, see Monitoring Volume Modifications\" in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the most recent volume modification request for the specified EBS volumes.

If a volume has never been modified, some information in the output will be null. If a volume has been modified more than once, the output includes only the most recent modification request.

You can also use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. For more information, see Monitoring Volume Modifications in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeVpcAttribute":{ "name":"DescribeVpcAttribute", @@ -12300,12 +12300,12 @@ }, "OwnerId":{ "shape":"String", - "documentation":"

The ID of the AWS account that owns the snapshot.

", + "documentation":"

The ID of the AWS account that enabled fast snapshot restores on the snapshot.

", "locationName":"ownerId" }, "OwnerAlias":{ "shape":"String", - "documentation":"

The alias of the snapshot owner.

", + "documentation":"

The AWS owner alias that enabled fast snapshot restores on the snapshot. This is intended for future use.

", "locationName":"ownerAlias" }, "EnablingTime":{ @@ -12353,7 +12353,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

The filters. The possible values are:

", + "documentation":"

The filters. The possible values are:

", "locationName":"Filter" }, "MaxResults":{ @@ -13283,7 +13283,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters. Filter names and values are case-sensitive.

", + "documentation":"

One or more filters. Filter names and values are case-sensitive.

", "locationName":"Filter" }, "MaxResults":{ @@ -14810,7 +14810,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

The filters.

", + "documentation":"

The filters.

", "locationName":"Filter" }, "MaxResults":{ @@ -14823,7 +14823,7 @@ }, "OwnerIds":{ "shape":"OwnerStringList", - "documentation":"

Describes the snapshots owned by these owners.

", + "documentation":"

Scopes the results to snapshots with the specified owners. You can specify a combination of AWS account IDs, self, and amazon.

", "locationName":"Owner" }, "RestorableByUserIds":{ @@ -15764,12 +15764,12 @@ }, "VolumeIds":{ "shape":"VolumeIdStringList", - "documentation":"

The IDs of the volumes for which in-progress modifications will be described.

", + "documentation":"

The IDs of the volumes.

", "locationName":"VolumeId" }, "Filters":{ "shape":"FilterList", - "documentation":"

The filters. Supported filters: volume-id | modification-state | target-size | target-iops | target-volume-type | original-size | original-iops | original-volume-type | start-time | originalMultiAttachEnabled | targetMultiAttachEnabled.

", + "documentation":"

The filters.

", "locationName":"Filter" }, "NextToken":{ @@ -16708,12 +16708,12 @@ }, "OwnerId":{ "shape":"String", - "documentation":"

The ID of the AWS account that owns the snapshot.

", + "documentation":"

The ID of the AWS account that enabled fast snapshot restores on the snapshot.

", "locationName":"ownerId" }, "OwnerAlias":{ "shape":"String", - "documentation":"

The alias of the snapshot owner.

", + "documentation":"

The AWS owner alias that enabled fast snapshot restores on the snapshot. This is intended for future use.

", "locationName":"ownerAlias" }, "EnablingTime":{ @@ -17355,6 +17355,11 @@ "shape":"EbsOptimizedInfo", "documentation":"

Describes the optimized EBS performance for the instance type.

", "locationName":"ebsOptimizedInfo" + }, + "NvmeSupport":{ + "shape":"EbsNvmeSupport", + "documentation":"

Indicates whether non-volatile memory express (NVMe) is supported.

", + "locationName":"nvmeSupport" } }, "documentation":"

Describes the Amazon EBS features supported by the instance type.

" @@ -17401,6 +17406,14 @@ }, "documentation":"

Describes information used to set up an EBS volume specified in a block device mapping.

" }, + "EbsNvmeSupport":{ + "type":"string", + "enum":[ + "unsupported", + "supported", + "required" + ] + }, "EbsOptimizedInfo":{ "type":"structure", "members":{ @@ -17642,7 +17655,7 @@ "members":{ "Type":{ "shape":"String", - "documentation":"

The type of elastic inference accelerator. The possible values are eia1.medium, eia1.large, and eia1.xlarge.

" + "documentation":"

The type of elastic inference accelerator. The possible values are eia1.medium, eia1.large, eia1.xlarge, eia2.medium, eia2.large, and eia2.xlarge.

" }, "Count":{ "shape":"ElasticInferenceAcceleratorCount", @@ -17810,12 +17823,12 @@ }, "OwnerId":{ "shape":"String", - "documentation":"

The ID of the AWS account that owns the snapshot.

", + "documentation":"

The ID of the AWS account that enabled fast snapshot restores on the snapshot.

", "locationName":"ownerId" }, "OwnerAlias":{ "shape":"String", - "documentation":"

The alias of the snapshot owner.

", + "documentation":"

The AWS owner alias that enabled fast snapshot restores on the snapshot. This is intended for future use.

", "locationName":"ownerAlias" }, "EnablingTime":{ @@ -22840,6 +22853,15 @@ "r5ad.12xlarge", "r5ad.16xlarge", "r5ad.24xlarge", + "r6g.metal", + "r6g.medium", + "r6g.large", + "r6g.xlarge", + "r6g.2xlarge", + "r6g.4xlarge", + "r6g.8xlarge", + "r6g.12xlarge", + "r6g.16xlarge", "x1.16xlarge", "x1.32xlarge", "x1e.xlarge", @@ -22890,6 +22912,14 @@ "c5.18xlarge", "c5.24xlarge", "c5.metal", + "c5a.large", + "c5a.xlarge", + "c5a.2xlarge", + "c5a.4xlarge", + "c5a.8xlarge", + "c5a.12xlarge", + "c5a.16xlarge", + "c5a.24xlarge", "c5d.large", "c5d.xlarge", "c5d.2xlarge", @@ -22905,6 +22935,15 @@ "c5n.4xlarge", "c5n.9xlarge", "c5n.18xlarge", + "c6g.metal", + "c6g.medium", + "c6g.large", + "c6g.xlarge", + "c6g.2xlarge", + "c6g.4xlarge", + "c6g.8xlarge", + "c6g.12xlarge", + "c6g.16xlarge", "cc1.4xlarge", "cc2.8xlarge", "g2.2xlarge", @@ -22919,6 +22958,7 @@ "g4dn.8xlarge", "g4dn.12xlarge", "g4dn.16xlarge", + "g4dn.metal", "cg1.4xlarge", "p2.xlarge", "p2.8xlarge", @@ -32736,7 +32776,7 @@ }, "OwnerAlias":{ "shape":"String", - "documentation":"

Value from an Amazon-maintained list (amazon | self | all | aws-marketplace | microsoft) of snapshot owners. Not to be confused with the user-configured AWS account alias, which is set from the IAM console.

", + "documentation":"

The AWS owner alias, as maintained by Amazon. The possible values are: amazon | self | all | aws-marketplace | microsoft. This AWS owner alias is not to be confused with the user-configured AWS account alias, which is set from the IAM console.

", "locationName":"ownerAlias" }, "Tags":{ @@ -36691,7 +36731,7 @@ }, "OriginalSize":{ "shape":"Integer", - "documentation":"

The original size of the volume.

", + "documentation":"

The original size of the volume, in GiB.

", "locationName":"originalSize" }, "OriginalIops":{ diff --git a/botocore/data/ecs/2014-11-13/service-2.json b/botocore/data/ecs/2014-11-13/service-2.json index 20a4a772..e2af225f 100644 --- a/botocore/data/ecs/2014-11-13/service-2.json +++ b/botocore/data/ecs/2014-11-13/service-2.json @@ -25,7 +25,8 @@ {"shape":"ServerException"}, {"shape":"ClientException"}, {"shape":"InvalidParameterException"}, - {"shape":"LimitExceededException"} + {"shape":"LimitExceededException"}, + {"shape":"UpdateInProgressException"} ], "documentation":"

Creates a new capacity provider. Capacity providers are associated with an Amazon ECS cluster and are used in capacity provider strategies to facilitate cluster auto scaling.

Only capacity providers using an Auto Scaling group can be created. Amazon ECS tasks on AWS Fargate use the FARGATE and FARGATE_SPOT capacity providers which are already created and available to all accounts in Regions supported by AWS Fargate.

" }, @@ -116,6 +117,21 @@ ], "documentation":"

Deletes one or more custom attributes from an Amazon ECS resource.

" }, + "DeleteCapacityProvider":{ + "name":"DeleteCapacityProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCapacityProviderRequest"}, + "output":{"shape":"DeleteCapacityProviderResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ], + "documentation":"

Deletes the specified capacity provider.

The FARGATE and FARGATE_SPOT capacity providers are reserved and cannot be deleted. You can disassociate them from a cluster using either the PutClusterCapacityProviders API or by deleting the cluster.

Prior to a capacity provider being deleted, the capacity provider must be removed from the capacity provider strategy from all services. The UpdateService API can be used to remove a capacity provider from a service's capacity provider strategy. When updating a service, the forceNewDeployment option can be used to ensure that any tasks using the Amazon EC2 instance capacity provided by the capacity provider are transitioned to use the capacity from the remaining capacity providers. Only capacity providers that are not associated with a cluster can be deleted. To remove a capacity provider from a cluster, you can either use PutClusterCapacityProviders or delete the cluster.

" + }, "DeleteCluster":{ "name":"DeleteCluster", "http":{ @@ -993,12 +1009,20 @@ }, "status":{ "shape":"CapacityProviderStatus", - "documentation":"

The current status of the capacity provider. Only capacity providers in an ACTIVE state can be used in a cluster.

" + "documentation":"

The current status of the capacity provider. Only capacity providers in an ACTIVE state can be used in a cluster. When a capacity provider is successfully deleted, it will have an INACTIVE status.

" }, "autoScalingGroupProvider":{ "shape":"AutoScalingGroupProvider", "documentation":"

The Auto Scaling group settings for the capacity provider.

" }, + "updateStatus":{ + "shape":"CapacityProviderUpdateStatus", + "documentation":"

The update status of the capacity provider. The following are the possible states that will be returned.

DELETE_IN_PROGRESS

The capacity provider is in the process of being deleted.

DELETE_COMPLETE

The capacity provider has been successfully deleted and will have an INACTIVE status.

DELETE_FAILED

The capacity provider was unable to be deleted. The update status reason will provide further details about why the delete failed.

" + }, + "updateStatusReason":{ + "shape":"String", + "documentation":"

The update status reason. This provides further details about the update status for the capacity provider.

" + }, "tags":{ "shape":"Tags", "documentation":"

The metadata that you apply to the capacity provider to help you categorize and organize it. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

" @@ -1016,7 +1040,10 @@ }, "CapacityProviderStatus":{ "type":"string", - "enum":["ACTIVE"] + "enum":[ + "ACTIVE", + "INACTIVE" + ] }, "CapacityProviderStrategy":{ "type":"list", @@ -1051,6 +1078,14 @@ "max":1000, "min":0 }, + "CapacityProviderUpdateStatus":{ + "type":"string", + "enum":[ + "DELETE_IN_PROGRESS", + "DELETE_COMPLETE", + "DELETE_FAILED" + ] + }, "CapacityProviders":{ "type":"list", "member":{"shape":"CapacityProvider"} @@ -1426,7 +1461,7 @@ }, "ulimits":{ "shape":"UlimitList", - "documentation":"

A list of ulimits to set in the container. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run. Valid naming values are displayed in the Ulimit data type. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

This parameter is not supported for Windows containers.

" + "documentation":"

A list of ulimits to set in the container. If a ulimit value is specified in a task definition, it will override the default values set by Docker. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run. Valid naming values are displayed in the Ulimit data type. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

This parameter is not supported for Windows containers.

" }, "logConfiguration":{ "shape":"LogConfiguration", @@ -1928,6 +1963,22 @@ } } }, + "DeleteCapacityProviderRequest":{ + "type":"structure", + "required":["capacityProvider"], + "members":{ + "capacityProvider":{ + "shape":"String", + "documentation":"

The short name or full Amazon Resource Name (ARN) of the capacity provider to delete.

" + } + } + }, + "DeleteCapacityProviderResponse":{ + "type":"structure", + "members":{ + "capacityProvider":{"shape":"CapacityProvider"} + } + }, "DeleteClusterRequest":{ "type":"structure", "required":["cluster"], @@ -2808,7 +2859,7 @@ "members":{ "name":{ "shape":"SettingName", - "documentation":"

The resource name you want to list the account settings for.

" + "documentation":"

The name of the account setting you want to list the settings for.

" }, "value":{ "shape":"String", @@ -3673,7 +3724,7 @@ }, "executionRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the task execution role that the Amazon ECS container agent and the Docker daemon can assume.

" + "documentation":"

The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make AWS API calls on your behalf. The task execution IAM role is required depending on the requirements of your task. For more information, see Amazon ECS task execution IAM role in the Amazon Elastic Container Service Developer Guide.

" }, "networkMode":{ "shape":"NetworkMode", @@ -4687,7 +4738,7 @@ }, "executionRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the task execution role that containers in this task can assume. All containers in this task are granted the permissions that are specified in this role.

" + "documentation":"

The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make AWS API calls on your behalf. The task execution IAM role is required depending on the requirements of your task. For more information, see Amazon ECS task execution IAM role in the Amazon Elastic Container Service Developer Guide.

" }, "networkMode":{ "shape":"NetworkMode", @@ -4818,7 +4869,7 @@ }, "executionRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the task execution role that the Amazon ECS container agent and the Docker daemon can assume.

" + "documentation":"

The Amazon Resource Name (ARN) of the task execution IAM role override for the task.

" }, "memory":{ "shape":"String", @@ -5302,7 +5353,7 @@ }, "host":{ "shape":"HostVolumeProperties", - "documentation":"

This parameter is specified when you are using bind mount host volumes. Bind mount host volumes are supported when you are using either the EC2 or Fargate launch types. The contents of the host parameter determine whether your bind mount host volume persists on the host container instance and where it is stored. If the host parameter is empty, then the Docker daemon assigns a host path for your data volume. However, the data is not guaranteed to persist after the containers associated with it stop running.

Windows containers can mount whole directories on the same drive as $env:ProgramData. Windows containers cannot mount directories on a different drive, and mount point cannot be across drives. For example, you can mount C:\\my\\path:C:\\my\\path and D:\\:D:\\, but not D:\\my\\path:C:\\my\\path or D:\\:C:\\my\\path.

" + "documentation":"

This parameter is specified when you are using bind mount host volumes. The contents of the host parameter determine whether your bind mount host volume persists on the host container instance and where it is stored. If the host parameter is empty, then the Docker daemon assigns a host path for your data volume. However, the data is not guaranteed to persist after the containers associated with it stop running.

Windows containers can mount whole directories on the same drive as $env:ProgramData. Windows containers cannot mount directories on a different drive, and mount point cannot be across drives. For example, you can mount C:\\my\\path:C:\\my\\path and D:\\:D:\\, but not D:\\my\\path:C:\\my\\path or D:\\:C:\\my\\path.

" }, "dockerVolumeConfiguration":{ "shape":"DockerVolumeConfiguration", @@ -5310,10 +5361,10 @@ }, "efsVolumeConfiguration":{ "shape":"EFSVolumeConfiguration", - "documentation":"

This parameter is specified when you are using an Amazon Elastic File System (Amazon EFS) file storage. Amazon EFS file systems are only supported when you are using the EC2 launch type.

EFSVolumeConfiguration remains in preview and is a Beta Service as defined by and subject to the Beta Service Participation Service Terms located at https://aws.amazon.com/service-terms (\"Beta Terms\"). These Beta Terms apply to your participation in this preview of EFSVolumeConfiguration.

" + "documentation":"

This parameter is specified when you are using an Amazon Elastic File System file system for task storage.

" } }, - "documentation":"

A data volume used in a task definition. For tasks that use a Docker volume, specify a DockerVolumeConfiguration. For tasks that use a bind mount host volume, specify a host and optional sourcePath. For more information, see Using Data Volumes in Tasks.

" + "documentation":"

A data volume used in a task definition. For tasks that use Amazon Elastic File System (Amazon EFS) file storage, specify an efsVolumeConfiguration. For tasks that use a Docker volume, specify a DockerVolumeConfiguration. For tasks that use a bind mount host volume, specify a host and optional sourcePath. For more information, see Using Data Volumes in Tasks.

" }, "VolumeFrom":{ "type":"structure", diff --git a/botocore/data/elasticache/2015-02-02/service-2.json b/botocore/data/elasticache/2015-02-02/service-2.json index fe8e2aae..02655ec4 100644 --- a/botocore/data/elasticache/2015-02-02/service-2.json +++ b/botocore/data/elasticache/2015-02-02/service-2.json @@ -2386,6 +2386,7 @@ "shape":"BooleanOptional", "documentation":"

Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.

If true, Multi-AZ is enabled for this replication group. If false, Multi-AZ is disabled for this replication group.

AutomaticFailoverEnabled must be enabled for Redis (cluster mode enabled) replication groups.

Default: false

Amazon ElastiCache for Redis does not support Multi-AZ with automatic failover on:

" }, + "MultiAZEnabled":{"shape":"BooleanOptional"}, "NumCacheClusters":{ "shape":"IntegerOptional", "documentation":"

The number of nodes in the cluster.

This parameter is not used if there is more than one node group (shard). You should use ReplicasPerNodeGroup instead.

If AutomaticFailoverEnabled is true, the value of this parameter must be at least 2. If AutomaticFailoverEnabled is false you can omit this parameter (it will default to 1), or you can explicitly set it to a value between 2 and 6.

The maximum permitted value for NumCacheClusters is 6 (1 primary plus 5 replicas).

" @@ -3945,6 +3946,7 @@ "shape":"BooleanOptional", "documentation":"

Determines whether a read replica is automatically promoted to read/write primary if the existing primary encounters a failure.

Valid values: true | false

Amazon ElastiCache for Redis does not support Multi-AZ with automatic failover on:

" }, + "MultiAZEnabled":{"shape":"BooleanOptional"}, "NodeGroupId":{ "shape":"String", "documentation":"

Deprecated. This parameter is not used.

", @@ -4056,6 +4058,13 @@ "ReplicationGroup":{"shape":"ReplicationGroup"} } }, + "MultiAZStatus":{ + "type":"string", + "enum":[ + "enabled", + "disabled" + ] + }, "NoOperationFault":{ "type":"structure", "members":{ @@ -4712,6 +4721,7 @@ "shape":"AutomaticFailoverStatus", "documentation":"

Indicates the status of Multi-AZ with automatic failover for this Redis replication group.

Amazon ElastiCache for Redis does not support Multi-AZ with automatic failover on:

" }, + "MultiAZ":{"shape":"MultiAZStatus"}, "ConfigurationEndpoint":{ "shape":"Endpoint", "documentation":"

The configuration endpoint for this replication group. Use the configuration endpoint to connect to this replication group.

" diff --git a/botocore/data/elasticbeanstalk/2010-12-01/service-2.json b/botocore/data/elasticbeanstalk/2010-12-01/service-2.json index 6d71f2fa..249e5a1c 100644 --- a/botocore/data/elasticbeanstalk/2010-12-01/service-2.json +++ b/botocore/data/elasticbeanstalk/2010-12-01/service-2.json @@ -41,6 +41,18 @@ ], "documentation":"

Applies a scheduled managed action immediately. A managed action can be applied only if its status is Scheduled. Get the status and action ID of a managed action with DescribeEnvironmentManagedActions.

" }, + "AssociateEnvironmentOperationsRole":{ + "name":"AssociateEnvironmentOperationsRole", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateEnvironmentOperationsRoleMessage"}, + "errors":[ + {"shape":"InsufficientPrivilegesException"} + ], + "documentation":"

Add or change the operations role used by an environment. After this call is made, Elastic Beanstalk uses the associated operations role for permissions to downstream services during subsequent calls acting on this environment. For more information, see Operations roles in the AWS Elastic Beanstalk Developer Guide.

" + }, "CheckDNSAvailability":{ "name":"CheckDNSAvailability", "http":{ @@ -442,6 +454,18 @@ ], "documentation":"

Describes a platform version. Provides full details. Compare to ListPlatformVersions, which provides summary information about a list of platform versions.

For definitions of platform version and other platform-related terms, see AWS Elastic Beanstalk Platforms Glossary.

" }, + "DisassociateEnvironmentOperationsRole":{ + "name":"DisassociateEnvironmentOperationsRole", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateEnvironmentOperationsRoleMessage"}, + "errors":[ + {"shape":"InsufficientPrivilegesException"} + ], + "documentation":"

Disassociate the operations role from an environment. After this call is made, Elastic Beanstalk uses the caller's permissions for permissions to downstream services during subsequent calls acting on this environment. For more information, see Operations roles in the AWS Elastic Beanstalk Developer Guide.

" + }, "ListAvailableSolutionStacks":{ "name":"ListAvailableSolutionStacks", "http":{ @@ -984,6 +1008,24 @@ }, "documentation":"

The result message containing information about the managed action.

" }, + "AssociateEnvironmentOperationsRoleMessage":{ + "type":"structure", + "required":[ + "EnvironmentName", + "OperationsRole" + ], + "members":{ + "EnvironmentName":{ + "shape":"EnvironmentName", + "documentation":"

The name of the environment to which to set the operations role.

" + }, + "OperationsRole":{ + "shape":"OperationsRole", + "documentation":"

The Amazon Resource Name (ARN) of an existing IAM role to be used as the environment's operations role.

" + } + }, + "documentation":"

Request to add or change the operations role used by an environment.

" + }, "AutoCreateApplication":{"type":"boolean"}, "AutoScalingGroup":{ "type":"structure", @@ -1524,7 +1566,7 @@ }, "PlatformArn":{ "shape":"PlatformArn", - "documentation":"

The Amazon Resource Name (ARN) of the custom platform to use with the environment. For more information, see Custom Platforms in the AWS Elastic Beanstalk Developer Guide.

If you specify PlatformArn, don't specify SolutionStackName.

" + "documentation":"

The Amazon Resource Name (ARN) of the custom platform to use with the environment. For more information, see Custom Platforms in the AWS Elastic Beanstalk Developer Guide.

If you specify PlatformArn, don't specify SolutionStackName.

" }, "OptionSettings":{ "shape":"ConfigurationOptionSettingsList", @@ -1533,6 +1575,10 @@ "OptionsToRemove":{ "shape":"OptionsSpecifierList", "documentation":"

A list of custom user-defined configuration options to remove from the configuration set for this new environment.

" + }, + "OperationsRole":{ + "shape":"OperationsRole", + "documentation":"

The Amazon Resource Name (ARN) of an existing IAM role to be used as the environment's operations role. If specified, Elastic Beanstalk uses the operations role for permissions to downstream services during this call and during subsequent calls acting on this environment. To specify an operations role, you must have the iam:PassRole permission for the role. For more information, see Operations roles in the AWS Elastic Beanstalk Developer Guide.

" } }, "documentation":"

" @@ -2117,6 +2163,17 @@ "type":"string", "max":200 }, + "DisassociateEnvironmentOperationsRoleMessage":{ + "type":"structure", + "required":["EnvironmentName"], + "members":{ + "EnvironmentName":{ + "shape":"EnvironmentName", + "documentation":"

The name of the environment from which to disassociate the operations role.

" + } + }, + "documentation":"

Request to disassociate the operations role from an environment.

" + }, "Ec2InstanceId":{"type":"string"}, "ElasticBeanstalkServiceException":{ "type":"structure", @@ -2213,6 +2270,10 @@ "EnvironmentArn":{ "shape":"EnvironmentArn", "documentation":"

The environment's Amazon Resource Name (ARN), which can be used in other API requests that require an ARN.

" + }, + "OperationsRole":{ + "shape":"OperationsRole", + "documentation":"

The Amazon Resource Name (ARN) of the environment's operations role. For more information, see Operations roles in the AWS Elastic Beanstalk Developer Guide.

" } }, "documentation":"

Describes the properties of an environment.

" @@ -2997,6 +3058,11 @@ }, "exception":true }, + "OperationsRole":{ + "type":"string", + "max":256, + "min":1 + }, "OptionNamespace":{"type":"string"}, "OptionRestrictionMaxLength":{"type":"integer"}, "OptionRestrictionMaxValue":{"type":"integer"}, @@ -4081,11 +4147,11 @@ }, "TagsToAdd":{ "shape":"TagList", - "documentation":"

A list of tags to add or update.

If a key of an existing tag is added, the tag's value is updated.

" + "documentation":"

A list of tags to add or update. If a key of an existing tag is added, the tag's value is updated.

Specify at least one of these parameters: TagsToAdd, TagsToRemove.

" }, "TagsToRemove":{ "shape":"TagKeyList", - "documentation":"

A list of tag keys to remove.

If a tag key doesn't exist, it is silently ignored.

" + "documentation":"

A list of tag keys to remove. If a tag key doesn't exist, it is silently ignored.

Specify at least one of these parameters: TagsToAdd, TagsToRemove.

" } } }, @@ -4165,5 +4231,5 @@ }, "VirtualizationType":{"type":"string"} }, - "documentation":"AWS Elastic Beanstalk

AWS Elastic Beanstalk makes it easy for you to create, deploy, and manage scalable, fault-tolerant applications running on the Amazon Web Services cloud.

For more information about this product, go to the AWS Elastic Beanstalk details page. The location of the latest AWS Elastic Beanstalk WSDL is http://elasticbeanstalk.s3.amazonaws.com/doc/2010-12-01/AWSElasticBeanstalk.wsdl. To install the Software Development Kits (SDKs), Integrated Development Environment (IDE) Toolkits, and command line tools that enable you to access the API, go to Tools for Amazon Web Services.

Endpoints

For a list of region-specific endpoints that AWS Elastic Beanstalk supports, go to Regions and Endpoints in the Amazon Web Services Glossary.

" + "documentation":"AWS Elastic Beanstalk

AWS Elastic Beanstalk makes it easy for you to create, deploy, and manage scalable, fault-tolerant applications running on the Amazon Web Services cloud.

For more information about this product, go to the AWS Elastic Beanstalk details page. The location of the latest AWS Elastic Beanstalk WSDL is https://elasticbeanstalk.s3.amazonaws.com/doc/2010-12-01/AWSElasticBeanstalk.wsdl. To install the Software Development Kits (SDKs), Integrated Development Environment (IDE) Toolkits, and command line tools that enable you to access the API, go to Tools for Amazon Web Services.

Endpoints

For a list of region-specific endpoints that AWS Elastic Beanstalk supports, go to Regions and Endpoints in the Amazon Web Services Glossary.

" } diff --git a/botocore/data/emr/2009-03-31/service-2.json b/botocore/data/emr/2009-03-31/service-2.json index 997e4b04..041d84b9 100644 --- a/botocore/data/emr/2009-03-31/service-2.json +++ b/botocore/data/emr/2009-03-31/service-2.json @@ -905,6 +905,10 @@ "shape":"String", "documentation":"

The path to the Amazon S3 location where logs for this cluster are stored.

" }, + "LogEncryptionKmsKeyId":{ + "shape":"String", + "documentation":"

The AWS KMS customer master key (CMK) used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.

" + }, "RequestedAmiVersion":{ "shape":"String", "documentation":"

The AMI version requested for this cluster.

" @@ -2416,6 +2420,10 @@ "shape":"XmlString", "documentation":"

The location in Amazon S3 where log files for the job are stored.

" }, + "LogEncryptionKmsKeyId":{ + "shape":"XmlString", + "documentation":"

The AWS KMS customer master key (CMK) used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.

" + }, "AmiVersion":{ "shape":"XmlStringMaxLen256", "documentation":"

Applies only to Amazon EMR AMI versions 3.x and 2.x. For Amazon EMR releases 4.0 and later, ReleaseLabel is used. To specify a custom AMI, use CustomAmiID.

" @@ -3239,6 +3247,10 @@ "shape":"XmlString", "documentation":"

The location in Amazon S3 to write the log files of the job flow. If a value is not provided, logs are not created.

" }, + "LogEncryptionKmsKeyId":{ + "shape":"XmlString", + "documentation":"

The AWS KMS customer master key (CMK) used for encrypting log files. If a value is not provided, the logs will remain encrypted by AES-256. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.

" + }, "AdditionalInfo":{ "shape":"XmlString", "documentation":"

A JSON string for selecting additional features.

" diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 9d1253eb..4755e71f 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -965,6 +965,20 @@ "us-west-2" : { } } }, + "codeartifact" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, "codebuild" : { "endpoints" : { "ap-east-1" : { }, @@ -1428,6 +1442,7 @@ }, "datasync" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -1437,6 +1452,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -2569,8 +2585,10 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, "eu-west-1" : { }, @@ -3214,6 +3232,7 @@ }, "license-manager" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -3223,6 +3242,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -4680,6 +4700,30 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "securityhub-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "securityhub-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "securityhub-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + 
"hostname" : "securityhub-fips.us-west-2.amazonaws.com" + }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -5971,6 +6015,15 @@ "cn-northwest-1" : { } } }, + "autoscaling-plans" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "backup" : { "endpoints" : { "cn-north-1" : { }, @@ -5983,6 +6036,18 @@ "cn-northwest-1" : { } } }, + "budgets" : { + "endpoints" : { + "aws-cn-global" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "budgets.amazonaws.com.cn" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-cn-global" + }, "cloudformation" : { "endpoints" : { "cn-north-1" : { }, @@ -6247,6 +6312,12 @@ "cn-northwest-1" : { } } }, + "kinesisanalytics" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "kms" : { "endpoints" : { "cn-north-1" : { }, @@ -6594,7 +6665,19 @@ }, "api.sagemaker" : { "endpoints" : { - "us-gov-west-1" : { } + "us-gov-west-1" : { }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "api-fips.sagemaker.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1-fips-secondary" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "api.sagemaker.us-gov-west-1.amazonaws.com" + } } }, "apigateway" : { @@ -7192,6 +7275,12 @@ "region" : "us-gov-west-1" }, "hostname" : "iam.us-gov.amazonaws.com" + }, + "iam-govcloud-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "iam.us-gov.amazonaws.com" } }, "isRegionalized" : false, @@ -7238,6 +7327,18 @@ }, "kinesis" : { "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "kinesis-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "kinesis-fips.us-gov-west-1.amazonaws.com" + }, "us-gov-east-1" : { }, 
"us-gov-west-1" : { } } @@ -7569,6 +7670,18 @@ }, "securityhub" : { "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "securityhub-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "securityhub-fips.us-gov-west-1.amazonaws.com" + }, "us-gov-east-1" : { }, "us-gov-west-1" : { } } @@ -7733,6 +7846,12 @@ }, "storagegateway" : { "endpoints" : { + "fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "storagegateway-fips.us-gov-west-1.amazonaws.com" + }, "us-gov-east-1" : { }, "us-gov-west-1" : { } } diff --git a/botocore/data/es/2015-01-01/service-2.json b/botocore/data/es/2015-01-01/service-2.json index 4d8fcd91..5e960abc 100644 --- a/botocore/data/es/2015-01-01/service-2.json +++ b/botocore/data/es/2015-01-01/service-2.json @@ -10,6 +10,21 @@ "uid":"es-2015-01-01" }, "operations":{ + "AcceptInboundCrossClusterSearchConnection":{ + "name":"AcceptInboundCrossClusterSearchConnection", + "http":{ + "method":"PUT", + "requestUri":"/2015-01-01/es/ccs/inboundConnection/{ConnectionId}/accept" + }, + "input":{"shape":"AcceptInboundCrossClusterSearchConnectionRequest"}, + "output":{"shape":"AcceptInboundCrossClusterSearchConnectionResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"DisabledOperationException"} + ], + "documentation":"

Allows the destination domain owner to accept an inbound cross-cluster search connection request.

" + }, "AddTags":{ "name":"AddTags", "http":{ @@ -78,6 +93,22 @@ ], "documentation":"

Creates a new Elasticsearch domain. For more information, see Creating Elasticsearch Domains in the Amazon Elasticsearch Service Developer Guide.

" }, + "CreateOutboundCrossClusterSearchConnection":{ + "name":"CreateOutboundCrossClusterSearchConnection", + "http":{ + "method":"POST", + "requestUri":"/2015-01-01/es/ccs/outboundConnection" + }, + "input":{"shape":"CreateOutboundCrossClusterSearchConnectionRequest"}, + "output":{"shape":"CreateOutboundCrossClusterSearchConnectionResponse"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"InternalException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"DisabledOperationException"} + ], + "documentation":"

Creates a new cross-cluster search connection from a source domain to a destination domain.

" + }, "CreatePackage":{ "name":"CreatePackage", "http":{ @@ -126,6 +157,34 @@ ], "documentation":"

Deletes the service-linked role that Elasticsearch Service uses to manage and maintain VPC domains. Role deletion will fail if any existing VPC domains use the role. You must delete any such Elasticsearch domains before deleting the role. See Deleting Elasticsearch Service Role in VPC Endpoints for Amazon Elasticsearch Service Domains.

" }, + "DeleteInboundCrossClusterSearchConnection":{ + "name":"DeleteInboundCrossClusterSearchConnection", + "http":{ + "method":"DELETE", + "requestUri":"/2015-01-01/es/ccs/inboundConnection/{ConnectionId}" + }, + "input":{"shape":"DeleteInboundCrossClusterSearchConnectionRequest"}, + "output":{"shape":"DeleteInboundCrossClusterSearchConnectionResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"DisabledOperationException"} + ], + "documentation":"

Allows the destination domain owner to delete an existing inbound cross-cluster search connection.

" + }, + "DeleteOutboundCrossClusterSearchConnection":{ + "name":"DeleteOutboundCrossClusterSearchConnection", + "http":{ + "method":"DELETE", + "requestUri":"/2015-01-01/es/ccs/outboundConnection/{ConnectionId}" + }, + "input":{"shape":"DeleteOutboundCrossClusterSearchConnectionRequest"}, + "output":{"shape":"DeleteOutboundCrossClusterSearchConnectionResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"DisabledOperationException"} + ], + "documentation":"

Allows the source domain owner to delete an existing outbound cross-cluster search connection.

" + }, "DeletePackage":{ "name":"DeletePackage", "http":{ @@ -209,6 +268,34 @@ ], "documentation":"

Describe Elasticsearch Limits for a given InstanceType and ElasticsearchVersion. When modifying existing Domain, specify the DomainName to know what Limits are supported for modifying.

" }, + "DescribeInboundCrossClusterSearchConnections":{ + "name":"DescribeInboundCrossClusterSearchConnections", + "http":{ + "method":"POST", + "requestUri":"/2015-01-01/es/ccs/inboundConnection/search" + }, + "input":{"shape":"DescribeInboundCrossClusterSearchConnectionsRequest"}, + "output":{"shape":"DescribeInboundCrossClusterSearchConnectionsResponse"}, + "errors":[ + {"shape":"InvalidPaginationTokenException"}, + {"shape":"DisabledOperationException"} + ], + "documentation":"

Lists all the inbound cross-cluster search connections for a destination domain.

" + }, + "DescribeOutboundCrossClusterSearchConnections":{ + "name":"DescribeOutboundCrossClusterSearchConnections", + "http":{ + "method":"POST", + "requestUri":"/2015-01-01/es/ccs/outboundConnection/search" + }, + "input":{"shape":"DescribeOutboundCrossClusterSearchConnectionsRequest"}, + "output":{"shape":"DescribeOutboundCrossClusterSearchConnectionsResponse"}, + "errors":[ + {"shape":"InvalidPaginationTokenException"}, + {"shape":"DisabledOperationException"} + ], + "documentation":"

Lists all the outbound cross-cluster search connections for a source domain.

" + }, "DescribePackages":{ "name":"DescribePackages", "http":{ @@ -440,6 +527,20 @@ ], "documentation":"

Allows you to purchase reserved Elasticsearch instances.

" }, + "RejectInboundCrossClusterSearchConnection":{ + "name":"RejectInboundCrossClusterSearchConnection", + "http":{ + "method":"PUT", + "requestUri":"/2015-01-01/es/ccs/inboundConnection/{ConnectionId}/reject" + }, + "input":{"shape":"RejectInboundCrossClusterSearchConnectionRequest"}, + "output":{"shape":"RejectInboundCrossClusterSearchConnectionResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"DisabledOperationException"} + ], + "documentation":"

Allows the destination domain owner to reject an inbound cross-cluster search connection request.

" + }, "RemoveTags":{ "name":"RemoveTags", "http":{ @@ -512,6 +613,29 @@ "type":"string", "documentation":"

The Amazon Resource Name (ARN) of the Elasticsearch domain. See Identifiers for IAM Entities in Using AWS Identity and Access Management for more information.

" }, + "AcceptInboundCrossClusterSearchConnectionRequest":{ + "type":"structure", + "required":["CrossClusterSearchConnectionId"], + "members":{ + "CrossClusterSearchConnectionId":{ + "shape":"CrossClusterSearchConnectionId", + "documentation":"

The id of the inbound connection that you want to accept.

", + "location":"uri", + "locationName":"ConnectionId" + } + }, + "documentation":"

Container for the parameters to the AcceptInboundCrossClusterSearchConnection operation.

" + }, + "AcceptInboundCrossClusterSearchConnectionResponse":{ + "type":"structure", + "members":{ + "CrossClusterSearchConnection":{ + "shape":"InboundCrossClusterSearchConnection", + "documentation":"

Specifies the InboundCrossClusterSearchConnection of the accepted inbound connection.

" + } + }, + "documentation":"

The result of an AcceptInboundCrossClusterSearchConnection operation. Contains details of the accepted inbound connection.

" + }, "AccessDeniedException":{ "type":"structure", "members":{ @@ -780,6 +904,10 @@ "error":{"httpStatusCode":409}, "exception":true }, + "ConnectionAlias":{ + "type":"string", + "max":20 + }, "CreateElasticsearchDomainRequest":{ "type":"structure", "required":["DomainName"], @@ -852,6 +980,55 @@ }, "documentation":"

The result of a CreateElasticsearchDomain operation. Contains the status of the newly created Elasticsearch domain.

" }, + "CreateOutboundCrossClusterSearchConnectionRequest":{ + "type":"structure", + "required":[ + "SourceDomainInfo", + "DestinationDomainInfo", + "ConnectionAlias" + ], + "members":{ + "SourceDomainInfo":{ + "shape":"DomainInformation", + "documentation":"

Specifies the DomainInformation for the source Elasticsearch domain.

" + }, + "DestinationDomainInfo":{ + "shape":"DomainInformation", + "documentation":"

Specifies the DomainInformation for the destination Elasticsearch domain.

" + }, + "ConnectionAlias":{ + "shape":"ConnectionAlias", + "documentation":"

Specifies the connection alias that will be used by the customer for this connection.

" + } + }, + "documentation":"

Container for the parameters to the CreateOutboundCrossClusterSearchConnection operation.

" + }, + "CreateOutboundCrossClusterSearchConnectionResponse":{ + "type":"structure", + "members":{ + "SourceDomainInfo":{ + "shape":"DomainInformation", + "documentation":"

Specifies the DomainInformation for the source Elasticsearch domain.

" + }, + "DestinationDomainInfo":{ + "shape":"DomainInformation", + "documentation":"

Specifies the DomainInformation for the destination Elasticsearch domain.

" + }, + "ConnectionAlias":{ + "shape":"ConnectionAlias", + "documentation":"

Specifies the connection alias provided during the create connection request.

" + }, + "ConnectionStatus":{ + "shape":"OutboundCrossClusterSearchConnectionStatus", + "documentation":"

Specifies the OutboundCrossClusterSearchConnectionStatus for the newly created connection.

" + }, + "CrossClusterSearchConnectionId":{ + "shape":"CrossClusterSearchConnectionId", + "documentation":"

Unique id for the created outbound connection, which is used for subsequent operations on connection.

" + } + }, + "documentation":"

The result of a CreateOutboundCrossClusterSearchConnection request. Contains the details of the newly created cross-cluster search connection.

" + }, "CreatePackageRequest":{ "type":"structure", "required":[ @@ -890,6 +1067,8 @@ "documentation":"

Container for response returned by CreatePackage operation.

" }, "CreatedAt":{"type":"timestamp"}, + "CrossClusterSearchConnectionId":{"type":"string"}, + "CrossClusterSearchConnectionStatusMessage":{"type":"string"}, "DeleteElasticsearchDomainRequest":{ "type":"structure", "required":["DomainName"], @@ -913,6 +1092,52 @@ }, "documentation":"

The result of a DeleteElasticsearchDomain request. Contains the status of the pending deletion, or no status if the domain and all of its resources have been deleted.

" }, + "DeleteInboundCrossClusterSearchConnectionRequest":{ + "type":"structure", + "required":["CrossClusterSearchConnectionId"], + "members":{ + "CrossClusterSearchConnectionId":{ + "shape":"CrossClusterSearchConnectionId", + "documentation":"

The id of the inbound connection that you want to permanently delete.

", + "location":"uri", + "locationName":"ConnectionId" + } + }, + "documentation":"

Container for the parameters to the DeleteInboundCrossClusterSearchConnection operation.

" + }, + "DeleteInboundCrossClusterSearchConnectionResponse":{ + "type":"structure", + "members":{ + "CrossClusterSearchConnection":{ + "shape":"InboundCrossClusterSearchConnection", + "documentation":"

Specifies the InboundCrossClusterSearchConnection of deleted inbound connection.

" + } + }, + "documentation":"

The result of a DeleteInboundCrossClusterSearchConnection operation. Contains details of deleted inbound connection.

" + }, + "DeleteOutboundCrossClusterSearchConnectionRequest":{ + "type":"structure", + "required":["CrossClusterSearchConnectionId"], + "members":{ + "CrossClusterSearchConnectionId":{ + "shape":"CrossClusterSearchConnectionId", + "documentation":"

The id of the outbound connection that you want to permanently delete.

", + "location":"uri", + "locationName":"ConnectionId" + } + }, + "documentation":"

Container for the parameters to the DeleteOutboundCrossClusterSearchConnection operation.

" + }, + "DeleteOutboundCrossClusterSearchConnectionResponse":{ + "type":"structure", + "members":{ + "CrossClusterSearchConnection":{ + "shape":"OutboundCrossClusterSearchConnection", + "documentation":"

Specifies the OutboundCrossClusterSearchConnection of deleted outbound connection.

" + } + }, + "documentation":"

The result of a DeleteOutboundCrossClusterSearchConnection operation. Contains details of deleted outbound connection.

" + }, "DeletePackageRequest":{ "type":"structure", "required":["PackageID"], @@ -1052,6 +1277,70 @@ }, "documentation":"

Container for the parameters received from DescribeElasticsearchInstanceTypeLimits operation.

" }, + "DescribeInboundCrossClusterSearchConnectionsRequest":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"FilterList", + "documentation":"

A list of filters used to match properties for inbound cross-cluster search connection. Available Filter names for this operation are:

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

Set this value to limit the number of results returned. If not specified, defaults to 100.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

NextToken is sent in case the earlier API call results contain the NextToken. It is used for pagination.

" + } + }, + "documentation":"

Container for the parameters to the DescribeInboundCrossClusterSearchConnections operation.

" + }, + "DescribeInboundCrossClusterSearchConnectionsResponse":{ + "type":"structure", + "members":{ + "CrossClusterSearchConnections":{ + "shape":"InboundCrossClusterSearchConnections", + "documentation":"

Consists of list of InboundCrossClusterSearchConnection matching the specified filter criteria.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If more results are available and NextToken is present, make the next request to the same API with the received NextToken to paginate the remaining results.

" + } + }, + "documentation":"

The result of a DescribeInboundCrossClusterSearchConnections request. Contains the list of connections matching the filter criteria.

" + }, + "DescribeOutboundCrossClusterSearchConnectionsRequest":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"FilterList", + "documentation":"

A list of filters used to match properties for outbound cross-cluster search connection. Available Filter names for this operation are:

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

Set this value to limit the number of results returned. If not specified, defaults to 100.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

NextToken is sent in case the earlier API call results contain the NextToken. It is used for pagination.

" + } + }, + "documentation":"

Container for the parameters to the DescribeOutboundCrossClusterSearchConnections operation.

" + }, + "DescribeOutboundCrossClusterSearchConnectionsResponse":{ + "type":"structure", + "members":{ + "CrossClusterSearchConnections":{ + "shape":"OutboundCrossClusterSearchConnections", + "documentation":"

Consists of list of OutboundCrossClusterSearchConnection matching the specified filter criteria.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If more results are available and NextToken is present, make the next request to the same API with the received NextToken to paginate the remaining results.

" + } + }, + "documentation":"

The result of a DescribeOutboundCrossClusterSearchConnections request. Contains the list of connections matching the filter criteria.

" + }, "DescribePackagesFilter":{ "type":"structure", "members":{ @@ -1284,6 +1573,15 @@ "member":{"shape":"DomainInfo"}, "documentation":"

Contains the list of Elasticsearch domain information.

" }, + "DomainInformation":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "OwnerId":{"shape":"OwnerId"}, + "DomainName":{"shape":"DomainName"}, + "Region":{"shape":"Region"} + } + }, "DomainName":{ "type":"string", "documentation":"

The name of an Elasticsearch domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen).

", @@ -1762,6 +2060,24 @@ }, "ErrorMessage":{"type":"string"}, "ErrorType":{"type":"string"}, + "Filter":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NonEmptyString", + "documentation":"

Specifies the name of the filter.

" + }, + "Values":{ + "shape":"ValueStringList", + "documentation":"

Contains one or more values for the filter.

" + } + }, + "documentation":"

A filter used to limit results when describing inbound or outbound cross-cluster search connections. Multiple values can be specified per filter. A cross-cluster search connection must match at least one of the specified values for it to be returned from an operation.

" + }, + "FilterList":{ + "type":"list", + "member":{"shape":"Filter"} + }, "GUID":{ "type":"string", "pattern":"\\p{XDigit}{8}-\\p{XDigit}{4}-\\p{XDigit}{4}-\\p{XDigit}{4}-\\p{XDigit}{12}" @@ -1859,6 +2175,57 @@ "min":1, "pattern":"[\\w-]+:[0-9a-f-]+" }, + "InboundCrossClusterSearchConnection":{ + "type":"structure", + "members":{ + "SourceDomainInfo":{ + "shape":"DomainInformation", + "documentation":"

Specifies the DomainInformation for the source Elasticsearch domain.

" + }, + "DestinationDomainInfo":{ + "shape":"DomainInformation", + "documentation":"

Specifies the DomainInformation for the destination Elasticsearch domain.

" + }, + "CrossClusterSearchConnectionId":{ + "shape":"CrossClusterSearchConnectionId", + "documentation":"

Specifies the connection id for the inbound cross-cluster search connection.

" + }, + "ConnectionStatus":{ + "shape":"InboundCrossClusterSearchConnectionStatus", + "documentation":"

Specifies the InboundCrossClusterSearchConnectionStatus for the outbound connection.

" + } + }, + "documentation":"

Specifies details of an inbound connection.

" + }, + "InboundCrossClusterSearchConnectionStatus":{ + "type":"structure", + "members":{ + "StatusCode":{ + "shape":"InboundCrossClusterSearchConnectionStatusCode", + "documentation":"

The state code for inbound connection. This can be one of the following:

" + }, + "Message":{ + "shape":"CrossClusterSearchConnectionStatusMessage", + "documentation":"

Specifies verbose information for the inbound connection status.

" + } + }, + "documentation":"

Specifies the connection status of an inbound cross-cluster search connection.

" + }, + "InboundCrossClusterSearchConnectionStatusCode":{ + "type":"string", + "enum":[ + "PENDING_ACCEPTANCE", + "APPROVED", + "REJECTING", + "REJECTED", + "DELETING", + "DELETED" + ] + }, + "InboundCrossClusterSearchConnections":{ + "type":"list", + "member":{"shape":"InboundCrossClusterSearchConnection"} + }, "InstanceCount":{ "type":"integer", "documentation":"

Specifies the number of EC2 instances in the Elasticsearch domain.

", @@ -1890,6 +2257,14 @@ "error":{"httpStatusCode":500}, "exception":true }, + "InvalidPaginationTokenException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The request processing has failed because of invalid pagination token provided by customer. Returns an HTTP status code of 400.

", + "error":{"httpStatusCode":400}, + "exception":true + }, "InvalidTypeException":{ "type":"structure", "members":{ @@ -2224,6 +2599,10 @@ }, "documentation":"

Status of the node-to-node encryption options for the specified Elasticsearch domain.

" }, + "NonEmptyString":{ + "type":"string", + "min":1 + }, "OptionState":{ "type":"string", "documentation":"

The state of a requested change. One of the following:

", @@ -2264,6 +2643,68 @@ }, "documentation":"

Provides the current status of the entity.

" }, + "OutboundCrossClusterSearchConnection":{ + "type":"structure", + "members":{ + "SourceDomainInfo":{ + "shape":"DomainInformation", + "documentation":"

Specifies the DomainInformation for the source Elasticsearch domain.

" + }, + "DestinationDomainInfo":{ + "shape":"DomainInformation", + "documentation":"

Specifies the DomainInformation for the destination Elasticsearch domain.

" + }, + "CrossClusterSearchConnectionId":{ + "shape":"CrossClusterSearchConnectionId", + "documentation":"

Specifies the connection id for the outbound cross-cluster search connection.

" + }, + "ConnectionAlias":{ + "shape":"ConnectionAlias", + "documentation":"

Specifies the connection alias for the outbound cross-cluster search connection.

" + }, + "ConnectionStatus":{ + "shape":"OutboundCrossClusterSearchConnectionStatus", + "documentation":"

Specifies the OutboundCrossClusterSearchConnectionStatus for the outbound connection.

" + } + }, + "documentation":"

Specifies details of an outbound connection.

" + }, + "OutboundCrossClusterSearchConnectionStatus":{ + "type":"structure", + "members":{ + "StatusCode":{ + "shape":"OutboundCrossClusterSearchConnectionStatusCode", + "documentation":"

The state code for outbound connection. This can be one of the following:

" + }, + "Message":{ + "shape":"CrossClusterSearchConnectionStatusMessage", + "documentation":"

Specifies verbose information for the outbound connection status.

" + } + }, + "documentation":"

Specifies the connection status of an outbound cross-cluster search connection.

" + }, + "OutboundCrossClusterSearchConnectionStatusCode":{ + "type":"string", + "enum":[ + "PENDING_ACCEPTANCE", + "VALIDATING", + "VALIDATION_FAILED", + "PROVISIONING", + "ACTIVE", + "REJECTED", + "DELETING", + "DELETED" + ] + }, + "OutboundCrossClusterSearchConnections":{ + "type":"list", + "member":{"shape":"OutboundCrossClusterSearchConnection"} + }, + "OwnerId":{ + "type":"string", + "max":12, + "min":12 + }, "PackageDescription":{ "type":"string", "max":1024 @@ -2408,6 +2849,30 @@ "member":{"shape":"RecurringCharge"} }, "ReferencePath":{"type":"string"}, + "Region":{"type":"string"}, + "RejectInboundCrossClusterSearchConnectionRequest":{ + "type":"structure", + "required":["CrossClusterSearchConnectionId"], + "members":{ + "CrossClusterSearchConnectionId":{ + "shape":"CrossClusterSearchConnectionId", + "documentation":"

The id of the inbound connection that you want to reject.

", + "location":"uri", + "locationName":"ConnectionId" + } + }, + "documentation":"

Container for the parameters to the RejectInboundCrossClusterSearchConnection operation.

" + }, + "RejectInboundCrossClusterSearchConnectionResponse":{ + "type":"structure", + "members":{ + "CrossClusterSearchConnection":{ + "shape":"InboundCrossClusterSearchConnection", + "documentation":"

Specifies the InboundCrossClusterSearchConnection of rejected inbound connection.

" + } + }, + "documentation":"

The result of a RejectInboundCrossClusterSearchConnection operation. Contains details of rejected inbound connection.

" + }, "RemoveTagsRequest":{ "type":"structure", "required":[ @@ -2997,6 +3462,11 @@ "error":{"httpStatusCode":400}, "exception":true }, + "ValueStringList":{ + "type":"list", + "member":{"shape":"NonEmptyString"}, + "min":1 + }, "VolumeType":{ "type":"string", "documentation":"

The type of EBS volume, standard, gp2, or io1. See Configuring EBS-based Storage for more information.

", diff --git a/botocore/data/fsx/2018-03-01/service-2.json b/botocore/data/fsx/2018-03-01/service-2.json index ced5ee83..144bf709 100644 --- a/botocore/data/fsx/2018-03-01/service-2.json +++ b/botocore/data/fsx/2018-03-01/service-2.json @@ -264,9 +264,10 @@ {"shape":"IncompatibleParameterError"}, {"shape":"InternalServerError"}, {"shape":"FileSystemNotFound"}, - {"shape":"MissingFileSystemConfiguration"} + {"shape":"MissingFileSystemConfiguration"}, + {"shape":"ServiceLimitExceeded"} ], - "documentation":"

Updates a file system configuration.

" + "documentation":"

Use this operation to update the configuration of an existing Amazon FSx file system. For an Amazon FSx for Lustre file system, you can update only the WeeklyMaintenanceStartTime. For an Amazon for Windows File Server file system, you can update the following properties:

You can update multiple properties in a single request.

" } }, "shapes":{ @@ -324,6 +325,53 @@ "min":1, "pattern":"^.{1,255}$" }, + "AdministrativeAction":{ + "type":"structure", + "members":{ + "AdministrativeActionType":{"shape":"AdministrativeActionType"}, + "ProgressPercent":{ + "shape":"ProgressPercent", + "documentation":"

Provides the percent complete of a STORAGE_OPTIMIZATION administrative action.

" + }, + "RequestTime":{ + "shape":"RequestTime", + "documentation":"

Time that the administrative action request was received.

" + }, + "Status":{ + "shape":"Status", + "documentation":"

Describes the status of the administrative action, as follows:

" + }, + "TargetFileSystemValues":{ + "shape":"FileSystem", + "documentation":"

Describes the target StorageCapacity or ThroughputCapacity value provided in the UpdateFileSystem operation. Returned for FILE_SYSTEM_UPDATE administrative actions.

" + }, + "FailureDetails":{"shape":"AdministrativeActionFailureDetails"} + }, + "documentation":"

Describes a specific Amazon FSx Administrative Action for the current Windows file system.

" + }, + "AdministrativeActionFailureDetails":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"ErrorMessage", + "documentation":"

Error message providing details about the failure.

" + } + }, + "documentation":"

Provides information about a failed administrative action.

" + }, + "AdministrativeActionType":{ + "type":"string", + "documentation":"

Describes the type of administrative action, as follows:

", + "enum":[ + "FILE_SYSTEM_UPDATE", + "STORAGE_OPTIMIZATION" + ] + }, + "AdministrativeActions":{ + "type":"list", + "member":{"shape":"AdministrativeAction"}, + "max":50 + }, "ArchivePath":{ "type":"string", "max":900, @@ -645,7 +693,7 @@ "members":{ "WeeklyMaintenanceStartTime":{ "shape":"WeeklyTime", - "documentation":"

The preferred time to perform weekly maintenance, in the UTC time zone.

" + "documentation":"

The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone, where d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.

" }, "ImportPath":{ "shape":"ArchivePath", @@ -693,7 +741,7 @@ }, "StorageType":{ "shape":"StorageType", - "documentation":"

Sets the storage type for the Amazon FSx for Windows file system you're creating. Valid values are SSD and HDD.

Default value is SSD. For more information, see Storage Type Options in the Amazon FSx for Windows User Guide.

" + "documentation":"

Sets the storage type for the Amazon FSx for Windows file system you're creating. Valid values are SSD and HDD.

Default value is SSD. For more information, see Storage Type Options in the Amazon FSx for Windows User Guide.

" }, "SubnetIds":{ "shape":"SubnetIds", @@ -749,7 +797,7 @@ }, "WeeklyMaintenanceStartTime":{ "shape":"WeeklyTime", - "documentation":"

The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone.

" + "documentation":"

The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone, where d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.

" }, "DailyAutomaticBackupStartTime":{ "shape":"DailyTime", @@ -1253,7 +1301,11 @@ "shape":"WindowsFileSystemConfiguration", "documentation":"

The configuration for this Microsoft Windows file system.

" }, - "LustreConfiguration":{"shape":"LustreFileSystemConfiguration"} + "LustreConfiguration":{"shape":"LustreFileSystemConfiguration"}, + "AdministrativeActions":{ + "shape":"AdministrativeActions", + "documentation":"

A list of administrative actions for the file system that are in process or waiting to be processed. Administrative actions describe changes to the Windows file system that you have initiated using the UpdateFileSystem action.

" + } }, "documentation":"

A description of a specific Amazon FSx file system.

" }, @@ -1492,7 +1544,7 @@ "members":{ "WeeklyMaintenanceStartTime":{ "shape":"WeeklyTime", - "documentation":"

The UTC time that you want to begin your weekly maintenance window.

" + "documentation":"

The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone. d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.

" }, "DataRepositoryConfiguration":{"shape":"DataRepositoryConfiguration"}, "DeploymentType":{ @@ -1519,6 +1571,7 @@ "MaxResults":{ "type":"integer", "documentation":"

The maximum number of resources to return in the response. This value must be an integer greater than zero.

", + "max":2147483647, "min":1 }, "Megabytes":{ @@ -1603,6 +1656,7 @@ "type":"string", "enum":["FAILED_FILES_ONLY"] }, + "RequestTime":{"type":"timestamp"}, "ResourceARN":{ "type":"string", "documentation":"

The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify AWS resources. We require an ARN when you need to specify a resource unambiguously across all of AWS. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

", @@ -1727,7 +1781,7 @@ "documentation":"

A list of up to two IP addresses of DNS servers or domain controllers in the self-managed AD directory.

" } }, - "documentation":"

The configuration that Amazon FSx uses to join the Windows File Server instance to the self-managed Microsoft Active Directory (AD) directory.

" + "documentation":"

The configuration that Amazon FSx uses to join the Windows File Server instance to a self-managed Microsoft Active Directory (AD) directory.

" }, "ServiceLimit":{ "type":"string", @@ -1753,9 +1807,20 @@ "exception":true }, "StartTime":{"type":"timestamp"}, + "Status":{ + "type":"string", + "enum":[ + "FAILED", + "IN_PROGRESS", + "PENDING", + "COMPLETED", + "UPDATED_OPTIMIZING" + ] + }, "StorageCapacity":{ "type":"integer", "documentation":"

The storage capacity for your Amazon FSx file system, in gibibytes.

", + "max":2147483647, "min":0 }, "StorageType":{ @@ -1895,7 +1960,7 @@ "members":{ "WeeklyMaintenanceStartTime":{ "shape":"WeeklyTime", - "documentation":"

The preferred time to perform weekly maintenance, in the UTC time zone.

" + "documentation":"

The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone. d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.

" } }, "documentation":"

The configuration object for Amazon FSx for Lustre file systems used in the UpdateFileSystem operation.

" @@ -1904,15 +1969,22 @@ "type":"structure", "required":["FileSystemId"], "members":{ - "FileSystemId":{"shape":"FileSystemId"}, + "FileSystemId":{ + "shape":"FileSystemId", + "documentation":"

Identifies the file system that you are updating.

" + }, "ClientRequestToken":{ "shape":"ClientRequestToken", - "documentation":"

(Optional) A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent updates. This string is automatically filled on your behalf when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK.

", + "documentation":"

A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent updates. This string is automatically filled on your behalf when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK.

", "idempotencyToken":true }, + "StorageCapacity":{ + "shape":"StorageCapacity", + "documentation":"

Use this parameter to increase the storage capacity of an Amazon FSx for Windows File Server file system. Specifies the storage capacity target value, GiB, for the file system you're updating. The storage capacity target value must be at least 10 percent (%) greater than the current storage capacity value. In order to increase storage capacity, the file system needs to have at least 16 MB/s of throughput capacity. You cannot make a storage capacity increase request if there is an existing storage capacity increase request in progress. For more information, see Managing Storage Capacity.

" + }, "WindowsConfiguration":{ "shape":"UpdateFileSystemWindowsConfiguration", - "documentation":"

The configuration update for this Microsoft Windows file system. The only supported options are for backup and maintenance and for self-managed Active Directory configuration.

" + "documentation":"

The configuration updates for an Amazon FSx for Windows File Server file system.

" }, "LustreConfiguration":{"shape":"UpdateFileSystemLustreConfiguration"} }, @@ -1933,22 +2005,26 @@ "members":{ "WeeklyMaintenanceStartTime":{ "shape":"WeeklyTime", - "documentation":"

The preferred time to perform weekly maintenance, in the UTC time zone.

" + "documentation":"

The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone. Where d is the weekday number, from 1 through 7, with 1 = Monday and 7 = Sunday.

" }, "DailyAutomaticBackupStartTime":{ "shape":"DailyTime", - "documentation":"

The preferred time to take daily automatic backups, in the UTC time zone.

" + "documentation":"

The preferred time to start the daily automatic backup, in the UTC time zone, for example, 02:00

" }, "AutomaticBackupRetentionDays":{ "shape":"AutomaticBackupRetentionDays", - "documentation":"

The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 35 days.

" + "documentation":"

The number of days to retain automatic daily backups. Setting this to zero (0) disables automatic daily backups. You can retain automatic daily backups for a maximum of 35 days. For more information, see Working with Automatic Daily Backups.

" + }, + "ThroughputCapacity":{ + "shape":"MegabytesPerSecond", + "documentation":"

Sets the target value for a file system's throughput capacity, in MB/s, that you are updating the file system to. Valid values are 8, 16, 32, 64, 128, 256, 512, 1024, 2048. You cannot make a throughput capacity update request if there is an existing throughput capacity update request in progress. For more information, see Managing Throughput Capacity.

" }, "SelfManagedActiveDirectoryConfiguration":{ "shape":"SelfManagedActiveDirectoryConfigurationUpdates", - "documentation":"

The configuration Amazon FSx uses to join the Windows File Server instance to the self-managed Microsoft AD directory.

" + "documentation":"

The configuration Amazon FSx uses to join the Windows File Server instance to the self-managed Microsoft AD directory. You cannot make a self-managed Microsoft AD update request if there is an existing self-managed Microsoft AD update request in progress.

" } }, - "documentation":"

Updates the Microsoft Windows configuration for an existing Amazon FSx for Windows File Server file system. Amazon FSx overwrites existing properties with non-null values provided in the request. If you don't specify a non-null value for a property, that property is not updated.

" + "documentation":"

Updates the configuration for an existing Amazon FSx for Windows File Server file system. Amazon FSx only overwrites existing properties with non-null values provided in the request.

" }, "VpcId":{ "type":"string", @@ -2006,7 +2082,7 @@ }, "WeeklyMaintenanceStartTime":{ "shape":"WeeklyTime", - "documentation":"

The preferred time to perform weekly maintenance, in the UTC time zone.

" + "documentation":"

The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone. d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.

" }, "DailyAutomaticBackupStartTime":{ "shape":"DailyTime", diff --git a/botocore/data/glue/2017-03-31/service-2.json b/botocore/data/glue/2017-03-31/service-2.json index c66fb39e..64c02ac9 100644 --- a/botocore/data/glue/2017-03-31/service-2.json +++ b/botocore/data/glue/2017-03-31/service-2.json @@ -3185,7 +3185,7 @@ }, "Configuration":{ "shape":"CrawlerConfiguration", - "documentation":"

Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see Configuring a Crawler.

" + "documentation":"

Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see Configuring a Crawler.

" }, "CrawlerSecurityConfiguration":{ "shape":"CrawlerSecurityConfiguration", @@ -3400,7 +3400,7 @@ }, "Schedule":{ "shape":"CronExpression", - "documentation":"

A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers. For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *).

" + "documentation":"

A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers. For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *).

" }, "Classifiers":{ "shape":"ClassifierNameList", @@ -3416,7 +3416,7 @@ }, "Configuration":{ "shape":"CrawlerConfiguration", - "documentation":"

The crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see Configuring a Crawler.

" + "documentation":"

Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see Configuring a Crawler.

" }, "CrawlerSecurityConfiguration":{ "shape":"CrawlerSecurityConfiguration", @@ -3424,7 +3424,7 @@ }, "Tags":{ "shape":"TagsMap", - "documentation":"

The tags to use with this crawler request. You can use tags to limit access to the crawler. For more information, see AWS Tags in AWS Glue.

" + "documentation":"

The tags to use with this crawler request. You may use tags to limit access to the crawler. For more information about tags in AWS Glue, see AWS Tags in AWS Glue in the developer guide.

" } } }, @@ -3774,7 +3774,7 @@ }, "JsonPath":{ "shape":"JsonPath", - "documentation":"

A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath, as described in Writing JsonPath Custom Classifiers.

" + "documentation":"

A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath, as described in Writing JsonPath Custom Classifiers.

" } }, "documentation":"

Specifies a JSON classifier for CreateClassifier to create.

" @@ -4744,6 +4744,14 @@ "Path":{ "shape":"Path", "documentation":"

The name of the DynamoDB table to crawl.

" + }, + "scanAll":{ + "shape":"NullableBoolean", + "documentation":"

Indicates whether to scan all the records, or to sample rows from the table. Scanning all the records can take a long time when the table is not a high throughput table.

A value of true means to scan all records, while a value of false means to sample the records. If no value is specified, the value defaults to true.

" + }, + "scanRate":{ + "shape":"NullableDouble", + "documentation":"

The percentage of the configured read capacity units to use by the AWS Glue crawler. Read capacity units is a term defined by DynamoDB, and is a numeric value that acts as rate limiter for the number of reads that can be performed on that table per second.

The valid values are null or a value between 0.1 to 1.5. A null value is used when user does not provide a value, and defaults to 0.5 of the configured Read Capacity Unit (for provisioned tables), or 0.25 of the max configured Read Capacity Unit (for tables using on-demand mode).

" } }, "documentation":"

Specifies an Amazon DynamoDB table to crawl.

" @@ -6159,7 +6167,7 @@ }, "DatabaseName":{ "shape":"NameString", - "documentation":"

The name of the catalog database where the functions are located.

" + "documentation":"

The name of the catalog database where the functions are located. If none is provided, functions from all the databases across the catalog will be returned.

" }, "Pattern":{ "shape":"NameString", @@ -6387,11 +6395,11 @@ }, "GrokPattern":{ "shape":"GrokPattern", - "documentation":"

The grok pattern applied to a data store by this classifier. For more information, see built-in patterns in Writing Custom Classifiers.

" + "documentation":"

The grok pattern applied to a data store by this classifier. For more information, see built-in patterns in Writing Custom Classifiers.

" }, "CustomPatterns":{ "shape":"CustomPatterns", - "documentation":"

Optional custom grok patterns defined by this classifier. For more information, see custom patterns in Writing Custom Classifiers.

" + "documentation":"

Optional custom grok patterns defined by this classifier. For more information, see custom patterns in Writing Custom Classifiers.

" } }, "documentation":"

A classifier that uses grok patterns.

" @@ -6507,7 +6515,7 @@ }, "Exclusions":{ "shape":"PathList", - "documentation":"

A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler.

" + "documentation":"

A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler.

" } }, "documentation":"

Specifies a JDBC data store to crawl.

" @@ -6909,7 +6917,7 @@ }, "JsonPath":{ "shape":"JsonPath", - "documentation":"

A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath, as described in Writing JsonPath Custom Classifiers.

" + "documentation":"

A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath, as described in Writing JsonPath Custom Classifiers.

" } }, "documentation":"

A classifier for JSON content.

" @@ -7943,7 +7951,7 @@ }, "Exclusions":{ "shape":"PathList", - "documentation":"

A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler.

" + "documentation":"

A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler.

" } }, "documentation":"

Specifies a data store in Amazon Simple Storage Service (Amazon S3).

" @@ -7958,7 +7966,7 @@ "members":{ "ScheduleExpression":{ "shape":"CronExpression", - "documentation":"

A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers. For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *).

" + "documentation":"

A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *).

" }, "State":{ "shape":"ScheduleState", @@ -9287,7 +9295,7 @@ }, "Schedule":{ "shape":"CronExpression", - "documentation":"

A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers. For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *).

" + "documentation":"

A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *).

" }, "Classifiers":{ "shape":"ClassifierNameList", @@ -9303,7 +9311,7 @@ }, "Configuration":{ "shape":"CrawlerConfiguration", - "documentation":"

The crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see Configuring a Crawler.

" + "documentation":"

Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see Configuring a Crawler.

" }, "CrawlerSecurityConfiguration":{ "shape":"CrawlerSecurityConfiguration", @@ -9326,7 +9334,7 @@ }, "Schedule":{ "shape":"CronExpression", - "documentation":"

The updated cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers. For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *).

" + "documentation":"

The updated cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *).

" } } }, @@ -9498,7 +9506,7 @@ }, "JsonPath":{ "shape":"JsonPath", - "documentation":"

A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath, as described in Writing JsonPath Custom Classifiers.

" + "documentation":"

A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath, as described in Writing JsonPath Custom Classifiers.

" } }, "documentation":"

Specifies a JSON classifier to be updated.

" @@ -9739,6 +9747,10 @@ "shape":"NameString", "documentation":"

The name of the function.

" }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the database where the function resides.

" + }, "ClassName":{ "shape":"NameString", "documentation":"

The Java class that contains the function code.

" diff --git a/botocore/data/guardduty/2017-11-28/service-2.json b/botocore/data/guardduty/2017-11-28/service-2.json index 9c59912a..df746d23 100644 --- a/botocore/data/guardduty/2017-11-28/service-2.json +++ b/botocore/data/guardduty/2017-11-28/service-2.json @@ -595,7 +595,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Lists details about associated member accounts for the current GuardDuty master account.

" + "documentation":"

Lists details about all member accounts for the current GuardDuty master account.

" }, "ListOrganizationAdminAccounts":{ "name":"ListOrganizationAdminAccounts", @@ -870,6 +870,22 @@ "members":{ } }, + "AccessControlList":{ + "type":"structure", + "members":{ + "AllowsPublicReadAccess":{ + "shape":"Boolean", + "documentation":"

A value that indicates whether public read access for the bucket is enabled through an Access Control List (ACL).

", + "locationName":"allowsPublicReadAccess" + }, + "AllowsPublicWriteAccess":{ + "shape":"Boolean", + "documentation":"

A value that indicates whether public write access for the bucket is enabled through an Access Control List (ACL).

", + "locationName":"allowsPublicWriteAccess" + } + }, + "documentation":"

Contains information on the current access control policies for the bucket.

" + }, "AccessKeyDetails":{ "type":"structure", "members":{ @@ -933,6 +949,17 @@ "max":50, "min":1 }, + "AccountLevelPermissions":{ + "type":"structure", + "members":{ + "BlockPublicAccess":{ + "shape":"BlockPublicAccess", + "documentation":"

Describes the S3 Block Public Access settings of the bucket's parent account.

", + "locationName":"blockPublicAccess" + } + }, + "documentation":"

Contains information about the account level permissions on the S3 bucket.

" + }, "Action":{ "type":"structure", "members":{ @@ -1069,7 +1096,70 @@ "error":{"httpStatusCode":400}, "exception":true }, + "BlockPublicAccess":{ + "type":"structure", + "members":{ + "IgnorePublicAcls":{ + "shape":"Boolean", + "documentation":"

Indicates if S3 Block Public Access is set to IgnorePublicAcls.

", + "locationName":"ignorePublicAcls" + }, + "RestrictPublicBuckets":{ + "shape":"Boolean", + "documentation":"

Indicates if S3 Block Public Access is set to RestrictPublicBuckets.

", + "locationName":"restrictPublicBuckets" + }, + "BlockPublicAcls":{ + "shape":"Boolean", + "documentation":"

Indicates if S3 Block Public Access is set to BlockPublicAcls.

", + "locationName":"blockPublicAcls" + }, + "BlockPublicPolicy":{ + "shape":"Boolean", + "documentation":"

Indicates if S3 Block Public Access is set to BlockPublicPolicy.

", + "locationName":"blockPublicPolicy" + } + }, + "documentation":"

Contains information on how the bucket owner's S3 Block Public Access settings are being applied to the S3 bucket. See S3 Block Public Access for more information.

" + }, "Boolean":{"type":"boolean"}, + "BucketLevelPermissions":{ + "type":"structure", + "members":{ + "AccessControlList":{ + "shape":"AccessControlList", + "documentation":"

Contains information on how Access Control Policies are applied to the bucket.

", + "locationName":"accessControlList" + }, + "BucketPolicy":{ + "shape":"BucketPolicy", + "documentation":"

Contains information on the bucket policies for the S3 bucket.

", + "locationName":"bucketPolicy" + }, + "BlockPublicAccess":{ + "shape":"BlockPublicAccess", + "documentation":"

Contains information on which account level S3 Block Public Access settings are applied to the S3 bucket.

", + "locationName":"blockPublicAccess" + } + }, + "documentation":"

Contains information about the bucket level permissions for the S3 bucket.

" + }, + "BucketPolicy":{ + "type":"structure", + "members":{ + "AllowsPublicReadAccess":{ + "shape":"Boolean", + "documentation":"

A value that indicates whether public read access for the bucket is enabled through a bucket policy.

", + "locationName":"allowsPublicReadAccess" + }, + "AllowsPublicWriteAccess":{ + "shape":"Boolean", + "documentation":"

A value that indicates whether public write access for the bucket is enabled through a bucket policy.

", + "locationName":"allowsPublicWriteAccess" + } + }, + "documentation":"

Contains information on the current bucket policies for the S3 bucket.

" + }, "City":{ "type":"structure", "members":{ @@ -1307,7 +1397,7 @@ }, "Location":{ "shape":"Location", - "documentation":"

The URI of the file that contains the IPSet.

", + "documentation":"

The URI of the file that contains the IPSet. For example: https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key.

", "locationName":"location" }, "Activate":{ @@ -1463,7 +1553,7 @@ }, "Location":{ "shape":"Location", - "documentation":"

The URI of the file that contains the ThreatIntelSet.

", + "documentation":"

The URI of the file that contains the ThreatIntelSet. For example: https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key.

", "locationName":"location" }, "Activate":{ @@ -1522,6 +1612,22 @@ } } }, + "DefaultServerSideEncryption":{ + "type":"structure", + "members":{ + "EncryptionType":{ + "shape":"String", + "documentation":"

The type of encryption used for objects within the S3 bucket.

", + "locationName":"encryptionType" + }, + "KmsMasterKeyArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the KMS encryption key. Only available if the bucket EncryptionType is aws:kms.

", + "locationName":"kmsMasterKeyArn" + } + }, + "documentation":"

Contains information on the server side encryption method used in the S3 bucket. See S3 Server-Side Encryption for more information.

" + }, "DeleteDetectorRequest":{ "type":"structure", "required":["DetectorId"], @@ -2422,7 +2528,7 @@ }, "Location":{ "shape":"Location", - "documentation":"

The URI of the file that contains the IPSet.

", + "documentation":"

The URI of the file that contains the IPSet. For example: https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key.

", "locationName":"location" }, "Status":{ @@ -2556,7 +2662,7 @@ }, "Location":{ "shape":"Location", - "documentation":"

The URI of the file that contains the ThreatIntelSet.

", + "documentation":"

The URI of the file that contains the ThreatIntelSet. For example: https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key.

", "locationName":"location" }, "Status":{ @@ -2876,7 +2982,7 @@ }, "FindingCriteria":{ "shape":"FindingCriteria", - "documentation":"

Represents the criteria used for querying findings. Valid values include:

", + "documentation":"

Represents the criteria used for querying findings. Valid values include:

", "locationName":"findingCriteria" }, "SortCriteria":{ @@ -3008,7 +3114,7 @@ }, "OnlyAssociated":{ "shape":"String", - "documentation":"

Specifies what member accounts the response includes based on their relationship status with the master account. The default value is \"true\". If set to \"false\" the response includes all existing member accounts (including members who haven't been invited yet or have been disassociated).

", + "documentation":"

Specifies whether to only return associated members or to return all members (including members who haven't been invited yet or have been disassociated).

", "location":"querystring", "locationName":"onlyAssociated" } @@ -3428,6 +3534,33 @@ }, "documentation":"

Contains information about the ISP organization of the remote IP address.

" }, + "Owner":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"String", + "documentation":"

The canonical user ID of the bucket owner. For information about locating your canonical user ID see Finding Your Account Canonical User ID.

", + "locationName":"id" + } + }, + "documentation":"

Contains information on the owner of the bucket.

" + }, + "PermissionConfiguration":{ + "type":"structure", + "members":{ + "BucketLevelPermissions":{ + "shape":"BucketLevelPermissions", + "documentation":"

Contains information about the bucket level permissions for the S3 bucket.

", + "locationName":"bucketLevelPermissions" + }, + "AccountLevelPermissions":{ + "shape":"AccountLevelPermissions", + "documentation":"

Contains information about the account level permissions on the S3 bucket.

", + "locationName":"accountLevelPermissions" + } + }, + "documentation":"

Contains information about how permissions are configured for the S3 bucket.

" + }, "PortProbeAction":{ "type":"structure", "members":{ @@ -3509,6 +3642,22 @@ "type":"list", "member":{"shape":"ProductCode"} }, + "PublicAccess":{ + "type":"structure", + "members":{ + "PermissionConfiguration":{ + "shape":"PermissionConfiguration", + "documentation":"

Contains information about how permissions are configured for the S3 bucket.

", + "locationName":"permissionConfiguration" + }, + "EffectivePermission":{ + "shape":"String", + "documentation":"

Describes the effective permission on this bucket after factoring all attached policies.

", + "locationName":"effectivePermission" + } + }, + "documentation":"

Describes the public access policies that apply to the S3 bucket.

" + }, "PublishingStatus":{ "type":"string", "enum":[ @@ -3575,6 +3724,11 @@ "documentation":"

The IAM access key details (IAM user information) of a user that engaged in the activity that prompted GuardDuty to generate a finding.

", "locationName":"accessKeyDetails" }, + "S3BucketDetails":{ + "shape":"S3BucketDetails", + "documentation":"

Contains information on the S3 bucket.

", + "locationName":"s3BucketDetails" + }, "InstanceDetails":{ "shape":"InstanceDetails", "documentation":"

The information about the EC2 instance associated with the activity that prompted GuardDuty to generate a finding.

", @@ -3588,6 +3742,56 @@ }, "documentation":"

Contains information about the AWS resource associated with the activity that prompted GuardDuty to generate a finding.

" }, + "S3BucketDetail":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the S3 bucket.

", + "locationName":"arn" + }, + "Name":{ + "shape":"String", + "documentation":"

The name of the S3 bucket.

", + "locationName":"name" + }, + "Type":{ + "shape":"String", + "documentation":"

Describes whether the bucket is a source or destination bucket.

", + "locationName":"type" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

The date and time the bucket was created at.

", + "locationName":"createdAt" + }, + "Owner":{ + "shape":"Owner", + "documentation":"

The owner of the S3 bucket.

", + "locationName":"owner" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

All tags attached to the S3 bucket.

", + "locationName":"tags" + }, + "DefaultServerSideEncryption":{ + "shape":"DefaultServerSideEncryption", + "documentation":"

Describes the server side encryption method used in the S3 bucket.

", + "locationName":"defaultServerSideEncryption" + }, + "PublicAccess":{ + "shape":"PublicAccess", + "documentation":"

Describes the public access policies that apply to the S3 bucket.

", + "locationName":"publicAccess" + } + } + }, + "S3BucketDetails":{ + "type":"list", + "member":{"shape":"S3BucketDetail"}, + "documentation":"

Contains information on the S3 bucket.

" + }, "SecurityGroup":{ "type":"structure", "members":{ @@ -3868,6 +4072,7 @@ "type":"list", "member":{"shape":"String"} }, + "Timestamp":{"type":"timestamp"}, "UnarchiveFindingsRequest":{ "type":"structure", "required":[ @@ -4086,7 +4291,7 @@ }, "Location":{ "shape":"Location", - "documentation":"

The updated URI of the file that contains the IPSet.

", + "documentation":"

The updated URI of the file that contains the IPSet. For example: https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key.

", "locationName":"location" }, "Activate":{ @@ -4183,7 +4388,7 @@ }, "Location":{ "shape":"Location", - "documentation":"

The updated URI of the file that contains the ThreateIntelSet.

", + "documentation":"

The updated URI of the file that contains the ThreatIntelSet. For example: https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key.

", "locationName":"location" }, "Activate":{ diff --git a/botocore/data/iam/2010-05-08/service-2.json b/botocore/data/iam/2010-05-08/service-2.json index 72f46101..59704532 100644 --- a/botocore/data/iam/2010-05-08/service-2.json +++ b/botocore/data/iam/2010-05-08/service-2.json @@ -849,7 +849,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"InvalidInputException"} ], - "documentation":"

Generates a report that includes details about when an IAM resource (user, group, role, or policy) was last used in an attempt to access AWS services. Recent activity usually appears within four hours. IAM reports activity for the last 365 days, or less if your Region began supporting this feature within the last year. For more information, see Regions Where Data Is Tracked.

The service last accessed data includes all attempts to access an AWS API, not just the successful ones. This includes all attempts that were made using the AWS Management Console, the AWS API through any of the SDKs, or any of the command line tools. An unexpected entry in the service last accessed data does not mean that your account has been compromised, because the request might have been denied. Refer to your CloudTrail logs as the authoritative source for information about all API calls and whether they were successful or denied access. For more information, see Logging IAM Events with CloudTrail in the IAM User Guide.

The GenerateServiceLastAccessedDetails operation returns a JobId. Use this parameter in the following operations to retrieve the following details from your report:

To check the status of the GenerateServiceLastAccessedDetails request, use the JobId parameter in the same operations and test the JobStatus response parameter.

For additional information about the permissions policies that allow an identity (user, group, or role) to access specific services, use the ListPoliciesGrantingServiceAccess operation.

Service last accessed data does not use other policy types when determining whether a resource could access a service. These other policy types include resource-based policies, access control lists, AWS Organizations policies, IAM permissions boundaries, and AWS STS assume role policies. It only applies permissions policy logic. For more about the evaluation of policy types, see Evaluating Policies in the IAM User Guide.

For more information about service last accessed data, see Reducing Policy Scope by Viewing User Activity in the IAM User Guide.

" + "documentation":"

Generates a report that includes details about when an IAM resource (user, group, role, or policy) was last used in an attempt to access AWS services. Recent activity usually appears within four hours. IAM reports activity for the last 365 days, or less if your Region began supporting this feature within the last year. For more information, see Regions Where Data Is Tracked.

The service last accessed data includes all attempts to access an AWS API, not just the successful ones. This includes all attempts that were made using the AWS Management Console, the AWS API through any of the SDKs, or any of the command line tools. An unexpected entry in the service last accessed data does not mean that your account has been compromised, because the request might have been denied. Refer to your CloudTrail logs as the authoritative source for information about all API calls and whether they were successful or denied access. For more information, see Logging IAM Events with CloudTrail in the IAM User Guide.

The GenerateServiceLastAccessedDetails operation returns a JobId. Use this parameter in the following operations to retrieve the following details from your report:

To check the status of the GenerateServiceLastAccessedDetails request, use the JobId parameter in the same operations and test the JobStatus response parameter.

For additional information about the permissions policies that allow an identity (user, group, or role) to access specific services, use the ListPoliciesGrantingServiceAccess operation.

Service last accessed data does not use other policy types when determining whether a resource could access a service. These other policy types include resource-based policies, access control lists, AWS Organizations policies, IAM permissions boundaries, and AWS STS assume role policies. It only applies permissions policy logic. For more about the evaluation of policy types, see Evaluating Policies in the IAM User Guide.

For more information about service and action last accessed data, see Reducing Permissions Using Service Last Accessed Data in the IAM User Guide.

" }, "GetAccessKeyLastUsed":{ "name":"GetAccessKeyLastUsed", @@ -1201,7 +1201,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"InvalidInputException"} ], - "documentation":"

Retrieves a service last accessed report that was created using the GenerateServiceLastAccessedDetails operation. You can use the JobId parameter in GetServiceLastAccessedDetails to retrieve the status of your report job. When the report is complete, you can retrieve the generated report. The report includes a list of AWS services that the resource (user, group, role, or managed policy) can access.

Service last accessed data does not use other policy types when determining whether a resource could access a service. These other policy types include resource-based policies, access control lists, AWS Organizations policies, IAM permissions boundaries, and AWS STS assume role policies. It only applies permissions policy logic. For more about the evaluation of policy types, see Evaluating Policies in the IAM User Guide.

For each service that the resource could access using permissions policies, the operation returns details about the most recent access attempt. If there was no attempt, the service is listed without details about the most recent attempt to access the service. If the operation fails, the GetServiceLastAccessedDetails operation returns the reason that it failed.

The GetServiceLastAccessedDetails operation returns a list of services. This list includes the number of entities that have attempted to access the service and the date and time of the last attempt. It also returns the ARN of the following entity, depending on the resource ARN that you used to generate the report:

By default, the list is sorted by service namespace.

" + "documentation":"

Retrieves a service last accessed report that was created using the GenerateServiceLastAccessedDetails operation. You can use the JobId parameter in GetServiceLastAccessedDetails to retrieve the status of your report job. When the report is complete, you can retrieve the generated report. The report includes a list of AWS services that the resource (user, group, role, or managed policy) can access.

Service last accessed data does not use other policy types when determining whether a resource could access a service. These other policy types include resource-based policies, access control lists, AWS Organizations policies, IAM permissions boundaries, and AWS STS assume role policies. It only applies permissions policy logic. For more about the evaluation of policy types, see Evaluating Policies in the IAM User Guide.

For each service that the resource could access using permissions policies, the operation returns details about the most recent access attempt. If there was no attempt, the service is listed without details about the most recent attempt to access the service. If the operation fails, the GetServiceLastAccessedDetails operation returns the reason that it failed.

The GetServiceLastAccessedDetails operation returns a list of services. This list includes the number of entities that have attempted to access the service and the date and time of the last attempt. It also returns the ARN of the following entity, depending on the resource ARN that you used to generate the report:

By default, the list is sorted by service namespace.

If you specified ACTION_LEVEL granularity when you generated the report, this operation returns service and action last accessed data. This includes the most recent access attempt for each tracked action within a service. Otherwise, this operation returns only service data.

For more information about service and action last accessed data, see Reducing Permissions Using Service Last Accessed Data in the IAM User Guide.

" }, "GetServiceLastAccessedDetailsWithEntities":{ "name":"GetServiceLastAccessedDetailsWithEntities", @@ -2291,6 +2291,13 @@ } }, "shapes":{ + "AccessAdvisorUsageGranularityType":{ + "type":"string", + "enum":[ + "SERVICE_LEVEL", + "ACTION_LEVEL" + ] + }, "AccessDetail":{ "type":"structure", "required":[ @@ -3709,6 +3716,10 @@ "Arn":{ "shape":"arnType", "documentation":"

The ARN of the IAM resource (user, group, role, or managed policy) used to generate information about when the resource was last used in an attempt to access an AWS service.

" + }, + "Granularity":{ + "shape":"AccessAdvisorUsageGranularityType", + "documentation":"

The level of detail that you want to generate. You can specify whether you want to generate information about the last attempt to access services or actions. If you specify service-level granularity, this operation generates only service data. If you specify action-level granularity, it generates service and action data. If you don't include this optional parameter, the operation generates service data.

" } } }, @@ -4307,6 +4318,10 @@ "shape":"jobStatusType", "documentation":"

The status of the job.

" }, + "JobType":{ + "shape":"AccessAdvisorUsageGranularityType", + "documentation":"

The type of job. Service jobs return information about when each service was last accessed. Action jobs also include information about when tracked actions within the service were last accessed.

" + }, "JobCreationDate":{ "shape":"dateType", "documentation":"

The date and time, in ISO 8601 date-time format, when the report job was created.

" @@ -4321,7 +4336,7 @@ }, "IsTruncated":{ "shape":"booleanType", - "documentation":"

A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all your results.

" + "documentation":"

A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all your results.

" }, "Marker":{ "shape":"responseMarkerType", @@ -6783,9 +6798,17 @@ "shape":"arnType", "documentation":"

The ARN of the authenticated entity (user or role) that last attempted to access the service. AWS does not report unauthenticated requests.

This field is null if no IAM entities attempted to access the service within the reporting period.

" }, + "LastAuthenticatedRegion":{ + "shape":"stringType", + "documentation":"

The Region from which the authenticated entity (user or role) last attempted to access the service. AWS does not report unauthenticated requests.

This field is null if no IAM entities attempted to access the service within the reporting period.

" + }, "TotalAuthenticatedEntities":{ "shape":"integerType", "documentation":"

The total number of authenticated principals (root user, IAM users, or IAM roles) that have attempted to access the service.

This field is null if no principals attempted to access the service within the reporting period.

" + }, + "TrackedActionsLastAccessed":{ + "shape":"TrackedActionsLastAccessed", + "documentation":"

An object that contains details about the most recent attempt to access a tracked action within the service.

This field is null if there are no tracked actions or if the principal did not use the tracked actions within the reporting period. This field is also null if the report was generated at the service level and not the action level. For more information, see the Granularity field in GenerateServiceLastAccessedDetails.

" } }, "documentation":"

Contains details about the most recent attempt to access the service.

This data type is used as a response element in the GetServiceLastAccessedDetails operation.

" @@ -7039,7 +7062,7 @@ }, "PermissionsBoundaryPolicyInputList":{ "shape":"SimulationPolicyListType", - "documentation":"

The IAM permissions boundary policy to simulate. The permissions boundary sets the maximum permissions that the entity can have. You can input only one permissions boundary when you pass a policy to this operation. An IAM entity can only have one permissions boundary in effect at a time. For example, if a permissions boundary is attached to an entity and you pass in a different permissions boundary policy using this parameter, then the new permission boundary policy is used for the simulation. For more information about permissions boundaries, see Permissions Boundaries for IAM Entities in the IAM User Guide. The policy input is specified as a string containing the complete, valid JSON text of a permissions boundary policy.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

" + "documentation":"

The IAM permissions boundary policy to simulate. The permissions boundary sets the maximum permissions that the entity can have. You can input only one permissions boundary when you pass a policy to this operation. An IAM entity can only have one permissions boundary in effect at a time. For example, if a permissions boundary is attached to an entity and you pass in a different permissions boundary policy using this parameter, then the new permissions boundary policy is used for the simulation. For more information about permissions boundaries, see Permissions Boundaries for IAM Entities in the IAM User Guide. The policy input is specified as a string containing the complete, valid JSON text of a permissions boundary policy.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

" }, "ActionNames":{ "shape":"ActionNameListType", @@ -7161,6 +7184,29 @@ } } }, + "TrackedActionLastAccessed":{ + "type":"structure", + "members":{ + "ActionName":{ + "shape":"stringType", + "documentation":"

The name of the tracked action to which access was attempted. Tracked actions are actions that report activity to IAM.

" + }, + "LastAccessedEntity":{"shape":"arnType"}, + "LastAccessedTime":{ + "shape":"dateType", + "documentation":"

The date and time, in ISO 8601 date-time format, when an authenticated entity most recently attempted to access the tracked service. AWS does not report unauthenticated requests.

This field is null if no IAM entities attempted to access the service within the reporting period.

" + }, + "LastAccessedRegion":{ + "shape":"stringType", + "documentation":"

The Region from which the authenticated entity (user or role) last attempted to access the tracked action. AWS does not report unauthenticated requests.

This field is null if no IAM entities attempted to access the service within the reporting period.

" + } + }, + "documentation":"

Contains details about the most recent attempt to access an action within the service.

This data type is used as a response element in the GetServiceLastAccessedDetails operation.

" + }, + "TrackedActionsLastAccessed":{ + "type":"list", + "member":{"shape":"TrackedActionLastAccessed"} + }, "UnmodifiableEntityException":{ "type":"structure", "members":{ @@ -8285,5 +8331,5 @@ "pattern":"[\\w+=,.@-]+" } }, - "documentation":"AWS Identity and Access Management

AWS Identity and Access Management (IAM) is a web service that you can use to manage users and user permissions under your AWS account. This guide provides descriptions of IAM actions that you can call programmatically. For general information about IAM, see AWS Identity and Access Management (IAM). For the user guide for IAM, see Using IAM.

AWS provides SDKs that consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient way to create programmatic access to IAM and AWS. For example, the SDKs take care of tasks such as cryptographically signing requests (see below), managing errors, and retrying requests automatically. For information about the AWS SDKs, including how to download and install them, see the Tools for Amazon Web Services page.

We recommend that you use the AWS SDKs to make programmatic API calls to IAM. However, you can also use the IAM Query API to make direct calls to the IAM web service. To learn more about the IAM Query API, see Making Query Requests in the Using IAM guide. IAM supports GET and POST requests for all actions. That is, the API does not require you to use GET for some actions and POST for others. However, GET requests are subject to the limitation size of a URL. Therefore, for operations that require larger sizes, use a POST request.

Signing Requests

Requests must be signed using an access key ID and a secret access key. We strongly recommend that you do not use your AWS account access key ID and secret access key for everyday work with IAM. You can use the access key ID and secret access key for an IAM user or you can use the AWS Security Token Service to generate temporary security credentials and use those to sign requests.

To sign requests, we recommend that you use Signature Version 4. If you have an existing application that uses Signature Version 2, you do not have to update it to use Signature Version 4. However, some operations now require Signature Version 4. The documentation for operations that require version 4 indicate this requirement.

Additional Resources

For more information, see the following:

" + "documentation":"AWS Identity and Access Management

AWS Identity and Access Management (IAM) is a web service for securely controlling access to AWS services. With IAM, you can centrally manage users, security credentials such as access keys, and permissions that control which AWS resources users and applications can access. For more information about IAM, see AWS Identity and Access Management (IAM) and the AWS Identity and Access Management User Guide.

" } diff --git a/botocore/data/imagebuilder/2019-12-02/service-2.json b/botocore/data/imagebuilder/2019-12-02/service-2.json index aa735d7c..d22ee968 100644 --- a/botocore/data/imagebuilder/2019-12-02/service-2.json +++ b/botocore/data/imagebuilder/2019-12-02/service-2.json @@ -51,7 +51,8 @@ {"shape":"CallRateLimitExceededException"}, {"shape":"InvalidVersionNumberException"}, {"shape":"ResourceInUseException"}, - {"shape":"InvalidParameterCombinationException"} + {"shape":"InvalidParameterCombinationException"}, + {"shape":"ServiceQuotaExceededException"} ], "documentation":"

Creates a new component that can be used to build, validate, test, and assess your image.

" }, @@ -73,7 +74,8 @@ {"shape":"CallRateLimitExceededException"}, {"shape":"ResourceInUseException"}, {"shape":"ResourceAlreadyExistsException"}, - {"shape":"InvalidParameterCombinationException"} + {"shape":"InvalidParameterCombinationException"}, + {"shape":"ServiceQuotaExceededException"} ], "documentation":"

Creates a new distribution configuration. Distribution configurations define and configure the outputs of your pipeline.

" }, @@ -93,7 +95,8 @@ {"shape":"IdempotentParameterMismatchException"}, {"shape":"ForbiddenException"}, {"shape":"CallRateLimitExceededException"}, - {"shape":"ResourceInUseException"} + {"shape":"ResourceInUseException"}, + {"shape":"ServiceQuotaExceededException"} ], "documentation":"

Creates a new image. This request will create a new image along with all of the configured output resources defined in the distribution configuration.

" }, @@ -114,7 +117,8 @@ {"shape":"ForbiddenException"}, {"shape":"CallRateLimitExceededException"}, {"shape":"ResourceInUseException"}, - {"shape":"ResourceAlreadyExistsException"} + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ServiceQuotaExceededException"} ], "documentation":"

Creates a new image pipeline. Image pipelines enable you to automate the creation and distribution of images.

" }, @@ -136,7 +140,8 @@ {"shape":"CallRateLimitExceededException"}, {"shape":"InvalidVersionNumberException"}, {"shape":"ResourceInUseException"}, - {"shape":"ResourceAlreadyExistsException"} + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ServiceQuotaExceededException"} ], "documentation":"

Creates a new image recipe. Image recipes define how images are configured, tested, and assessed.

" }, @@ -157,7 +162,8 @@ {"shape":"ForbiddenException"}, {"shape":"CallRateLimitExceededException"}, {"shape":"ResourceInUseException"}, - {"shape":"ResourceAlreadyExistsException"} + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ServiceQuotaExceededException"} ], "documentation":"

Creates a new infrastructure configuration. An infrastructure configuration defines the environment in which your image will be built and tested.

" }, @@ -533,7 +539,7 @@ {"shape":"ForbiddenException"}, {"shape":"CallRateLimitExceededException"} ], - "documentation":"

Returns a list of distribution configurations.

" + "documentation":"

Returns a list of image build versions.

" }, "ListImagePipelineImages":{ "name":"ListImagePipelineImages", @@ -610,7 +616,7 @@ {"shape":"ForbiddenException"}, {"shape":"CallRateLimitExceededException"} ], - "documentation":"

Returns the list of image build versions for the specified semantic version.

" + "documentation":"

Returns the list of images that you have access to.

" }, "ListInfrastructureConfigurations":{ "name":"ListInfrastructureConfigurations", @@ -1376,6 +1382,10 @@ "shape":"TagMap", "documentation":"

The tags of the image recipe.

" }, + "workingDirectory":{ + "shape":"NonEmptyString", + "documentation":"

The working directory to be used during build and test workflows.

" + }, "clientToken":{ "shape":"ClientToken", "documentation":"

The idempotency token used to make this request idempotent.

", @@ -1504,6 +1514,10 @@ "shape":"SnsTopicArn", "documentation":"

The SNS topic on which to send image build events.

" }, + "resourceTags":{ + "shape":"ResourceTagMap", + "documentation":"

The tags attached to the resource created by Image Builder.

" + }, "tags":{ "shape":"TagMap", "documentation":"

The tags of the infrastructure configuration.

" @@ -1921,7 +1935,7 @@ "required":["componentBuildVersionArn"], "members":{ "componentBuildVersionArn":{ - "shape":"ComponentBuildVersionArn", + "shape":"ComponentVersionArnOrBuildVersionArn", "documentation":"

The Amazon Resource Name (ARN) of the component that you want to retrieve. Regex requires \"/\\d+$\" suffix.

", "location":"querystring", "locationName":"componentBuildVersionArn" @@ -2071,7 +2085,7 @@ "required":["imageBuildVersionArn"], "members":{ "imageBuildVersionArn":{ - "shape":"ImageBuildVersionArn", + "shape":"ImageVersionArnOrBuildVersionArn", "documentation":"

The Amazon Resource Name (ARN) of the image that you want to retrieve.

", "location":"querystring", "locationName":"imageBuildVersionArn" @@ -2329,6 +2343,10 @@ "tags":{ "shape":"TagMap", "documentation":"

The tags of the image recipe.

" + }, + "workingDirectory":{ + "shape":"NonEmptyString", + "documentation":"

The working directory to be used during build and test workflows.

" } }, "documentation":"

An image recipe.

" @@ -2512,6 +2530,10 @@ "type":"string", "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:\\d{12}|aws):image/[a-z0-9-_]+/\\d+\\.\\d+\\.\\d+$" }, + "ImageVersionArnOrBuildVersionArn":{ + "type":"string", + "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:\\d{12}|aws):image/[a-z0-9-_]+/(?:(?:(\\d+|x)\\.(\\d+|x)\\.(\\d+|x))|(?:\\d+\\.\\d+\\.\\d+/\\d+))$" + }, "ImageVersionList":{ "type":"list", "member":{"shape":"ImageVersion"} @@ -2650,6 +2672,10 @@ "shape":"DateTime", "documentation":"

The date on which the infrastructure configuration was last updated.

" }, + "resourceTags":{ + "shape":"ResourceTagMap", + "documentation":"

The tags attached to the resource created by Image Builder.

" + }, "tags":{ "shape":"TagMap", "documentation":"

The tags of the infrastructure configuration.

" @@ -2684,6 +2710,10 @@ "shape":"DateTime", "documentation":"

The date on which the infrastructure configuration was last updated.

" }, + "resourceTags":{ + "shape":"ResourceTagMap", + "documentation":"

The tags attached to the image created by Image Builder.

" + }, "tags":{ "shape":"TagMap", "documentation":"

The tags of the infrastructure configuration.

" @@ -3358,6 +3388,13 @@ "max":30000, "min":1 }, + "ResourceTagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":30, + "min":1 + }, "RestrictedInteger":{ "type":"integer", "max":25, @@ -3404,6 +3441,15 @@ "error":{"httpStatusCode":500}, "exception":true }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

You have exceeded the number of permitted resources or operations for this service. For service quotas, see EC2 Image Builder endpoints and quotas.

", + "error":{"httpStatusCode":402}, + "exception":true + }, "ServiceUnavailableException":{ "type":"structure", "members":{ @@ -3694,6 +3740,10 @@ "shape":"ClientToken", "documentation":"

The idempotency token used to make this request idempotent.

", "idempotencyToken":true + }, + "resourceTags":{ + "shape":"ResourceTagMap", + "documentation":"

The tags attached to the resource created by Image Builder.

" } } }, diff --git a/botocore/data/iot-data/2015-05-28/paginators-1.json b/botocore/data/iot-data/2015-05-28/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/iot-data/2015-05-28/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/iot-data/2015-05-28/service-2.json b/botocore/data/iot-data/2015-05-28/service-2.json index 88fba6cd..394d79bb 100644 --- a/botocore/data/iot-data/2015-05-28/service-2.json +++ b/botocore/data/iot-data/2015-05-28/service-2.json @@ -1,14 +1,14 @@ { "version":"2.0", "metadata":{ - "uid":"iot-data-2015-05-28", "apiVersion":"2015-05-28", "endpointPrefix":"data.iot", "protocol":"rest-json", "serviceFullName":"AWS IoT Data Plane", "serviceId":"IoT Data Plane", "signatureVersion":"v4", - "signingName":"iotdata" + "signingName":"iotdata", + "uid":"iot-data-2015-05-28" }, "operations":{ "DeleteThingShadow":{ @@ -29,7 +29,7 @@ {"shape":"MethodNotAllowedException"}, {"shape":"UnsupportedDocumentEncodingException"} ], - "documentation":"

Deletes the thing shadow for the specified thing.

For more information, see DeleteThingShadow in the AWS IoT Developer Guide.

" + "documentation":"

Deletes the shadow for the specified thing.

For more information, see DeleteThingShadow in the AWS IoT Developer Guide.

" }, "GetThingShadow":{ "name":"GetThingShadow", @@ -49,7 +49,26 @@ {"shape":"MethodNotAllowedException"}, {"shape":"UnsupportedDocumentEncodingException"} ], - "documentation":"

Gets the thing shadow for the specified thing.

For more information, see GetThingShadow in the AWS IoT Developer Guide.

" + "documentation":"

Gets the shadow for the specified thing.

For more information, see GetThingShadow in the AWS IoT Developer Guide.

" + }, + "ListNamedShadowsForThing":{ + "name":"ListNamedShadowsForThing", + "http":{ + "method":"GET", + "requestUri":"/api/things/shadow/ListNamedShadowsForThing/{thingName}" + }, + "input":{"shape":"ListNamedShadowsForThingRequest"}, + "output":{"shape":"ListNamedShadowsForThingResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalFailureException"}, + {"shape":"MethodNotAllowedException"} + ], + "documentation":"

Lists the shadows for the specified thing.

" }, "Publish":{ "name":"Publish", @@ -64,7 +83,7 @@ {"shape":"UnauthorizedException"}, {"shape":"MethodNotAllowedException"} ], - "documentation":"

Publishes state information.

For more information, see HTTP Protocol in the AWS IoT Developer Guide.

" + "documentation":"

Publishes state information.

For more information, see HTTP Protocol in the AWS IoT Developer Guide.

" }, "UpdateThingShadow":{ "name":"UpdateThingShadow", @@ -85,7 +104,7 @@ {"shape":"MethodNotAllowedException"}, {"shape":"UnsupportedDocumentEncodingException"} ], - "documentation":"

Updates the thing shadow for the specified thing.

For more information, see UpdateThingShadow in the AWS IoT Developer Guide.

" + "documentation":"

Updates the shadow for the specified thing.

For more information, see UpdateThingShadow in the AWS IoT Developer Guide.

" } }, "shapes":{ @@ -93,7 +112,7 @@ "type":"structure", "members":{ "message":{ - "shape":"ErrorMessage", + "shape":"errorMessage", "documentation":"

The message for the exception.

" } }, @@ -110,6 +129,12 @@ "documentation":"

The name of the thing.

", "location":"uri", "locationName":"thingName" + }, + "shadowName":{ + "shape":"ShadowName", + "documentation":"

The name of the shadow.

", + "location":"querystring", + "locationName":"name" } }, "documentation":"

The input for the DeleteThingShadow operation.

" @@ -126,7 +151,6 @@ "documentation":"

The output from the DeleteThingShadow operation.

", "payload":"payload" }, - "ErrorMessage":{"type":"string"}, "GetThingShadowRequest":{ "type":"structure", "required":["thingName"], @@ -136,6 +160,12 @@ "documentation":"

The name of the thing.

", "location":"uri", "locationName":"thingName" + }, + "shadowName":{ + "shape":"ShadowName", + "documentation":"

The name of the shadow.

", + "location":"querystring", + "locationName":"name" } }, "documentation":"

The input for the GetThingShadow operation.

" @@ -177,11 +207,52 @@ "exception":true }, "JsonDocument":{"type":"blob"}, + "ListNamedShadowsForThingRequest":{ + "type":"structure", + "required":["thingName"], + "members":{ + "thingName":{ + "shape":"ThingName", + "documentation":"

The name of the thing.

", + "location":"uri", + "locationName":"thingName" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "pageSize":{ + "shape":"PageSize", + "documentation":"

The result page size.

", + "location":"querystring", + "locationName":"pageSize" + } + } + }, + "ListNamedShadowsForThingResponse":{ + "type":"structure", + "members":{ + "results":{ + "shape":"NamedShadowList", + "documentation":"

The list of shadows for the specified thing.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results, or null if there are no additional results.

" + }, + "timestamp":{ + "shape":"Timestamp", + "documentation":"

The Epoch date and time the response was generated by AWS IoT.

" + } + } + }, "MethodNotAllowedException":{ "type":"structure", "members":{ "message":{ - "shape":"ErrorMessage", + "shape":"errorMessage", "documentation":"

The message for the exception.

" } }, @@ -189,6 +260,16 @@ "error":{"httpStatusCode":405}, "exception":true }, + "NamedShadowList":{ + "type":"list", + "member":{"shape":"ShadowName"} + }, + "NextToken":{"type":"string"}, + "PageSize":{ + "type":"integer", + "max":100, + "min":1 + }, "Payload":{"type":"blob"}, "PublishRequest":{ "type":"structure", @@ -223,7 +304,7 @@ "type":"structure", "members":{ "message":{ - "shape":"ErrorMessage", + "shape":"errorMessage", "documentation":"

The message for the exception.

" } }, @@ -256,11 +337,17 @@ "exception":true, "fault":true }, + "ShadowName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z0-9:_-]+" + }, "ThingName":{ "type":"string", "max":128, "min":1, - "pattern":"[a-zA-Z0-9_-]+" + "pattern":"[a-zA-Z0-9:_-]+" }, "ThrottlingException":{ "type":"structure", @@ -274,6 +361,7 @@ "error":{"httpStatusCode":429}, "exception":true }, + "Timestamp":{"type":"long"}, "Topic":{"type":"string"}, "UnauthorizedException":{ "type":"structure", @@ -312,6 +400,12 @@ "location":"uri", "locationName":"thingName" }, + "shadowName":{ + "shape":"ShadowName", + "documentation":"

The name of the shadow.

", + "location":"querystring", + "locationName":"name" + }, "payload":{ "shape":"JsonDocument", "documentation":"

The state information, in JSON format.

" @@ -333,5 +427,5 @@ }, "errorMessage":{"type":"string"} }, - "documentation":"AWS IoT

AWS IoT-Data enables secure, bi-directional communication between Internet-connected things (such as sensors, actuators, embedded devices, or smart appliances) and the AWS cloud. It implements a broker for applications and things to publish messages over HTTP (Publish) and retrieve, update, and delete thing shadows. A thing shadow is a persistent representation of your things and their state in the AWS cloud.

" + "documentation":"AWS IoT

AWS IoT-Data enables secure, bi-directional communication between Internet-connected things (such as sensors, actuators, embedded devices, or smart appliances) and the AWS cloud. It implements a broker for applications and things to publish messages over HTTP (Publish) and retrieve, update, and delete shadows. A shadow is a persistent representation of your things and their state in the AWS cloud.

Find the endpoint address for actions in the AWS IoT data plane by running this CLI command:

aws iot describe-endpoint --endpoint-type iot:Data-ATS

The service name used by AWS Signature Version 4 to sign requests is: iotdevicegateway.

" } diff --git a/botocore/data/iot/2015-05-28/service-2.json b/botocore/data/iot/2015-05-28/service-2.json index 638479f1..d76156a0 100644 --- a/botocore/data/iot/2015-05-28/service-2.json +++ b/botocore/data/iot/2015-05-28/service-2.json @@ -2901,7 +2901,7 @@ {"shape":"InternalFailureException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Remove the specified thing from the specified group.

" + "documentation":"

Remove the specified thing from the specified group.

You must specify either a thingGroupArn or a thingGroupName to identify the thing group and either a thingArn or a thingName to identify the thing to remove from the thing group.

" }, "ReplaceTopicRule":{ "name":"ReplaceTopicRule", @@ -3560,10 +3560,10 @@ "members":{ "criteriaList":{ "shape":"AbortCriteriaList", - "documentation":"

The list of abort criteria to define rules to abort the job.

" + "documentation":"

The list of criteria that determine when and how to abort the job.

" } }, - "documentation":"

Details of abort criteria to abort the job.

" + "documentation":"

The criteria that determine when and how a job abort takes place.

" }, "AbortCriteria":{ "type":"structure", @@ -3576,22 +3576,22 @@ "members":{ "failureType":{ "shape":"JobExecutionFailureType", - "documentation":"

The type of job execution failure to define a rule to initiate a job abort.

" + "documentation":"

The type of job execution failures that can initiate a job abort.

" }, "action":{ "shape":"AbortAction", - "documentation":"

The type of abort action to initiate a job abort.

" + "documentation":"

The type of job action to take to initiate the job abort.

" }, "thresholdPercentage":{ "shape":"AbortThresholdPercentage", - "documentation":"

The threshold as a percentage of the total number of executed things that will initiate a job abort.

AWS IoT supports up to two digits after the decimal (for example, 10.9 and 10.99, but not 10.999).

" + "documentation":"

The minimum percentage of job execution failures that must occur to initiate the job abort.

AWS IoT supports up to two digits after the decimal (for example, 10.9 and 10.99, but not 10.999).

" }, "minNumberOfExecutedThings":{ "shape":"MinimumNumberOfExecutedThings", - "documentation":"

Minimum number of executed things before evaluating an abort rule.

" + "documentation":"

The minimum number of things which must receive job execution notifications before the job can be aborted.

" } }, - "documentation":"

Details of abort criteria to define rules to abort the job.

" + "documentation":"

The criteria that determine when and how a job abort takes place.

" }, "AbortCriteriaList":{ "type":"list", @@ -4032,7 +4032,7 @@ }, "target":{ "shape":"PolicyTarget", - "documentation":"

The identity to which the policy is attached.

" + "documentation":"

The identity to which the policy is attached.

" } } }, @@ -4645,16 +4645,108 @@ "AwsIotJobArn":{"type":"string"}, "AwsIotJobId":{"type":"string"}, "AwsIotSqlVersion":{"type":"string"}, + "AwsJobAbortConfig":{ + "type":"structure", + "required":["abortCriteriaList"], + "members":{ + "abortCriteriaList":{ + "shape":"AwsJobAbortCriteriaList", + "documentation":"

The list of criteria that determine when and how to abort the job.

" + } + }, + "documentation":"

The criteria that determine when and how a job abort takes place.

" + }, + "AwsJobAbortCriteria":{ + "type":"structure", + "required":[ + "failureType", + "action", + "thresholdPercentage", + "minNumberOfExecutedThings" + ], + "members":{ + "failureType":{ + "shape":"AwsJobAbortCriteriaFailureType", + "documentation":"

The type of job execution failures that can initiate a job abort.

" + }, + "action":{ + "shape":"AwsJobAbortCriteriaAbortAction", + "documentation":"

The type of job action to take to initiate the job abort.

" + }, + "thresholdPercentage":{ + "shape":"AwsJobAbortCriteriaAbortThresholdPercentage", + "documentation":"

The minimum percentage of job execution failures that must occur to initiate the job abort.

AWS IoT supports up to two digits after the decimal (for example, 10.9 and 10.99, but not 10.999).

" + }, + "minNumberOfExecutedThings":{ + "shape":"AwsJobAbortCriteriaMinimumNumberOfExecutedThings", + "documentation":"

The minimum number of things which must receive job execution notifications before the job can be aborted.

" + } + }, + "documentation":"

The criteria that determine when and how a job abort takes place.

" + }, + "AwsJobAbortCriteriaAbortAction":{ + "type":"string", + "enum":["CANCEL"] + }, + "AwsJobAbortCriteriaAbortThresholdPercentage":{ + "type":"double", + "max":100 + }, + "AwsJobAbortCriteriaFailureType":{ + "type":"string", + "enum":[ + "FAILED", + "REJECTED", + "TIMED_OUT", + "ALL" + ] + }, + "AwsJobAbortCriteriaList":{ + "type":"list", + "member":{"shape":"AwsJobAbortCriteria"}, + "min":1 + }, + "AwsJobAbortCriteriaMinimumNumberOfExecutedThings":{ + "type":"integer", + "min":1 + }, "AwsJobExecutionsRolloutConfig":{ "type":"structure", "members":{ "maximumPerMinute":{ "shape":"MaximumPerMinute", "documentation":"

The maximum number of OTA update job executions started per minute.

" + }, + "exponentialRate":{ + "shape":"AwsJobExponentialRolloutRate", + "documentation":"

The rate of increase for a job rollout. This parameter allows you to define an exponential rate increase for a job rollout.

" } }, "documentation":"

Configuration for the rollout of OTA updates.

" }, + "AwsJobExponentialRolloutRate":{ + "type":"structure", + "required":[ + "baseRatePerMinute", + "incrementFactor", + "rateIncreaseCriteria" + ], + "members":{ + "baseRatePerMinute":{ + "shape":"AwsJobRolloutRatePerMinute", + "documentation":"

The minimum number of things that will be notified of a pending job, per minute, at the start of the job rollout. This is the initial rate of the rollout.

" + }, + "incrementFactor":{ + "shape":"AwsJobRolloutIncrementFactor", + "documentation":"

The rate of increase for a job rollout. The number of things notified is multiplied by this factor.

" + }, + "rateIncreaseCriteria":{ + "shape":"AwsJobRateIncreaseCriteria", + "documentation":"

The criteria to initiate the increase in rate of rollout for a job.

AWS IoT supports up to one digit after the decimal (for example, 1.5, but not 1.55).

" + } + }, + "documentation":"

The rate of increase for a job rollout. This parameter allows you to define an exponential rate increase for a job rollout.

" + }, "AwsJobPresignedUrlConfig":{ "type":"structure", "members":{ @@ -4665,6 +4757,41 @@ }, "documentation":"

Configuration information for pre-signed URLs. Valid when protocols contains HTTP.

" }, + "AwsJobRateIncreaseCriteria":{ + "type":"structure", + "members":{ + "numberOfNotifiedThings":{ + "shape":"AwsJobRateIncreaseCriteriaNumberOfThings", + "documentation":"

When this number of things have been notified, it will initiate an increase in the rollout rate.

" + }, + "numberOfSucceededThings":{ + "shape":"AwsJobRateIncreaseCriteriaNumberOfThings", + "documentation":"

When this number of things have succeeded in their job execution, it will initiate an increase in the rollout rate.

" + } + }, + "documentation":"

The criteria to initiate the increase in rate of rollout for a job.

" + }, + "AwsJobRateIncreaseCriteriaNumberOfThings":{ + "type":"integer", + "min":1 + }, + "AwsJobRolloutIncrementFactor":{"type":"double"}, + "AwsJobRolloutRatePerMinute":{ + "type":"integer", + "max":1000, + "min":1 + }, + "AwsJobTimeoutConfig":{ + "type":"structure", + "members":{ + "inProgressTimeoutInMinutes":{ + "shape":"AwsJobTimeoutInProgressTimeoutInMinutes", + "documentation":"

Specifies the amount of time, in minutes, this device has to finish execution of this job. The timeout interval can be anywhere between 1 minute and 7 days (1 to 10080 minutes). The in progress timer can't be updated and will apply to all job executions for the job. Whenever a job execution remains in the IN_PROGRESS status for longer than this interval, the job execution will fail and switch to the terminal TIMED_OUT status.

" + } + }, + "documentation":"

Specifies the amount of time each device has to finish its execution of the job. A timer is started when the job execution status is set to IN_PROGRESS. If the job execution status is not set to another terminal state before the timer expires, it will be automatically set to TIMED_OUT.

" + }, + "AwsJobTimeoutInProgressTimeoutInMinutes":{"type":"long"}, "Behavior":{ "type":"structure", "required":["name"], @@ -5865,7 +5992,7 @@ }, "targets":{ "shape":"Targets", - "documentation":"

The targeted devices to receive OTA updates.

" + "documentation":"

The devices targeted to receive OTA updates.

" }, "protocols":{ "shape":"Protocols", @@ -5883,13 +6010,21 @@ "shape":"AwsJobPresignedUrlConfig", "documentation":"

Configuration information for pre-signed URLs.

" }, + "awsJobAbortConfig":{ + "shape":"AwsJobAbortConfig", + "documentation":"

The criteria that determine when and how a job abort takes place.

" + }, + "awsJobTimeoutConfig":{ + "shape":"AwsJobTimeoutConfig", + "documentation":"

Specifies the amount of time each device has to finish its execution of the job. A timer is started when the job execution status is set to IN_PROGRESS. If the job execution status is not set to another terminal state before the timer expires, it will be automatically set to TIMED_OUT.

" + }, "files":{ "shape":"OTAUpdateFiles", "documentation":"

The files to be streamed by the OTA update.

" }, "roleArn":{ "shape":"RoleArn", - "documentation":"

The IAM role that allows access to the AWS IoT Jobs service.

" + "documentation":"

The IAM role that grants AWS IoT access to the Amazon S3, AWS IoT jobs and AWS Code Signing resources to create an OTA update job.

" }, "additionalParameters":{ "shape":"AdditionalParameterMap", @@ -6803,7 +6938,7 @@ "members":{ "otaUpdateId":{ "shape":"OTAUpdateId", - "documentation":"

The OTA update ID to delete.

", + "documentation":"

The ID of the OTA update to delete.

", "location":"uri", "locationName":"otaUpdateId" }, @@ -6815,7 +6950,7 @@ }, "forceDeleteAWSJob":{ "shape":"ForceDeleteAWSJob", - "documentation":"

Specifies if the AWS Job associated with the OTA update should be deleted with the OTA update is deleted.

", + "documentation":"

Specifies if the AWS Job associated with the OTA update should be deleted when the OTA update is deleted.

", "location":"querystring", "locationName":"forceDeleteAWSJob" } diff --git a/botocore/data/kms/2014-11-01/service-2.json b/botocore/data/kms/2014-11-01/service-2.json index 54d175e6..04a21ee0 100644 --- a/botocore/data/kms/2014-11-01/service-2.json +++ b/botocore/data/kms/2014-11-01/service-2.json @@ -63,7 +63,7 @@ {"shape":"LimitExceededException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Creates a display name for a customer managed customer master key (CMK). You can use an alias to identify a CMK in cryptographic operations, such as Encrypt and GenerateDataKey. You can change the CMK associated with the alias at any time.

Aliases are easier to remember than key IDs. They can also help to simplify your applications. For example, if you use an alias in your code, you can change the CMK your code uses by associating a given alias with a different CMK.

To run the same code in multiple AWS regions, use an alias in your code, such as alias/ApplicationKey. Then, in each AWS Region, create an alias/ApplicationKey alias that is associated with a CMK in that Region. When you run your code, it uses the alias/ApplicationKey CMK for that AWS Region without any Region-specific code.

This operation does not return a response. To get the alias that you created, use the ListAliases operation.

To use aliases successfully, be aware of the following information.

Because an alias is not a property of a CMK, you can delete and change the aliases of a CMK without affecting the CMK. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases and alias ARNs of CMKs in each AWS account and Region, use the ListAliases operation.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" + "documentation":"

Creates a display name for a customer managed customer master key (CMK). You can use an alias to identify a CMK in cryptographic operations, such as Encrypt and GenerateDataKey. You can change the CMK associated with the alias at any time.

Aliases are easier to remember than key IDs. They can also help to simplify your applications. For example, if you use an alias in your code, you can change the CMK your code uses by associating a given alias with a different CMK.

To run the same code in multiple AWS regions, use an alias in your code, such as alias/ApplicationKey. Then, in each AWS Region, create an alias/ApplicationKey alias that is associated with a CMK in that Region. When you run your code, it uses the alias/ApplicationKey CMK for that AWS Region without any Region-specific code.

This operation does not return a response. To get the alias that you created, use the ListAliases operation.

To use aliases successfully, be aware of the following information.

Because an alias is not a property of a CMK, you can delete and change the aliases of a CMK without affecting the CMK. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases and alias ARNs of CMKs in each AWS account and Region, use the ListAliases operation.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" }, "CreateCustomKeyStore":{ "name":"CreateCustomKeyStore", @@ -102,7 +102,7 @@ {"shape":"LimitExceededException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Adds a grant to a customer master key (CMK). The grant allows the grantee principal to use the CMK when the conditions specified in the grant are met. When setting permissions, grants are an alternative to key policies.

To create a grant that allows a cryptographic operation only when the request includes a particular encryption context, use the Constraints parameter. For details, see GrantConstraints.

You can create grants on symmetric and asymmetric CMKs. However, if the grant allows an operation that the CMK does not support, CreateGrant fails with a ValidationException.

For information about symmetric and asymmetric CMKs, see Using Symmetric and Asymmetric CMKs in the AWS Key Management Service Developer Guide.

To perform this operation on a CMK in a different AWS account, specify the key ARN in the value of the KeyId parameter. For more information about grants, see Grants in the AWS Key Management Service Developer Guide .

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" + "documentation":"

Adds a grant to a customer master key (CMK). The grant allows the grantee principal to use the CMK when the conditions specified in the grant are met. When setting permissions, grants are an alternative to key policies.

To create a grant that allows a cryptographic operation only when the request includes a particular encryption context, use the Constraints parameter. For details, see GrantConstraints.

You can create grants on symmetric and asymmetric CMKs. However, if the grant allows an operation that the CMK does not support, CreateGrant fails with a ValidationException.

For information about symmetric and asymmetric CMKs, see Using Symmetric and Asymmetric CMKs in the AWS Key Management Service Developer Guide.

To perform this operation on a CMK in a different AWS account, specify the key ARN in the value of the KeyId parameter. For more information about grants, see Grants in the AWS Key Management Service Developer Guide .

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" }, "CreateKey":{ "name":"CreateKey", @@ -177,7 +177,7 @@ {"shape":"CustomKeyStoreNotFoundException"}, {"shape":"KMSInternalException"} ], - "documentation":"

Deletes a custom key store. This operation does not delete the AWS CloudHSM cluster that is associated with the custom key store, or affect any users or keys in the cluster.

The custom key store that you delete cannot contain any AWS KMS customer master keys (CMKs). Before deleting the key store, verify that you will never need to use any of the CMKs in the key store for any cryptographic operations. Then, use ScheduleKeyDeletion to delete the AWS KMS customer master keys (CMKs) from the key store. When the scheduled waiting period expires, the ScheduleKeyDeletion operation deletes the CMKs. Then it makes a best effort to delete the key material from the associated cluster. However, you might need to manually delete the orphaned key material from the cluster and its backups.

After all CMKs are deleted from AWS KMS, use DisconnectCustomKeyStore to disconnect the key store from AWS KMS. Then, you can delete the custom key store.

Instead of deleting the custom key store, consider using DisconnectCustomKeyStore to disconnect it from AWS KMS. While the key store is disconnected, you cannot create or use the CMKs in the key store. But, you do not need to delete CMKs and you can reconnect a disconnected custom key store at any time.

If the operation succeeds, it returns a JSON object with no properties.

This operation is part of the Custom Key Store feature in AWS KMS, which combines the convenience and extensive integration of AWS KMS with the isolation and control of a single-tenant key store.

" + "documentation":"

Deletes a custom key store. This operation does not delete the AWS CloudHSM cluster that is associated with the custom key store, or affect any users or keys in the cluster.

The custom key store that you delete cannot contain any AWS KMS customer master keys (CMKs). Before deleting the key store, verify that you will never need to use any of the CMKs in the key store for any cryptographic operations. Then, use ScheduleKeyDeletion to delete the AWS KMS customer master keys (CMKs) from the key store. When the scheduled waiting period expires, the ScheduleKeyDeletion operation deletes the CMKs. Then it makes a best effort to delete the key material from the associated cluster. However, you might need to manually delete the orphaned key material from the cluster and its backups.

After all CMKs are deleted from AWS KMS, use DisconnectCustomKeyStore to disconnect the key store from AWS KMS. Then, you can delete the custom key store.

Instead of deleting the custom key store, consider using DisconnectCustomKeyStore to disconnect it from AWS KMS. While the key store is disconnected, you cannot create or use the CMKs in the key store. But, you do not need to delete CMKs and you can reconnect a disconnected custom key store at any time.

If the operation succeeds, it returns a JSON object with no properties.

This operation is part of the Custom Key Store feature in AWS KMS, which combines the convenience and extensive integration of AWS KMS with the isolation and control of a single-tenant key store.

" }, "DeleteImportedKeyMaterial":{ "name":"DeleteImportedKeyMaterial", @@ -240,7 +240,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Sets the state of a customer master key (CMK) to disabled, thereby preventing its use for cryptographic operations. You cannot perform this operation on a CMK in a different AWS account.

For more information about how key state affects the use of a CMK, see How Key State Affects the Use of a Customer Master Key in the AWS Key Management Service Developer Guide .

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" + "documentation":"

Sets the state of a customer master key (CMK) to disabled, thereby preventing its use for cryptographic operations. You cannot perform this operation on a CMK in a different AWS account.

For more information about how key state affects the use of a CMK, see How Key State Affects the Use of a Customer Master Key in the AWS Key Management Service Developer Guide .

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" }, "DisableKeyRotation":{ "name":"DisableKeyRotation", @@ -273,7 +273,7 @@ {"shape":"CustomKeyStoreNotFoundException"}, {"shape":"KMSInternalException"} ], - "documentation":"

Disconnects the custom key store from its associated AWS CloudHSM cluster. While a custom key store is disconnected, you can manage the custom key store and its customer master keys (CMKs), but you cannot create or use CMKs in the custom key store. You can reconnect the custom key store at any time.

While a custom key store is disconnected, all attempts to create customer master keys (CMKs) in the custom key store or to use existing CMKs in cryptographic operations will fail. This action can prevent users from storing and accessing sensitive data.

To find the connection state of a custom key store, use the DescribeCustomKeyStores operation. To reconnect a custom key store, use the ConnectCustomKeyStore operation.

If the operation succeeds, it returns a JSON object with no properties.

This operation is part of the Custom Key Store feature in AWS KMS, which combines the convenience and extensive integration of AWS KMS with the isolation and control of a single-tenant key store.

" + "documentation":"

Disconnects the custom key store from its associated AWS CloudHSM cluster. While a custom key store is disconnected, you can manage the custom key store and its customer master keys (CMKs), but you cannot create or use CMKs in the custom key store. You can reconnect the custom key store at any time.

While a custom key store is disconnected, all attempts to create customer master keys (CMKs) in the custom key store or to use existing CMKs in cryptographic operations will fail. This action can prevent users from storing and accessing sensitive data.

To find the connection state of a custom key store, use the DescribeCustomKeyStores operation. To reconnect a custom key store, use the ConnectCustomKeyStore operation.

If the operation succeeds, it returns a JSON object with no properties.

This operation is part of the Custom Key Store feature in AWS KMS, which combines the convenience and extensive integration of AWS KMS with the isolation and control of a single-tenant key store.

" }, "EnableKey":{ "name":"EnableKey", @@ -290,7 +290,7 @@ {"shape":"LimitExceededException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Sets the key state of a customer master key (CMK) to enabled. This allows you to use the CMK for cryptographic operations. You cannot perform this operation on a CMK in a different AWS account.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" + "documentation":"

Sets the key state of a customer master key (CMK) to enabled. This allows you to use the CMK for cryptographic operations. You cannot perform this operation on a CMK in a different AWS account.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" }, "EnableKeyRotation":{ "name":"EnableKeyRotation", @@ -328,7 +328,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Encrypts plaintext into ciphertext by using a customer master key (CMK). The Encrypt operation has two primary use cases:

You don't need to use the Encrypt operation to encrypt a data key. The GenerateDataKey and GenerateDataKeyPair operations return a plaintext data key and an encrypted copy of that data key.

When you encrypt data, you must specify a symmetric or asymmetric CMK to use in the encryption operation. The CMK must have a KeyUsage value of ENCRYPT_DECRYPT. To find the KeyUsage of a CMK, use the DescribeKey operation.

If you use a symmetric CMK, you can use an encryption context to add additional security to your encryption operation. If you specify an EncryptionContext when encrypting data, you must specify the same encryption context (a case-sensitive exact match) when decrypting the data. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

If you specify an asymmetric CMK, you must also specify the encryption algorithm. The algorithm must be compatible with the CMK type.

When you use an asymmetric CMK to encrypt or reencrypt data, be sure to record the CMK and encryption algorithm that you choose. You will be required to provide the same CMK and encryption algorithm when you decrypt the data. If the CMK and algorithm do not match the values used to encrypt the data, the decrypt operation fails.

You are not required to supply the CMK ID and encryption algorithm when you decrypt with symmetric CMKs because AWS KMS stores this information in the ciphertext blob. AWS KMS cannot store metadata in ciphertext generated with asymmetric keys. The standard format for asymmetric key ciphertext does not include configurable fields.

The maximum size of the data that you can encrypt varies with the type of CMK and the encryption algorithm that you choose.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

To perform this operation on a CMK in a different AWS account, specify the key ARN or alias ARN in the value of the KeyId parameter.

" + "documentation":"

Encrypts plaintext into ciphertext by using a customer master key (CMK). The Encrypt operation has two primary use cases:

You don't need to use the Encrypt operation to encrypt a data key. The GenerateDataKey and GenerateDataKeyPair operations return a plaintext data key and an encrypted copy of that data key.

When you encrypt data, you must specify a symmetric or asymmetric CMK to use in the encryption operation. The CMK must have a KeyUsage value of ENCRYPT_DECRYPT. To find the KeyUsage of a CMK, use the DescribeKey operation.

If you use a symmetric CMK, you can use an encryption context to add additional security to your encryption operation. If you specify an EncryptionContext when encrypting data, you must specify the same encryption context (a case-sensitive exact match) when decrypting the data. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

If you specify an asymmetric CMK, you must also specify the encryption algorithm. The algorithm must be compatible with the CMK type.

When you use an asymmetric CMK to encrypt or reencrypt data, be sure to record the CMK and encryption algorithm that you choose. You will be required to provide the same CMK and encryption algorithm when you decrypt the data. If the CMK and algorithm do not match the values used to encrypt the data, the decrypt operation fails.

You are not required to supply the CMK ID and encryption algorithm when you decrypt with symmetric CMKs because AWS KMS stores this information in the ciphertext blob. AWS KMS cannot store metadata in ciphertext generated with asymmetric keys. The standard format for asymmetric key ciphertext does not include configurable fields.

The maximum size of the data that you can encrypt varies with the type of CMK and the encryption algorithm that you choose.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

To perform this operation on a CMK in a different AWS account, specify the key ARN or alias ARN in the value of the KeyId parameter.

" }, "GenerateDataKey":{ "name":"GenerateDataKey", @@ -348,7 +348,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Generates a unique symmetric data key. This operation returns a plaintext copy of the data key and a copy that is encrypted under a customer master key (CMK) that you specify. You can use the plaintext key to encrypt your data outside of AWS KMS and store the encrypted data key with the encrypted data.

GenerateDataKey returns a unique data key for each request. The bytes in the key are not related to the caller or CMK that is used to encrypt the data key.

To generate a data key, specify the symmetric CMK that will be used to encrypt the data key. You cannot use an asymmetric CMK to generate data keys. To get the type of your CMK, use the DescribeKey operation.

You must also specify the length of the data key. Use either the KeySpec or NumberOfBytes parameters (but not both). For 128-bit and 256-bit data keys, use the KeySpec parameter.

If the operation succeeds, the plaintext copy of the data key is in the Plaintext field of the response, and the encrypted copy of the data key in the CiphertextBlob field.

To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. To generate an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operation. To get a cryptographically secure random byte string, use GenerateRandom.

You can use the optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

We recommend that you use the following pattern to encrypt data locally in your application:

  1. Use the GenerateDataKey operation to get a data encryption key.

  2. Use the plaintext data key (returned in the Plaintext field of the response) to encrypt data locally, then erase the plaintext data key from memory.

  3. Store the encrypted data key (returned in the CiphertextBlob field of the response) alongside the locally encrypted data.

To decrypt data locally:

  1. Use the Decrypt operation to decrypt the encrypted data key. The operation returns a plaintext copy of the data key.

  2. Use the plaintext data key to decrypt data locally, then erase the plaintext data key from memory.

" + "documentation":"

Generates a unique symmetric data key for client-side encryption. This operation returns a plaintext copy of the data key and a copy that is encrypted under a customer master key (CMK) that you specify. You can use the plaintext key to encrypt your data outside of AWS KMS and store the encrypted data key with the encrypted data.

GenerateDataKey returns a unique data key for each request. The bytes in the plaintext key are not related to the caller or the CMK.

To generate a data key, specify the symmetric CMK that will be used to encrypt the data key. You cannot use an asymmetric CMK to generate data keys. To get the type of your CMK, use the DescribeKey operation. You must also specify the length of the data key. Use either the KeySpec or NumberOfBytes parameters (but not both). For 128-bit and 256-bit data keys, use the KeySpec parameter.

To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. To generate an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operation. To get a cryptographically secure random byte string, use GenerateRandom.

You can use the optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

How to use your data key

We recommend that you use the following pattern to encrypt data locally in your application. You can write your own code or use a client-side encryption library, such as the AWS Encryption SDK, the Amazon DynamoDB Encryption Client, or Amazon S3 client-side encryption to do these tasks for you.

To encrypt data outside of AWS KMS:

  1. Use the GenerateDataKey operation to get a data key.

  2. Use the plaintext data key (in the Plaintext field of the response) to encrypt your data outside of AWS KMS. Then erase the plaintext data key from memory.

  3. Store the encrypted data key (in the CiphertextBlob field of the response) with the encrypted data.

To decrypt data outside of AWS KMS:

  1. Use the Decrypt operation to decrypt the encrypted data key. The operation returns a plaintext copy of the data key.

  2. Use the plaintext data key to decrypt data outside of AWS KMS, then erase the plaintext data key from memory.

" }, "GenerateDataKeyPair":{ "name":"GenerateDataKeyPair", @@ -366,9 +366,10 @@ {"shape":"InvalidKeyUsageException"}, {"shape":"InvalidGrantTokenException"}, {"shape":"KMSInternalException"}, - {"shape":"KMSInvalidStateException"} + {"shape":"KMSInvalidStateException"}, + {"shape":"UnsupportedOperationException"} ], - "documentation":"

Generates a unique asymmetric data key pair. The GenerateDataKeyPair operation returns a plaintext public key, a plaintext private key, and a copy of the private key that is encrypted under the symmetric CMK you specify. You can use the data key pair to perform asymmetric cryptography outside of AWS KMS.

GenerateDataKeyPair returns a unique data key pair for each request. The bytes in the keys are not related to the caller or the CMK that is used to encrypt the private key.

You can use the public key that GenerateDataKeyPair returns to encrypt data or verify a signature outside of AWS KMS. Then, store the encrypted private key with the data. When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.

To generate a data key pair, you must specify a symmetric customer master key (CMK) to encrypt the private key in a data key pair. You cannot use an asymmetric CMK. To get the type of your CMK, use the DescribeKey operation.

If you are using the data key pair to encrypt data, or for any operation where you don't immediately need a private key, consider using the GenerateDataKeyPairWithoutPlaintext operation. GenerateDataKeyPairWithoutPlaintext returns a plaintext public key and an encrypted private key, but omits the plaintext private key that you need only to decrypt ciphertext or sign a message. Later, when you need to decrypt the data or sign a message, use the Decrypt operation to decrypt the encrypted private key in the data key pair.

You can use the optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" + "documentation":"

Generates a unique asymmetric data key pair. The GenerateDataKeyPair operation returns a plaintext public key, a plaintext private key, and a copy of the private key that is encrypted under the symmetric CMK you specify. You can use the data key pair to perform asymmetric cryptography outside of AWS KMS.

GenerateDataKeyPair returns a unique data key pair for each request. The bytes in the keys are not related to the caller or the CMK that is used to encrypt the private key.

You can use the public key that GenerateDataKeyPair returns to encrypt data or verify a signature outside of AWS KMS. Then, store the encrypted private key with the data. When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.

To generate a data key pair, you must specify a symmetric customer master key (CMK) to encrypt the private key in a data key pair. You cannot use an asymmetric CMK or a CMK in a custom key store. To get the type and origin of your CMK, use the DescribeKey operation.

If you are using the data key pair to encrypt data, or for any operation where you don't immediately need a private key, consider using the GenerateDataKeyPairWithoutPlaintext operation. GenerateDataKeyPairWithoutPlaintext returns a plaintext public key and an encrypted private key, but omits the plaintext private key that you need only to decrypt ciphertext or sign a message. Later, when you need to decrypt the data or sign a message, use the Decrypt operation to decrypt the encrypted private key in the data key pair.

You can use the optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" }, "GenerateDataKeyPairWithoutPlaintext":{ "name":"GenerateDataKeyPairWithoutPlaintext", @@ -386,9 +387,10 @@ {"shape":"InvalidKeyUsageException"}, {"shape":"InvalidGrantTokenException"}, {"shape":"KMSInternalException"}, - {"shape":"KMSInvalidStateException"} + {"shape":"KMSInvalidStateException"}, + {"shape":"UnsupportedOperationException"} ], - "documentation":"

Generates a unique asymmetric data key pair. The GenerateDataKeyPairWithoutPlaintext operation returns a plaintext public key and a copy of the private key that is encrypted under the symmetric CMK you specify. Unlike GenerateDataKeyPair, this operation does not return a plaintext private key.

To generate a data key pair, you must specify a symmetric customer master key (CMK) to encrypt the private key in the data key pair. You cannot use an asymmetric CMK. To get the type of your CMK, use the KeySpec field in the DescribeKey response.

You can use the public key that GenerateDataKeyPairWithoutPlaintext returns to encrypt data or verify a signature outside of AWS KMS. Then, store the encrypted private key with the data. When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.

GenerateDataKeyPairWithoutPlaintext returns a unique data key pair for each request. The bytes in the key are not related to the caller or CMK that is used to encrypt the private key.

You can use the optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" + "documentation":"

Generates a unique asymmetric data key pair. The GenerateDataKeyPairWithoutPlaintext operation returns a plaintext public key and a copy of the private key that is encrypted under the symmetric CMK you specify. Unlike GenerateDataKeyPair, this operation does not return a plaintext private key.

To generate a data key pair, you must specify a symmetric customer master key (CMK) to encrypt the private key in the data key pair. You cannot use an asymmetric CMK or a CMK in a custom key store. To get the type and origin of your CMK, use the KeySpec field in the DescribeKey response.

You can use the public key that GenerateDataKeyPairWithoutPlaintext returns to encrypt data or verify a signature outside of AWS KMS. Then, store the encrypted private key with the data. When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.

GenerateDataKeyPairWithoutPlaintext returns a unique data key pair for each request. The bytes in the key are not related to the caller or CMK that is used to encrypt the private key.

You can use the optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" }, "GenerateDataKeyWithoutPlaintext":{ "name":"GenerateDataKeyWithoutPlaintext", @@ -408,7 +410,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Generates a unique symmetric data key. This operation returns a data key that is encrypted under a customer master key (CMK) that you specify. To request an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operations.

GenerateDataKeyWithoutPlaintext is identical to the GenerateDataKey operation except that it returns only the encrypted copy of the data key. This operation is useful for systems that need to encrypt data at some point, but not immediately. When you need to encrypt the data, you call the Decrypt operation on the encrypted copy of the key.

It's also useful in distributed systems with different levels of trust. For example, you might store encrypted data in containers. One component of your system creates new containers and stores an encrypted data key with each container. Then, a different component puts the data into the containers. That component first decrypts the data key, uses the plaintext data key to encrypt data, puts the encrypted data into the container, and then destroys the plaintext data key. In this system, the component that creates the containers never sees the plaintext data key.

GenerateDataKeyWithoutPlaintext returns a unique data key for each request. The bytes in the keys are not related to the caller or CMK that is used to encrypt the private key.

To generate a data key, you must specify the symmetric customer master key (CMK) that is used to encrypt the data key. You cannot use an asymmetric CMK to generate a data key. To get the type of your CMK, use the DescribeKey operation.

If the operation succeeds, you will find the encrypted copy of the data key in the CiphertextBlob field.

You can use the optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" + "documentation":"

Generates a unique symmetric data key. This operation returns a data key that is encrypted under a customer master key (CMK) that you specify. To request an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operations.

GenerateDataKeyWithoutPlaintext is identical to the GenerateDataKey operation except that it returns only the encrypted copy of the data key. This operation is useful for systems that need to encrypt data at some point, but not immediately. When you need to encrypt the data, you call the Decrypt operation on the encrypted copy of the key.

It's also useful in distributed systems with different levels of trust. For example, you might store encrypted data in containers. One component of your system creates new containers and stores an encrypted data key with each container. Then, a different component puts the data into the containers. That component first decrypts the data key, uses the plaintext data key to encrypt data, puts the encrypted data into the container, and then destroys the plaintext data key. In this system, the component that creates the containers never sees the plaintext data key.

GenerateDataKeyWithoutPlaintext returns a unique data key for each request. The bytes in the keys are not related to the caller or CMK that is used to encrypt the private key.

To generate a data key, you must specify the symmetric customer master key (CMK) that is used to encrypt the data key. You cannot use an asymmetric CMK to generate a data key. To get the type of your CMK, use the DescribeKey operation.

If the operation succeeds, you will find the encrypted copy of the data key in the CiphertextBlob field.

You can use the optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" }, "GenerateRandom":{ "name":"GenerateRandom", @@ -556,7 +558,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Gets a list of all grants for the specified customer master key (CMK).

To perform this operation on a CMK in a different AWS account, specify the key ARN in the value of the KeyId parameter.

" + "documentation":"

Gets a list of all grants for the specified customer master key (CMK).

To perform this operation on a CMK in a different AWS account, specify the key ARN in the value of the KeyId parameter.

The GranteePrincipal field in the ListGrants response usually contains the user or role designated as the grantee principal in the grant. However, when the grantee principal in the grant is an AWS service, the GranteePrincipal field contains the service principal, which might represent several different grantee principals.

" }, "ListKeyPolicies":{ "name":"ListKeyPolicies", @@ -662,7 +664,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Decrypts ciphertext and then reencrypts it entirely within AWS KMS. You can use this operation to change the customer master key (CMK) under which data is encrypted, such as when you manually rotate a CMK or change the CMK that protects a ciphertext. You can also use it to reencrypt ciphertext under the same CMK, such as to change the encryption context of a ciphertext.

The ReEncrypt operation can decrypt ciphertext that was encrypted by using an AWS KMS CMK in an AWS KMS operation, such as Encrypt or GenerateDataKey. It can also decrypt ciphertext that was encrypted by using the public key of an asymmetric CMK outside of AWS KMS. However, it cannot decrypt ciphertext produced by other libraries, such as the AWS Encryption SDK or Amazon S3 client-side encryption. These libraries return a ciphertext format that is incompatible with AWS KMS.

When you use the ReEncrypt operation, you need to provide information for the decrypt operation and the subsequent encrypt operation.

Unlike other AWS KMS API operations, ReEncrypt callers must have two permissions:

To permit reencryption from

or to a CMK, include the \"kms:ReEncrypt*\" permission in your key policy. This permission is automatically included in the key policy when you use the console to create a CMK. But you must include it manually when you create a CMK programmatically or when you use the PutKeyPolicy operation set a key policy.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" + "documentation":"

Decrypts ciphertext and then reencrypts it entirely within AWS KMS. You can use this operation to change the customer master key (CMK) under which data is encrypted, such as when you manually rotate a CMK or change the CMK that protects a ciphertext. You can also use it to reencrypt ciphertext under the same CMK, such as to change the encryption context of a ciphertext.

The ReEncrypt operation can decrypt ciphertext that was encrypted by using an AWS KMS CMK in an AWS KMS operation, such as Encrypt or GenerateDataKey. It can also decrypt ciphertext that was encrypted by using the public key of an asymmetric CMK outside of AWS KMS. However, it cannot decrypt ciphertext produced by other libraries, such as the AWS Encryption SDK or Amazon S3 client-side encryption. These libraries return a ciphertext format that is incompatible with AWS KMS.

When you use the ReEncrypt operation, you need to provide information for the decrypt operation and the subsequent encrypt operation.

Unlike other AWS KMS API operations, ReEncrypt callers must have two permissions:

To permit reencryption from or to a CMK, include the \"kms:ReEncrypt*\" permission in your key policy. This permission is automatically included in the key policy when you use the console to create a CMK. But you must include it manually when you create a CMK programmatically or when you use the PutKeyPolicy operation to set a key policy.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" }, "RetireGrant":{ "name":"RetireGrant", @@ -780,6 +782,7 @@ {"shape":"DependencyTimeoutException"}, {"shape":"NotFoundException"}, {"shape":"KMSInternalException"}, + {"shape":"LimitExceededException"}, {"shape":"KMSInvalidStateException"} ], "documentation":"

Associates an existing AWS KMS alias with a different customer master key (CMK). Each alias is associated with only one CMK at a time, although a CMK can have multiple aliases. The alias and the CMK must be in the same AWS account and region. You cannot perform this operation on an alias in a different AWS account.

The current and new CMK must be the same type (both symmetric or both asymmetric), and they must have the same key usage (ENCRYPT_DECRYPT or SIGN_VERIFY). This restriction prevents errors in code that uses aliases. If you must assign an alias to a different type of CMK, use DeleteAlias to delete the old alias and CreateAlias to create a new alias.

You cannot use UpdateAlias to change an alias name. To change an alias name, use DeleteAlias to delete the old alias and CreateAlias to create a new alias.

Because an alias is not a property of a CMK, you can create, update, and delete the aliases of a CMK without affecting the CMK. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases of all CMKs in the account, use the ListAliases operation.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" @@ -909,7 +912,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

The unique identifier of the master key for which deletion is canceled.

" + "documentation":"

The Amazon Resource Name (key ARN) of the CMK whose deletion is canceled.

" } } }, @@ -1081,7 +1084,7 @@ }, "Constraints":{ "shape":"GrantConstraints", - "documentation":"

Allows a cryptographic operation only when the encryption context matches or includes the encryption context specified in this structure. For more information about encryption context, see Encryption Context in the AWS Key Management Service Developer Guide .

" + "documentation":"

Allows a cryptographic operation only when the encryption context matches or includes the encryption context specified in this structure. For more information about encryption context, see Encryption Context in the AWS Key Management Service Developer Guide .

" }, "GrantTokens":{ "shape":"GrantTokenList", @@ -1119,7 +1122,7 @@ }, "KeyUsage":{ "shape":"KeyUsageType", - "documentation":"

Determines the cryptographic operations for which you can use the CMK. The default value is ENCRYPT_DECRYPT. This parameter is required only for asymmetric CMKs. You can't change the KeyUsage value after the CMK is created.

Select only one valid value.

" + "documentation":"

Determines the cryptographic operations for which you can use the CMK. The default value is ENCRYPT_DECRYPT. This parameter is required only for asymmetric CMKs. You can't change the KeyUsage value after the CMK is created.

Select only one valid value.

" }, "CustomerMasterKeySpec":{ "shape":"CustomerMasterKeySpec", @@ -1223,7 +1226,7 @@ }, "ConnectionErrorCode":{ "shape":"ConnectionErrorCodeType", - "documentation":"

Describes the connection error. This field appears in the response only when the ConnectionState is FAILED. For help resolving these errors, see How to Fix a Connection Failure in AWS Key Management Service Developer Guide.

Valid values are:

" + "documentation":"

Describes the connection error. This field appears in the response only when the ConnectionState is FAILED. For help resolving these errors, see How to Fix a Connection Failure in AWS Key Management Service Developer Guide.

Valid values are:

" }, "CreationDate":{ "shape":"DateType", @@ -1275,7 +1278,7 @@ }, "EncryptionContext":{ "shape":"EncryptionContextType", - "documentation":"

Specifies the encryption context to use when decrypting the data. An encryption context is valid only for cryptographic operations with a symmetric CMK. The standard asymmetric encryption algorithms that AWS KMS uses do not support an encryption context.

An encryption context is a collection of non-secret key-value pairs that represents additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is optional when encrypting with a symmetric CMK, but it is highly recommended.

For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

" + "documentation":"

Specifies the encryption context to use when decrypting the data. An encryption context is valid only for cryptographic operations with a symmetric CMK. The standard asymmetric encryption algorithms that AWS KMS uses do not support an encryption context.

An encryption context is a collection of non-secret key-value pairs that represents additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is optional when encrypting with a symmetric CMK, but it is highly recommended.

For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

" }, "GrantTokens":{ "shape":"GrantTokenList", @@ -1296,7 +1299,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

The ARN of the customer master key that was used to perform the decryption.

" + "documentation":"

The Amazon Resource Name (key ARN) of the CMK that was used to decrypt the ciphertext.

" }, "Plaintext":{ "shape":"PlaintextType", @@ -1498,7 +1501,7 @@ }, "EncryptionContext":{ "shape":"EncryptionContextType", - "documentation":"

Specifies the encryption context that will be used to encrypt the data. An encryption context is valid only for cryptographic operations with a symmetric CMK. The standard asymmetric encryption algorithms that AWS KMS uses do not support an encryption context.

An encryption context is a collection of non-secret key-value pairs that represents additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is optional when encrypting with a symmetric CMK, but it is highly recommended.

For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

" + "documentation":"

Specifies the encryption context that will be used to encrypt the data. An encryption context is valid only for cryptographic operations with a symmetric CMK. The standard asymmetric encryption algorithms that AWS KMS uses do not support an encryption context.

An encryption context is a collection of non-secret key-value pairs that represents additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is optional when encrypting with a symmetric CMK, but it is highly recommended.

For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

" }, "GrantTokens":{ "shape":"GrantTokenList", @@ -1519,7 +1522,7 @@ }, "KeyId":{ "shape":"KeyIdType", - "documentation":"

The ID of the key used during encryption.

" + "documentation":"

The Amazon Resource Name (key ARN) of the CMK that was used to encrypt the plaintext.

" }, "EncryptionAlgorithm":{ "shape":"EncryptionAlgorithmSpec", @@ -1575,7 +1578,7 @@ }, "KeyId":{ "shape":"KeyIdType", - "documentation":"

Specifies the symmetric CMK that encrypts the private key in the data key pair. You cannot specify an asymmetric CMKs.

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a CMK in a different AWS account, you must use the key ARN or alias ARN.

For example:

To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

" + "documentation":"

Specifies the symmetric CMK that encrypts the private key in the data key pair. You cannot specify an asymmetric CMK or a CMK in a custom key store. To get the type and origin of your CMK, use the DescribeKey operation.

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a CMK in a different AWS account, you must use the key ARN or alias ARN.

For example:

To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

" }, "KeyPairSpec":{ "shape":"DataKeyPairSpec", @@ -1604,7 +1607,7 @@ }, "KeyId":{ "shape":"KeyIdType", - "documentation":"

The identifier of the CMK that encrypted the private key.

" + "documentation":"

The Amazon Resource Name (key ARN) of the CMK that encrypted the private key.

" }, "KeyPairSpec":{ "shape":"DataKeyPairSpec", @@ -1625,7 +1628,7 @@ }, "KeyId":{ "shape":"KeyIdType", - "documentation":"

Specifies the CMK that encrypts the private key in the data key pair. You must specify a symmetric CMK. You cannot use an asymmetric CMK. To get the type of your CMK, use the DescribeKey operation.

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\".

For example:

To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

" + "documentation":"

Specifies the CMK that encrypts the private key in the data key pair. You must specify a symmetric CMK. You cannot use an asymmetric CMK or a CMK in a custom key store. To get the type and origin of your CMK, use the DescribeKey operation.

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\".

For example:

To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

" }, "KeyPairSpec":{ "shape":"DataKeyPairSpec", @@ -1650,7 +1653,7 @@ }, "KeyId":{ "shape":"KeyIdType", - "documentation":"

Specifies the CMK that encrypted the private key in the data key pair. You must specify a symmetric CMK. You cannot use an asymmetric CMK. To get the type of your CMK, use the DescribeKey operation.

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\".

For example:

To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

" + "documentation":"

The Amazon Resource Name (key ARN) of the CMK that encrypted the private key.

" }, "KeyPairSpec":{ "shape":"DataKeyPairSpec", @@ -1697,7 +1700,7 @@ }, "KeyId":{ "shape":"KeyIdType", - "documentation":"

The identifier of the CMK that encrypted the data key.

" + "documentation":"

The Amazon Resource Name (key ARN) of the CMK that encrypted the data key.

" } } }, @@ -1736,7 +1739,7 @@ }, "KeyId":{ "shape":"KeyIdType", - "documentation":"

The identifier of the CMK that encrypted the data key.

" + "documentation":"

The Amazon Resource Name (key ARN) of the CMK that encrypted the data key.

" } } }, @@ -1834,7 +1837,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

The identifier of the CMK to use in a subsequent ImportKeyMaterial request. This is the same CMK specified in the GetParametersForImport request.

" + "documentation":"

The Amazon Resource Name (key ARN) of the CMK to use in a subsequent ImportKeyMaterial request. This is the same CMK specified in the GetParametersForImport request.

" }, "ImportToken":{ "shape":"CiphertextType", @@ -1869,7 +1872,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

The identifier of the asymmetric CMK from which the public key was downloaded.

" + "documentation":"

The Amazon Resource Name (key ARN) of the asymmetric CMK from which the public key was downloaded.

" }, "PublicKey":{ "shape":"PublicKeyType", @@ -1898,14 +1901,14 @@ "members":{ "EncryptionContextSubset":{ "shape":"EncryptionContextType", - "documentation":"

A list of key-value pairs that must be included in the encryption context of the cryptographic operation request. The grant allows the cryptographic operation only when the encryption context in the request includes the key-value pairs specified in this constraint, although it can include additional key-value pairs.

" + "documentation":"

A list of key-value pairs that must be included in the encryption context of the cryptographic operation request. The grant allows the cryptographic operation only when the encryption context in the request includes the key-value pairs specified in this constraint, although it can include additional key-value pairs.

" }, "EncryptionContextEquals":{ "shape":"EncryptionContextType", - "documentation":"

A list of key-value pairs that must match the encryption context in the cryptographic operation request. The grant allows the operation only when the encryption context in the request is the same as the encryption context specified in this constraint.

" + "documentation":"

A list of key-value pairs that must match the encryption context in the cryptographic operation request. The grant allows the operation only when the encryption context in the request is the same as the encryption context specified in this constraint.

" } }, - "documentation":"

Use this structure to allow cryptographic operations in the grant only when the operation request includes the specified encryption context.

AWS KMS applies the grant constraints only when the grant allows a cryptographic operation that accepts an encryption context as input, such as the following.

AWS KMS does not apply the grant constraints to other operations, such as DescribeKey or ScheduleKeyDeletion.

In a cryptographic operation, the encryption context in the decryption operation must be an exact, case-sensitive match for the keys and values in the encryption context of the encryption operation. Only the order of the pairs can vary.

However, in a grant constraint, the key in each key-value pair is not case sensitive, but the value is case sensitive.

To avoid confusion, do not use multiple encryption context pairs that differ only by case. To require a fully case-sensitive encryption context, use the kms:EncryptionContext: and kms:EncryptionContextKeys conditions in an IAM or key policy. For details, see kms:EncryptionContext: in the AWS Key Management Service Developer Guide .

" + "documentation":"

Use this structure to allow cryptographic operations in the grant only when the operation request includes the specified encryption context.

AWS KMS applies the grant constraints only to cryptographic operations that support an encryption context, that is, all cryptographic operations with a symmetric CMK. Grant constraints are not applied to operations that do not support an encryption context, such as cryptographic operations with asymmetric CMKs and management operations, such as DescribeKey or ScheduleKeyDeletion.

In a cryptographic operation, the encryption context in the decryption operation must be an exact, case-sensitive match for the keys and values in the encryption context of the encryption operation. Only the order of the pairs can vary.

However, in a grant constraint, the key in each key-value pair is not case sensitive, but the value is case sensitive.

To avoid confusion, do not use multiple encryption context pairs that differ only by case. To require a fully case-sensitive encryption context, use the kms:EncryptionContext: and kms:EncryptionContextKeys conditions in an IAM or key policy. For details, see kms:EncryptionContext: in the AWS Key Management Service Developer Guide .

" }, "GrantIdType":{ "type":"string", @@ -1937,7 +1940,7 @@ }, "GranteePrincipal":{ "shape":"PrincipalIdType", - "documentation":"

The principal that receives the grant's permissions.

" + "documentation":"

The identity that gets the permissions in the grant.

The GranteePrincipal field in the ListGrants response usually contains the user or role designated as the grantee principal in the grant. However, when the grantee principal in the grant is an AWS service, the GranteePrincipal field contains the service principal, which might represent several different grantee principals.

" }, "RetiringPrincipal":{ "shape":"PrincipalIdType", @@ -1956,7 +1959,7 @@ "documentation":"

A list of key-value pairs that must be present in the encryption context of certain subsequent operations that the grant allows.

" } }, - "documentation":"

Contains information about an entry in a list of grants.

" + "documentation":"

Contains information about a grant.

" }, "GrantNameType":{ "type":"string", @@ -2206,11 +2209,11 @@ }, "KeyUsage":{ "shape":"KeyUsageType", - "documentation":"

The cryptographic operations for which you can use the CMK.

" + "documentation":"

The cryptographic operations for which you can use the CMK.

" }, "KeyState":{ "shape":"KeyState", - "documentation":"

The state of the CMK.

For more information about how key state affects the use of a CMK, see How Key State Affects the Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" + "documentation":"

The current status of the CMK.

For more information about how key state affects the use of a CMK, see Key state: Effect on your CMK in the AWS Key Management Service Developer Guide.

" }, "DeletionDate":{ "shape":"DateType", @@ -2246,11 +2249,11 @@ }, "EncryptionAlgorithms":{ "shape":"EncryptionAlgorithmSpecList", - "documentation":"

A list of encryption algorithms that the CMK supports. You cannot use the CMK with other encryption algorithms within AWS KMS.

This field appears only when the KeyUsage of the CMK is ENCRYPT_DECRYPT.

" + "documentation":"

The encryption algorithms that the CMK supports. You cannot use the CMK with other encryption algorithms within AWS KMS.

This field appears only when the KeyUsage of the CMK is ENCRYPT_DECRYPT.

" }, "SigningAlgorithms":{ "shape":"SigningAlgorithmSpecList", - "documentation":"

A list of signing algorithms that the CMK supports. You cannot use the CMK with other signing algorithms within AWS KMS.

This field appears only when the KeyUsage of the CMK is SIGN_VERIFY.

" + "documentation":"

The signing algorithms that the CMK supports. You cannot use the CMK with other signing algorithms within AWS KMS.

This field appears only when the KeyUsage of the CMK is SIGN_VERIFY.

" } }, "documentation":"

Contains metadata about a customer master key (CMK).

This data type is used as a response element for the CreateKey and DescribeKey operations.

" @@ -2647,7 +2650,7 @@ }, "KeyId":{ "shape":"KeyIdType", - "documentation":"

Unique identifier of the CMK used to reencrypt the data.

" + "documentation":"

The Amazon Resource Name (key ARN) of the CMK that was used to reencrypt the data.

" }, "SourceEncryptionAlgorithm":{ "shape":"EncryptionAlgorithmSpec", @@ -2712,7 +2715,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

The unique identifier of the customer master key (CMK) for which deletion is scheduled.

" + "documentation":"

The Amazon Resource Name (key ARN) of the CMK whose deletion is scheduled.

" }, "DeletionDate":{ "shape":"DateType", @@ -2755,7 +2758,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

The Amazon Resource Name (ARN) of the asymmetric CMK that was used to sign the message.

" + "documentation":"

The Amazon Resource Name (key ARN) of the asymmetric CMK that was used to sign the message.

" }, "Signature":{ "shape":"CiphertextType", @@ -2977,7 +2980,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

The unique identifier for the asymmetric CMK that was used to verify the signature.

" + "documentation":"

The Amazon Resource Name (key ARN) of the asymmetric CMK that was used to verify the signature.

" }, "SignatureValid":{ "shape":"BooleanType", diff --git a/botocore/data/lambda/2015-03-31/service-2.json b/botocore/data/lambda/2015-03-31/service-2.json index a2e70177..73b9d23a 100644 --- a/botocore/data/lambda/2015-03-31/service-2.json +++ b/botocore/data/lambda/2015-03-31/service-2.json @@ -442,6 +442,10 @@ {"shape":"EC2UnexpectedException"}, {"shape":"SubnetIPAddressLimitReachedException"}, {"shape":"ENILimitReachedException"}, + {"shape":"EFSMountConnectivityException"}, + {"shape":"EFSMountFailureException"}, + {"shape":"EFSMountTimeoutException"}, + {"shape":"EFSIOException"}, {"shape":"EC2ThrottledException"}, {"shape":"EC2AccessDeniedException"}, {"shape":"InvalidSubnetIDException"}, @@ -697,7 +701,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Configures options for asynchronous invocation on a function, version, or alias. If a configuration already exists for a function, version, or alias, this operation overwrites it. If you exclude any settings, they are removed. To set one option without affecting existing settings for other options, use PutFunctionEventInvokeConfig.

By default, Lambda retries an asynchronous invocation twice if the function returns an error. It retains events in a queue for up to six hours. When an event fails all processing attempts or stays in the asynchronous invocation queue for too long, Lambda discards it. To retain discarded events, configure a dead-letter queue with UpdateFunctionConfiguration.

To send an invocation record to a queue, topic, function, or event bus, specify a destination. You can configure separate destinations for successful invocations (on-success) and events that fail all processing attempts (on-failure). You can configure destinations in addition to or instead of a dead-letter queue.

" + "documentation":"

Configures options for asynchronous invocation on a function, version, or alias. If a configuration already exists for a function, version, or alias, this operation overwrites it. If you exclude any settings, they are removed. To set one option without affecting existing settings for other options, use UpdateFunctionEventInvokeConfig.

By default, Lambda retries an asynchronous invocation twice if the function returns an error. It retains events in a queue for up to six hours. When an event fails all processing attempts or stays in the asynchronous invocation queue for too long, Lambda discards it. To retain discarded events, configure a dead-letter queue with UpdateFunctionConfiguration.

To send an invocation record to a queue, topic, function, or event bus, specify a destination. You can configure separate destinations for successful invocations (on-success) and events that fail all processing attempts (on-failure). You can configure destinations in addition to or instead of a dead-letter queue.

" }, "PutProvisionedConcurrencyConfig":{ "name":"PutProvisionedConcurrencyConfig", @@ -1100,7 +1104,7 @@ "members":{ "AdditionalVersionWeights":{ "shape":"AdditionalVersionWeights", - "documentation":"

The name of the second alias, and the percentage of traffic that's routed to it.

" + "documentation":"

The second version, and the percentage of traffic that's routed to it.

" } }, "documentation":"

The traffic-shifting configuration of a Lambda function alias.

" @@ -1179,7 +1183,7 @@ }, "RoutingConfig":{ "shape":"AliasRoutingConfiguration", - "documentation":"

The routing configuration of the alias.

" + "documentation":"

The routing configuration of the alias.

" } } }, @@ -1313,6 +1317,10 @@ "Layers":{ "shape":"LayerList", "documentation":"

A list of function layers to add to the function's execution environment. Specify each layer by its ARN, including the version.

" + }, + "FileSystemConfigs":{ + "shape":"FileSystemConfigList", + "documentation":"

Connection settings for an Amazon EFS file system.

" } } }, @@ -1506,6 +1514,46 @@ "error":{"httpStatusCode":502}, "exception":true }, + "EFSIOException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "Message":{"shape":"String"} + }, + "documentation":"

An error occurred when reading from or writing to a connected file system.

", + "error":{"httpStatusCode":410}, + "exception":true + }, + "EFSMountConnectivityException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "Message":{"shape":"String"} + }, + "documentation":"

The function couldn't make a network connection to the configured file system.

", + "error":{"httpStatusCode":408}, + "exception":true + }, + "EFSMountFailureException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "Message":{"shape":"String"} + }, + "documentation":"

The function couldn't mount the configured file system due to a permission or configuration issue.

", + "error":{"httpStatusCode":403}, + "exception":true + }, + "EFSMountTimeoutException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "Message":{"shape":"String"} + }, + "documentation":"

The function was able to make a network connection to the configured file system, but the mount operation timed out.

", + "error":{"httpStatusCode":408}, + "exception":true + }, "ENILimitReachedException":{ "type":"structure", "members":{ @@ -1650,6 +1698,34 @@ "min":0, "pattern":"[a-zA-Z0-9._\\-]+" }, + "FileSystemArn":{ + "type":"string", + "max":200, + "pattern":"arn:aws[a-zA-Z-]*:elasticfilesystem:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:access-point/fsap-[a-f0-9]{17}" + }, + "FileSystemConfig":{ + "type":"structure", + "required":[ + "Arn", + "LocalMountPath" + ], + "members":{ + "Arn":{ + "shape":"FileSystemArn", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon EFS access point that provides access to the file system.

" + }, + "LocalMountPath":{ + "shape":"LocalMountPath", + "documentation":"

The path where the function can access the file system, starting with /mnt/.

" + } + }, + "documentation":"

Details about the connection between a Lambda function and an Amazon EFS file system.

" + }, + "FileSystemConfigList":{ + "type":"list", + "member":{"shape":"FileSystemConfig"}, + "max":1 + }, "FunctionArn":{ "type":"string", "pattern":"arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?" @@ -1796,6 +1872,10 @@ "LastUpdateStatusReasonCode":{ "shape":"LastUpdateStatusReasonCode", "documentation":"

The reason code for the last update that was performed on the function.

" + }, + "FileSystemConfigs":{ + "shape":"FileSystemConfigList", + "documentation":"

Connection settings for an Amazon EFS file system.

" } }, "documentation":"

Details about a function's configuration.

" @@ -2922,6 +3002,11 @@ } } }, + "LocalMountPath":{ + "type":"string", + "max":160, + "pattern":"^/mnt/[a-zA-Z0-9-_.]+$" + }, "LogType":{ "type":"string", "enum":[ @@ -3432,7 +3517,7 @@ "Type":{"shape":"String"}, "Message":{"shape":"String"} }, - "documentation":"

The operation conflicts with the resource's availability. For example, you attempted to update an EventSource Mapping in CREATING, or tried to delete a EventSource mapping currently in the UPDATING state.

", + "documentation":"

The operation conflicts with the resource's availability. For example, you attempted to update an EventSource Mapping in CREATING, or tried to delete a EventSource mapping currently in the UPDATING state.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -3733,7 +3818,7 @@ }, "RoutingConfig":{ "shape":"AliasRoutingConfiguration", - "documentation":"

The routing configuration of the alias.

" + "documentation":"

The routing configuration of the alias.

" }, "RevisionId":{ "shape":"String", @@ -3890,6 +3975,10 @@ "Layers":{ "shape":"LayerList", "documentation":"

A list of function layers to add to the function's execution environment. Specify each layer by its ARN, including the version.

" + }, + "FileSystemConfigs":{ + "shape":"FileSystemConfigList", + "documentation":"

Connection settings for an Amazon EFS file system.

" } } }, diff --git a/botocore/data/lex-models/2017-04-19/service-2.json b/botocore/data/lex-models/2017-04-19/service-2.json index 78808bbe..1cf55e1c 100644 --- a/botocore/data/lex-models/2017-04-19/service-2.json +++ b/botocore/data/lex-models/2017-04-19/service-2.json @@ -1170,6 +1170,10 @@ "checksum":{ "shape":"String", "documentation":"

Checksum of the intent version created.

" + }, + "kendraConfiguration":{ + "shape":"KendraConfiguration", + "documentation":"

Configuration information, if any, for connecting an Amazon Kendra index with the AMAZON.KendraSearchIntent intent.

" } } }, @@ -2191,6 +2195,10 @@ "checksum":{ "shape":"String", "documentation":"

Checksum of the intent.

" + }, + "kendraConfiguration":{ + "shape":"KendraConfiguration", + "documentation":"

Configuration information, if any, to connect to an Amazon Kendra index with the AMAZON.KendraSearchIntent intent.

" } } }, @@ -2457,7 +2465,7 @@ "type":"string", "max":2048, "min":20, - "pattern":"^arn:[\\w\\-]+:iam::[\\d]{12}:role\\/[\\w+=,\\.@\\-]{1,64}$" + "pattern":"^arn:[\\w\\-]+:iam::[\\d]{12}:role/.+$" }, "ImportStatus":{ "type":"string", @@ -2541,6 +2549,34 @@ "exception":true, "fault":true }, + "KendraConfiguration":{ + "type":"structure", + "required":[ + "kendraIndex", + "role" + ], + "members":{ + "kendraIndex":{ + "shape":"KendraIndexArn", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Kendra index that you want the AMAZON.KendraSearchIntent intent to search. The index must be in the same account and Region as the Amazon Lex bot. If the Amazon Kendra index does not exist, you get an exception when you call the PutIntent operation.

" + }, + "queryFilterString":{ + "shape":"QueryFilterString", + "documentation":"

A query filter that Amazon Lex sends to Amazon Kendra to filter the response from the query. The filter is in the format defined by Amazon Kendra. For more information, see Filtering queries.

You can override this filter string with a new filter string at runtime.

" + }, + "role":{ + "shape":"roleArn", + "documentation":"

The Amazon Resource Name (ARN) of an IAM role that has permission to search the Amazon Kendra index. The role must be in the same account and Region as the Amazon Lex bot. If the role does not exist, you get an exception when you call the PutIntent operation.

" + } + }, + "documentation":"

Provides configuration information for the AMAZON.KendraSearchIntent intent. When you use this intent, Amazon Lex searches the specified Amazon Kendra index and returns documents from the index that match the user's utterance. For more information, see AMAZON.KendraSearchIntent.

" + }, + "KendraIndexArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws:kendra:[a-z]+-[a-z]+-[0-9]:[0-9]{12}:index\\/[a-zA-Z0-9][a-zA-Z0-9_-]*" + }, "KmsKeyArn":{ "type":"string", "max":2048, @@ -3084,6 +3120,10 @@ "createVersion":{ "shape":"Boolean", "documentation":"

When set to true a new numbered version of the intent is created. This is the same as calling the CreateIntentVersion operation. If you do not specify createVersion, the default is false.

" + }, + "kendraConfiguration":{ + "shape":"KendraConfiguration", + "documentation":"

Configuration information required to use the AMAZON.KendraSearchIntent intent to connect to an Amazon Kendra index. For more information, see AMAZON.KendraSearchIntent.

" } } }, @@ -3153,6 +3193,10 @@ "createVersion":{ "shape":"Boolean", "documentation":"

True if a new version of the intent was created. If the createVersion field was not specified in the request, the createVersion field is set to false in the response.

" + }, + "kendraConfiguration":{ + "shape":"KendraConfiguration", + "documentation":"

Configuration information, if any, required to connect to an Amazon Kendra index and use the AMAZON.KendraSearchIntent intent.

" } } }, @@ -3245,6 +3289,10 @@ } } }, + "QueryFilterString":{ + "type":"string", + "min":0 + }, "ReferenceType":{ "type":"string", "enum":[ @@ -3344,7 +3392,7 @@ }, "priority":{ "shape":"Priority", - "documentation":"

Directs Lex the order in which to elicit this slot value from the user. For example, if the intent has two slots with priorities 1 and 2, AWS Lex first elicits a value for the slot with priority 1.

If multiple slots share the same priority, the order in which Lex elicits values is arbitrary.

" + "documentation":"

Directs Amazon Lex the order in which to elicit this slot value from the user. For example, if the intent has two slots with priorities 1 and 2, Amazon Lex first elicits a value for the slot with priority 1.

If multiple slots share the same priority, the order in which Amazon Lex elicits values is arbitrary.

" }, "sampleUtterances":{ "shape":"SlotUtteranceList", @@ -3712,6 +3760,12 @@ "max":64, "min":1, "pattern":"\\$LATEST|[0-9]+" + }, + "roleArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws:iam::[0-9]{12}:role/.*" } }, "documentation":"Amazon Lex Build-Time Actions

Amazon Lex is an AWS service for building conversational voice and text interfaces. Use these actions to create, update, and delete conversational bots for new and existing client applications.

" diff --git a/botocore/data/lightsail/2016-11-28/service-2.json b/botocore/data/lightsail/2016-11-28/service-2.json index fd8165f3..edd9f1cd 100644 --- a/botocore/data/lightsail/2016-11-28/service-2.json +++ b/botocore/data/lightsail/2016-11-28/service-2.json @@ -1179,7 +1179,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Returns the data points for the specified Amazon Lightsail instance metric, given an instance name.

" + "documentation":"

Returns the data points for the specified Amazon Lightsail instance metric, given an instance name.

Metrics report the utilization of your resources, and the error counts generated by them. Monitor and collect metric data regularly to maintain the reliability, availability, and performance of your resources.

" }, "GetInstancePortStates":{ "name":"GetInstancePortStates", @@ -1350,7 +1350,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Returns information about health metrics for your Lightsail load balancer.

" + "documentation":"

Returns information about health metrics for your Lightsail load balancer.

Metrics report the utilization of your resources, and the error counts generated by them. Monitor and collect metric data regularly to maintain the reliability, availability, and performance of your resources.

" }, "GetLoadBalancerTlsCertificates":{ "name":"GetLoadBalancerTlsCertificates", @@ -1616,7 +1616,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Returns the data points of the specified metric for a database in Amazon Lightsail.

" + "documentation":"

Returns the data points of the specified metric for a database in Amazon Lightsail.

Metrics report the utilization of your resources, and the error counts generated by them. Monitor and collect metric data regularly to maintain the reliability, availability, and performance of your resources.

" }, "GetRelationalDatabaseParameters":{ "name":"GetRelationalDatabaseParameters", @@ -2614,7 +2614,7 @@ "members":{ "price":{ "shape":"float", - "documentation":"

The price in US dollars (e.g., 5.0).

" + "documentation":"

The price in US dollars (e.g., 5.0) of the bundle.

" }, "cpuCount":{ "shape":"integer", @@ -4762,7 +4762,7 @@ }, "metricName":{ "shape":"InstanceMetricName", - "documentation":"

The metric for which you want to return information.

Valid instance metric names are listed below, along with the most useful statistics to include in your request, and the published unit value.

" + "documentation":"

The metric for which you want to return information.

Valid instance metric names are listed below, along with the most useful statistics to include in your request, and the published unit value.

" }, "period":{ "shape":"MetricPeriod", @@ -4791,11 +4791,11 @@ "members":{ "metricName":{ "shape":"InstanceMetricName", - "documentation":"

The metric name to return data for.

" + "documentation":"

The name of the metric returned.

" }, "metricData":{ "shape":"MetricDatapointList", - "documentation":"

An array of key-value pairs containing information about the results of your get instance metric data request.

" + "documentation":"

An array of objects that describe the metric data returned.

" } } }, @@ -4994,7 +4994,7 @@ }, "unit":{ "shape":"MetricUnit", - "documentation":"

The unit for the metric data request. Valid units depend on the metric data being required. For the valid units with each available metric, see the metricName parameter.

" + "documentation":"

The unit for the metric data request. Valid units depend on the metric data being requested. For the valid units with each available metric, see the metricName parameter.

" }, "statistics":{ "shape":"MetricStatisticList", @@ -5007,11 +5007,11 @@ "members":{ "metricName":{ "shape":"LoadBalancerMetricName", - "documentation":"

The metric about which you are receiving information. Valid values are listed below, along with the most useful statistics to include in your request.

" + "documentation":"

The name of the metric returned.

" }, "metricData":{ "shape":"MetricDatapointList", - "documentation":"

An array of metric datapoint objects.

" + "documentation":"

An array of objects that describe the metric data returned.

" } } }, @@ -5375,7 +5375,7 @@ }, "unit":{ "shape":"MetricUnit", - "documentation":"

The unit for the metric data request. Valid units depend on the metric data being required. For the valid units with each available metric, see the metricName parameter.

" + "documentation":"

The unit for the metric data request. Valid units depend on the metric data being requested. For the valid units with each available metric, see the metricName parameter.

" }, "statistics":{ "shape":"MetricStatisticList", @@ -5388,11 +5388,11 @@ "members":{ "metricName":{ "shape":"RelationalDatabaseMetricName", - "documentation":"

The name of the metric.

" + "documentation":"

The name of the metric returned.

" }, "metricData":{ "shape":"MetricDatapointList", - "documentation":"

An object describing the result of your get relational database metric data request.

" + "documentation":"

An array of objects that describe the metric data returned.

" } } }, @@ -5864,7 +5864,9 @@ "NetworkOut", "StatusCheckFailed", "StatusCheckFailed_Instance", - "StatusCheckFailed_System" + "StatusCheckFailed_System", + "BurstCapacityTime", + "BurstCapacityPercentage" ] }, "InstanceNetworking":{ @@ -5897,15 +5899,15 @@ "members":{ "fromPort":{ "shape":"Port", - "documentation":"

The first port in a range of open ports on an instance.

Allowed ports:

" + "documentation":"

The first port in a range of open ports on an instance.

Allowed ports:

" }, "toPort":{ "shape":"Port", - "documentation":"

The last port in a range of open ports on an instance.

Allowed ports:

" + "documentation":"

The last port in a range of open ports on an instance.

Allowed ports:

" }, "protocol":{ "shape":"NetworkProtocol", - "documentation":"

The IP protocol name.

The name can be one of the following:

" + "documentation":"

The IP protocol name.

The name can be one of the following:

" }, "accessFrom":{ "shape":"string", @@ -5943,15 +5945,15 @@ "members":{ "fromPort":{ "shape":"Port", - "documentation":"

The first port in a range of open ports on an instance.

Allowed ports:

" + "documentation":"

The first port in a range of open ports on an instance.

Allowed ports:

" }, "toPort":{ "shape":"Port", - "documentation":"

The last port in a range of open ports on an instance.

Allowed ports:

" + "documentation":"

The last port in a range of open ports on an instance.

Allowed ports:

" }, "protocol":{ "shape":"NetworkProtocol", - "documentation":"

The IP protocol name.

The name can be one of the following:

" + "documentation":"

The IP protocol name.

The name can be one of the following:

" }, "state":{ "shape":"PortState", @@ -6324,7 +6326,7 @@ }, "status":{ "shape":"LoadBalancerTlsCertificateStatus", - "documentation":"

The status of the SSL/TLS certificate. Valid values are below.

" + "documentation":"

The validation status of the SSL/TLS certificate. Valid values are below.

" }, "domainName":{ "shape":"DomainName", @@ -6609,7 +6611,9 @@ "DiskQueueDepth", "FreeStorageSpace", "NetworkReceiveThroughput", - "NetworkTransmitThroughput" + "NetworkTransmitThroughput", + "BurstCapacityTime", + "BurstCapacityPercentage" ] }, "MetricPeriod":{ @@ -6969,15 +6973,15 @@ "members":{ "fromPort":{ "shape":"Port", - "documentation":"

The first port in a range of open ports on an instance.

Allowed ports:

" + "documentation":"

The first port in a range of open ports on an instance.

Allowed ports:

" }, "toPort":{ "shape":"Port", - "documentation":"

The last port in a range of open ports on an instance.

Allowed ports:

" + "documentation":"

The last port in a range of open ports on an instance.

Allowed ports:

" }, "protocol":{ "shape":"NetworkProtocol", - "documentation":"

The IP protocol name.

The name can be one of the following:

" + "documentation":"

The IP protocol name.

The name can be one of the following:

" }, "cidrs":{ "shape":"StringList", @@ -7031,7 +7035,7 @@ }, "metricName":{ "shape":"MetricName", - "documentation":"

The name of the metric to associate with the alarm.

You can configure up to two alarms per metric.

The following metrics are available for each resource type:

" + "documentation":"

The name of the metric to associate with the alarm.

You can configure up to two alarms per metric.

The following metrics are available for each resource type:

For more information about these metrics, see Metrics available in Lightsail.

" }, "monitoredResourceName":{ "shape":"ResourceName", diff --git a/botocore/data/macie2/2020-01-01/service-2.json b/botocore/data/macie2/2020-01-01/service-2.json index 9d4d6ccc..7f478e10 100644 --- a/botocore/data/macie2/2020-01-01/service-2.json +++ b/botocore/data/macie2/2020-01-01/service-2.json @@ -57,52 +57,6 @@ ], "documentation": "

Accepts an Amazon Macie membership invitation that was received from a specific account.

" }, - "ArchiveFindings": { - "name": "ArchiveFindings", - "http": { - "method": "POST", - "requestUri": "/findings/archive", - "responseCode": 200 - }, - "input": { - "shape": "ArchiveFindingsRequest" - }, - "output": { - "shape": "ArchiveFindingsResponse", - "documentation": "

The request succeeded and there isn't any content to include in the body of the response (No Content).

" - }, - "errors": [ - { - "shape": "ValidationException", - "documentation": "

The request failed because it contains a syntax error.

" - }, - { - "shape": "InternalServerException", - "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" - }, - { - "shape": "ServiceQuotaExceededException", - "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" - }, - { - "shape": "AccessDeniedException", - "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" - }, - { - "shape": "ResourceNotFoundException", - "documentation": "

The request failed because the specified resource wasn't found.

" - }, - { - "shape": "ThrottlingException", - "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" - }, - { - "shape": "ConflictException", - "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" - } - ], - "documentation": "

Archives one or more findings.

" - }, "BatchGetCustomDataIdentifiers": { "name": "BatchGetCustomDataIdentifiers", "http": { @@ -1158,7 +1112,7 @@ "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" } ], - "documentation": "

Retrieves the configuration settings for exporting data classification results.

" + "documentation": "

Retrieves the configuration settings for storing data classification results.

" }, "GetCustomDataIdentifier": { "name": "GetCustomDataIdentifier", @@ -2003,7 +1957,7 @@ "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" } ], - "documentation": "

Creates or updates the configuration settings for exporting data classification results.

" + "documentation": "

Creates or updates the configuration settings for storing data classification results.

" }, "TagResource": { "name": "TagResource", @@ -2068,52 +2022,6 @@ ], "documentation": "

Tests a custom data identifier.

" }, - "UnarchiveFindings": { - "name": "UnarchiveFindings", - "http": { - "method": "POST", - "requestUri": "/findings/unarchive", - "responseCode": 200 - }, - "input": { - "shape": "UnarchiveFindingsRequest" - }, - "output": { - "shape": "UnarchiveFindingsResponse", - "documentation": "

The request succeeded and there isn't any content to include in the body of the response (No Content).

" - }, - "errors": [ - { - "shape": "ValidationException", - "documentation": "

The request failed because it contains a syntax error.

" - }, - { - "shape": "InternalServerException", - "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" - }, - { - "shape": "ServiceQuotaExceededException", - "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" - }, - { - "shape": "AccessDeniedException", - "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" - }, - { - "shape": "ResourceNotFoundException", - "documentation": "

The request failed because the specified resource wasn't found.

" - }, - { - "shape": "ThrottlingException", - "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" - }, - { - "shape": "ConflictException", - "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" - } - ], - "documentation": "

Reactivates (unarchives) one or more findings.

" - }, "UntagResource": { "name": "UntagResource", "http": { @@ -2498,23 +2406,6 @@ }, "documentation": "

Reserved for future use.

" }, - "ArchiveFindingsRequest": { - "type": "structure", - "members": { - "findingIds": { - "shape": "__listOf__string", - "locationName": "findingIds", - "documentation": "

An array of strings that lists the unique identifiers for the findings to archive.

" - } - }, - "required": [ - "findingIds" - ] - }, - "ArchiveFindingsResponse": { - "type": "structure", - "members": {} - }, "AssumedRole": { "type": "structure", "members": { @@ -2977,10 +2868,10 @@ "s3Destination": { "shape": "S3Destination", "locationName": "s3Destination", - "documentation": "

The S3 bucket to export data classification results to, and the encryption settings to use when storing results in that bucket.

" + "documentation": "

The S3 bucket to store data classification results in, and the encryption settings to use when storing results in that bucket.

" } }, - "documentation": "

Specifies where to export data classification results to, and the encryption settings to use when storing results in that location. Currently, you can export classification results only to an S3 bucket.

" + "documentation": "

Specifies where to store data classification results, and the encryption settings to use when storing results in that location. Currently, you can store classification results only in an S3 bucket.

" }, "ClassificationResult": { "type": "structure", @@ -3148,7 +3039,7 @@ "maximumMatchDistance": { "shape": "__integer", "locationName": "maximumMatchDistance", - "documentation": "

The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern. The distance can be 1 - 300 characters. The default value is 300.

" + "documentation": "

The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern. The distance can be 1 - 300 characters. The default value is 50.

" }, "name": { "shape": "__string", @@ -3183,7 +3074,7 @@ "action": { "shape": "FindingsFilterAction", "locationName": "action", - "documentation": "

The action to perform on findings that meet the filter criteria (findingCriteria). Valid values are: ARCHIVE, automatically archive the findings; and, NOOP, don't perform any action on the findings.

" + "documentation": "

The action to perform on findings that meet the filter criteria (findingCriteria). Valid values are: ARCHIVE, suppress (automatically archive) the findings; and, NOOP, don't perform any action on the findings.

" }, "clientToken": { "shape": "__string", @@ -3827,7 +3718,7 @@ "findingPublishingFrequency": { "shape": "FindingPublishingFrequency", "locationName": "findingPublishingFrequency", - "documentation": "Specifies how often to publish findings for the account. This includes adding findings to AWS Security Hub and exporting finding events to Amazon CloudWatch." + "documentation": "Specifies how often to publish updates to policy findings for the account. This includes publishing updates to AWS Security Hub and Amazon EventBridge (formerly called Amazon CloudWatch Events)." }, "status": { "shape": "MacieStatus", @@ -3869,7 +3760,8 @@ "enum": [ "NONE", "AES256", - "aws:kms" + "aws:kms", + "UNKNOWN" ] }, "ErrorCode": { @@ -4025,6 +3917,7 @@ }, "FindingActionType": { "type": "string", + "documentation": "

The type of action that occurred for the resource and produced the policy finding.

", "enum": [ "AWS_API_CALL" ] @@ -4045,10 +3938,10 @@ "userIdentity": { "shape": "UserIdentity", "locationName": "userIdentity", - "documentation": "

The name and type of entity who performed the action on the affected resource.

" + "documentation": "

The name and type of entity that performed the action on the affected resource.

" } }, - "documentation": "

Provides information about an entity who performed an action that produced a policy finding for a resource.

" + "documentation": "

Provides information about an entity that performed an action that produced a policy finding for a resource.

" }, "FindingCategory": { "type": "string", @@ -4071,7 +3964,7 @@ }, "FindingPublishingFrequency": { "type": "string", - "documentation": "

The frequency with which Amazon Macie publishes findings for an account. This includes adding findings to AWS Security Hub and exporting finding events to Amazon CloudWatch. Valid values are:

", + "documentation": "

The frequency with which Amazon Macie publishes updates to policy findings for an account. This includes publishing updates to AWS Security Hub and Amazon EventBridge (formerly called Amazon CloudWatch Events). Valid values are:

", "enum": [ "FIFTEEN_MINUTES", "ONE_HOUR", @@ -4120,7 +4013,7 @@ }, "FindingsFilterAction": { "type": "string", - "documentation": "

The action to perform on findings that meet the filter criteria. Valid values are:

", + "documentation": "

The action to perform on findings that meet the filter criteria. To suppress (automatically archive) findings that meet the criteria, set this value to ARCHIVE. Valid values are:

", "enum": [ "ARCHIVE", "NOOP" @@ -4222,7 +4115,7 @@ "configuration": { "shape": "ClassificationExportConfiguration", "locationName": "configuration", - "documentation": "

The location that data classification results are exported to, and the encryption settings that are used when storing results in that location.

" + "documentation": "

The location where data classification results are stored, and the encryption settings that are used when storing results in that location.

" } } }, @@ -4358,7 +4251,7 @@ "action": { "shape": "FindingsFilterAction", "locationName": "action", - "documentation": "

The action that's performed on findings that meet the filter criteria (findingCriteria). Possible values are: ARCHIVE, automatically archive the findings; and, NOOP, don't perform any action on the findings.

" + "documentation": "

The action that's performed on findings that meet the filter criteria (findingCriteria). Possible values are: ARCHIVE, suppress (automatically archive) the findings; and, NOOP, don't perform any action on the findings.

" }, "arn": { "shape": "__string", @@ -4454,7 +4347,7 @@ "findingPublishingFrequency": { "shape": "FindingPublishingFrequency", "locationName": "findingPublishingFrequency", - "documentation": "

The frequency with which Amazon Macie publishes findings for the account. This includes adding findings to AWS Security Hub and exporting finding events to Amazon CloudWatch.

" + "documentation": "

The frequency with which Amazon Macie publishes updates to policy findings for the account. This includes publishing updates to AWS Security Hub and Amazon EventBridge (formerly called Amazon CloudWatch Events).

" }, "serviceRole": { "shape": "__string", @@ -5386,7 +5279,7 @@ "actor": { "shape": "FindingActor", "locationName": "actor", - "documentation": "

The entity who performed the action that produced the finding.

" + "documentation": "

The entity that performed the action that produced the finding.

" } }, "documentation": "

Provides detailed information about a policy finding.

" @@ -5397,7 +5290,7 @@ "configuration": { "shape": "ClassificationExportConfiguration", "locationName": "configuration", - "documentation": "

The location to export data classification results to, and the encryption settings to use when storing results in that location.

" + "documentation": "

The location to store data classification results in, and the encryption settings to use when storing results in that location.

" } }, "required": [ @@ -5410,7 +5303,7 @@ "configuration": { "shape": "ClassificationExportConfiguration", "locationName": "configuration", - "documentation": "

The location that data classification results are exported to, and the encryption settings that are used when storing results in that location.

" + "documentation": "

The location where the data classification results are stored, and the encryption settings that are used when storing results in that location.

" } } }, @@ -5559,7 +5452,7 @@ "bucketName": { "shape": "__string", "locationName": "bucketName", - "documentation": "

The Amazon Resource Name (ARN) of the bucket. This must be the ARN of an existing bucket.

" + "documentation": "

The name of the bucket.

" }, "keyPrefix": { "shape": "__string", @@ -5569,10 +5462,10 @@ "kmsKeyArn": { "shape": "__string", "locationName": "kmsKeyArn", - "documentation": "

The Amazon Resource Name (ARN) of the AWS Key Management Service master key to use for encryption of the exported results. This must be the ARN of an existing KMS key. In addition, the key must be in the same AWS Region as the bucket.

" + "documentation": "

The Amazon Resource Name (ARN) of the AWS Key Management Service customer master key (CMK) to use for encryption of the results. This must be the ARN of an existing CMK that's in the same AWS Region as the bucket.

" } }, - "documentation": "

Specifies an S3 bucket to export data classification results to, and the encryption settings to use when storing results in that bucket.

", + "documentation": "

Specifies an S3 bucket to store data classification results in, and the encryption settings to use when storing results in that bucket.

", "required": [ "bucketName", "kmsKeyArn" @@ -5700,7 +5593,7 @@ "category": { "shape": "SensitiveDataItemCategory", "locationName": "category", - "documentation": "

The category of sensitive data that was detected. For example, FINANCIAL_INFORMATION, for financial information such as credit card numbers, or PERSONAL_INFORMATION, for personally identifiable information such as names and addresses.

" + "documentation": "

The category of sensitive data that was detected. For example: FINANCIAL_INFORMATION, for financial information such as credit card numbers; PERSONAL_INFORMATION, for personally identifiable information such as full names and mailing addresses; or, CUSTOM_IDENTIFIER, for data that was detected by a custom data identifier.

" }, "detections": { "shape": "DefaultDetections", @@ -5717,6 +5610,7 @@ }, "SensitiveDataItemCategory": { "type": "string", + "documentation": "

The category of sensitive data that was detected and produced the finding.

", "enum": [ "FINANCIAL_INFORMATION", "PERSONAL_INFORMATION", @@ -5790,7 +5684,7 @@ "documentation": "

The source and type of credentials that the entity obtained.

" } }, - "documentation": "

Provides information about a session that was created for an entity who performed an action by using temporary security credentials.

" + "documentation": "

Provides information about a session that was created for an entity that performed an action by using temporary security credentials.

" }, "SessionContextAttributes": { "type": "structure", @@ -6037,7 +5931,7 @@ "maximumMatchDistance": { "shape": "__integer", "locationName": "maximumMatchDistance", - "documentation": "

The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern. The distance can be 1 - 300 characters. The default value is 300.

" + "documentation": "

The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern. The distance can be 1 - 300 characters. The default value is 50.

" }, "regex": { "shape": "__string", @@ -6080,23 +5974,6 @@ "httpStatusCode": 429 } }, - "UnarchiveFindingsRequest": { - "type": "structure", - "members": { - "findingIds": { - "shape": "__listOf__string", - "locationName": "findingIds", - "documentation": "

An array of strings that lists the unique identifiers for the findings to reactivate.

" - } - }, - "required": [ - "findingIds" - ] - }, - "UnarchiveFindingsResponse": { - "type": "structure", - "members": {} - }, "Unit": { "type": "string", "enum": [ @@ -6179,7 +6056,7 @@ "action": { "shape": "FindingsFilterAction", "locationName": "action", - "documentation": "

The action to perform on findings that meet the filter criteria (findingCriteria). Valid values are: ARCHIVE, automatically archive the findings; and, NOOP, don't perform any action on the findings.

" + "documentation": "

The action to perform on findings that meet the filter criteria (findingCriteria). Valid values are: ARCHIVE, suppress (automatically archive) the findings; and, NOOP, don't perform any action on the findings.

" }, "description": { "shape": "__string", @@ -6233,7 +6110,7 @@ "findingPublishingFrequency": { "shape": "FindingPublishingFrequency", "locationName": "findingPublishingFrequency", - "documentation": "Specifies how often to publish findings for the account. This includes adding findings to AWS Security Hub and exporting finding events to Amazon CloudWatch." + "documentation": "Specifies how often to publish updates to policy findings for the account. This includes publishing updates to AWS Security Hub and Amazon EventBridge (formerly called Amazon CloudWatch Events)." }, "status": { "shape": "MacieStatus", @@ -6659,5 +6536,5 @@ "timestampFormat": "unixTimestamp" } }, - "documentation": "

Amazon Macie

" + "documentation": "

Amazon Macie is a fully managed data security and data privacy service that uses machine learning and pattern matching to discover and protect your sensitive data in AWS. Macie automates the discovery of sensitive data, such as PII and intellectual property, to provide you with insight into the data that your organization stores in AWS. Macie also provides an inventory of your Amazon S3 buckets, which it continually monitors for you. If Macie detects sensitive data or potential data access issues, it generates detailed findings for you to review and act upon as necessary.

" } \ No newline at end of file diff --git a/botocore/data/mediaconvert/2017-08-29/service-2.json b/botocore/data/mediaconvert/2017-08-29/service-2.json index 3b7ceab2..d34f00a8 100644 --- a/botocore/data/mediaconvert/2017-08-29/service-2.json +++ b/botocore/data/mediaconvert/2017-08-29/service-2.json @@ -1392,6 +1392,8 @@ "AC3", "EAC3", "EAC3_ATMOS", + "VORBIS", + "OPUS", "PASSTHROUGH" ] }, @@ -1438,13 +1440,23 @@ "locationName": "mp3Settings", "documentation": "Required when you set Codec, under AudioDescriptions>CodecSettings, to the value MP3." }, + "OpusSettings": { + "shape": "OpusSettings", + "locationName": "opusSettings", + "documentation": "Required when you set Codec, under AudioDescriptions>CodecSettings, to the value OPUS." + }, + "VorbisSettings": { + "shape": "VorbisSettings", + "locationName": "vorbisSettings", + "documentation": "Required when you set Codec, under AudioDescriptions>CodecSettings, to the value Vorbis." + }, "WavSettings": { "shape": "WavSettings", "locationName": "wavSettings", "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value WAV." } }, - "documentation": "Audio codec settings (CodecSettings) under (AudioDescriptions) contains the group of settings related to audio encoding. The settings in this group vary depending on the value that you choose for Audio codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * AAC, AacSettings * MP2, Mp2Settings * MP3, Mp3Settings * WAV, WavSettings * AIFF, AiffSettings * AC3, Ac3Settings * EAC3, Eac3Settings * EAC3_ATMOS, Eac3AtmosSettings" + "documentation": "Audio codec settings (CodecSettings) under (AudioDescriptions) contains the group of settings related to audio encoding. The settings in this group vary depending on the value that you choose for Audio codec (Codec). 
For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * AAC, AacSettings * MP2, Mp2Settings * MP3, Mp3Settings * WAV, WavSettings * AIFF, AiffSettings * AC3, Ac3Settings * EAC3, Eac3Settings * EAC3_ATMOS, Eac3AtmosSettings * VORBIS, VorbisSettings * OPUS, OpusSettings" }, "AudioDefaultSelection": { "type": "string", @@ -1480,7 +1492,7 @@ "CodecSettings": { "shape": "AudioCodecSettings", "locationName": "codecSettings", - "documentation": "Audio codec settings (CodecSettings) under (AudioDescriptions) contains the group of settings related to audio encoding. The settings in this group vary depending on the value that you choose for Audio codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * AAC, AacSettings * MP2, Mp2Settings * MP3, Mp3Settings * WAV, WavSettings * AIFF, AiffSettings * AC3, Ac3Settings * EAC3, Eac3Settings * EAC3_ATMOS, Eac3AtmosSettings" + "documentation": "Audio codec settings (CodecSettings) under (AudioDescriptions) contains the group of settings related to audio encoding. The settings in this group vary depending on the value that you choose for Audio codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * AAC, AacSettings * MP2, Mp2Settings * MP3, Mp3Settings * WAV, WavSettings * AIFF, AiffSettings * AC3, Ac3Settings * EAC3, Eac3Settings * EAC3_ATMOS, Eac3AtmosSettings * VORBIS, VorbisSettings * OPUS, OpusSettings" }, "CustomLanguageCode": { "shape": "__stringPatternAZaZ23AZaZ", @@ -1602,7 +1614,7 @@ "documentation": "Enable this setting on one audio selector to set it as the default for the job. The service uses this default for outputs where it can't find the specified input audio. If you don't set a default, those outputs have no audio." 
}, "ExternalAudioFileInput": { - "shape": "__stringPatternS3MM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEEHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE", + "shape": "__stringPatternS3WWEEBBMMMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEEHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE", "locationName": "externalAudioFileInput", "documentation": "Specifies audio data from an external file source." }, @@ -2148,15 +2160,15 @@ "FramerateDenominator": { "shape": "__integerMin1Max1001", "locationName": "framerateDenominator", - "documentation": "Specify the denominator of the fraction that represents the framerate for the setting Caption source framerate (CaptionSourceFramerate). Use this setting along with the setting Framerate numerator (framerateNumerator)." + "documentation": "Specify the denominator of the fraction that represents the frame rate for the setting Caption source frame rate (CaptionSourceFramerate). Use this setting along with the setting Framerate numerator (framerateNumerator)." 
}, "FramerateNumerator": { "shape": "__integerMin1Max60000", "locationName": "framerateNumerator", - "documentation": "Specify the numerator of the fraction that represents the framerate for the setting Caption source framerate (CaptionSourceFramerate). Use this setting along with the setting Framerate denominator (framerateDenominator)." + "documentation": "Specify the numerator of the fraction that represents the frame rate for the setting Caption source frame rate (CaptionSourceFramerate). Use this setting along with the setting Framerate denominator (framerateDenominator)." } }, - "documentation": "Ignore this setting unless your input captions format is SCC. To have the service compensate for differing framerates between your input captions and input video, specify the framerate of the captions file. Specify this value as a fraction, using the settings Framerate numerator (framerateNumerator) and Framerate denominator (framerateDenominator). For example, you might specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, or 30000 / 1001 for 29.97 fps." + "documentation": "Ignore this setting unless your input captions format is SCC. To have the service compensate for differing frame rates between your input captions and input video, specify the frame rate of the captions file. Specify this value as a fraction, using the settings Framerate numerator (framerateNumerator) and Framerate denominator (framerateDenominator). For example, you might specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, or 30000 / 1001 for 29.97 fps." }, "CaptionSourceSettings": { "type": "structure", @@ -2676,6 +2688,7 @@ "MP4", "MPD", "MXF", + "WEBM", "RAW" ] }, @@ -4114,7 +4127,7 @@ "Framerate": { "shape": "CaptionSourceFramerate", "locationName": "framerate", - "documentation": "Ignore this setting unless your input captions format is SCC. 
To have the service compensate for differing framerates between your input captions and input video, specify the framerate of the captions file. Specify this value as a fraction, using the settings Framerate numerator (framerateNumerator) and Framerate denominator (framerateDenominator). For example, you might specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, or 30000 / 1001 for 29.97 fps." + "documentation": "Ignore this setting unless your input captions format is SCC. To have the service compensate for differing frame rates between your input captions and input video, specify the frame rate of the captions file. Specify this value as a fraction, using the settings Framerate numerator (framerateNumerator) and Framerate denominator (framerateDenominator). For example, you might specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, or 30000 / 1001 for 29.97 fps." }, "SourceFile": { "shape": "__stringMin14PatternS3SccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTXmlXMLSmiSMIHttpsSccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTXmlXMLSmiSMI", @@ -4363,7 +4376,7 @@ }, "H264FramerateConversionAlgorithm": { "type": "string", - "documentation": "When set to INTERPOLATE, produces smoother motion during frame rate conversion.", + "documentation": "Optional. Specify how the transcoder performs framerate conversion. The default behavior is to use duplicate drop conversion.", "enum": [ "DUPLICATE_DROP", "INTERPOLATE" @@ -4398,7 +4411,7 @@ }, "H264ParControl": { "type": "string", - "documentation": "Using the API, enable ParFollowSource if you want the service to use the pixel aspect ratio from the input. Using the console, do this by choosing Follow source for Pixel aspect ratio.", + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To use a different PAR, choose (SPECIFIED). 
In the console, SPECIFIED corresponds to any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -4406,7 +4419,7 @@ }, "H264QualityTuningLevel": { "type": "string", - "documentation": "Use Quality tuning level (H264QualityTuningLevel) to specifiy whether to use fast single-pass, high-quality singlepass, or high-quality multipass video encoding.", + "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding.", "enum": [ "SINGLE_PASS", "SINGLE_PASS_HQ", @@ -4511,7 +4524,7 @@ "FramerateConversionAlgorithm": { "shape": "H264FramerateConversionAlgorithm", "locationName": "framerateConversionAlgorithm", - "documentation": "When set to INTERPOLATE, produces smoother motion during frame rate conversion." + "documentation": "Optional. Specify how the transcoder performs framerate conversion. The default behavior is to use duplicate drop conversion." }, "FramerateDenominator": { "shape": "__integerMin1Max2147483647", @@ -4581,7 +4594,7 @@ "ParControl": { "shape": "H264ParControl", "locationName": "parControl", - "documentation": "Using the API, enable ParFollowSource if you want the service to use the pixel aspect ratio from the input. Using the console, do this by choosing Follow source for Pixel aspect ratio." + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To use a different PAR, choose (SPECIFIED). In the console, SPECIFIED corresponds to any value other than Follow source. 
When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." }, "ParDenominator": { "shape": "__integerMin1Max2147483647", @@ -4596,7 +4609,7 @@ "QualityTuningLevel": { "shape": "H264QualityTuningLevel", "locationName": "qualityTuningLevel", - "documentation": "Use Quality tuning level (H264QualityTuningLevel) to specifiy whether to use fast single-pass, high-quality singlepass, or high-quality multipass video encoding." + "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding." }, "QvbrSettings": { "shape": "H264QvbrSettings", @@ -4782,7 +4795,7 @@ }, "H265FramerateControl": { "type": "string", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job sepecification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. 
The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -4825,7 +4838,7 @@ }, "H265ParControl": { "type": "string", - "documentation": "Using the API, enable ParFollowSource if you want the service to use the pixel aspect ratio from the input. Using the console, do this by choosing Follow source for Pixel aspect ratio.", + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To use a different PAR, choose (SPECIFIED). In the console, SPECIFIED corresponds to any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -4833,7 +4846,7 @@ }, "H265QualityTuningLevel": { "type": "string", - "documentation": "Use Quality tuning level (H265QualityTuningLevel) to specifiy whether to use fast single-pass, high-quality singlepass, or high-quality multipass video encoding.", + "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. 
The default behavior is faster, lower quality, single-pass encoding.", "enum": [ "SINGLE_PASS", "SINGLE_PASS_HQ", @@ -4929,7 +4942,7 @@ "FramerateControl": { "shape": "H265FramerateControl", "locationName": "framerateControl", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job sepecification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." 
}, "FramerateConversionAlgorithm": { "shape": "H265FramerateConversionAlgorithm", @@ -5004,7 +5017,7 @@ "ParControl": { "shape": "H265ParControl", "locationName": "parControl", - "documentation": "Using the API, enable ParFollowSource if you want the service to use the pixel aspect ratio from the input. Using the console, do this by choosing Follow source for Pixel aspect ratio." + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To use a different PAR, choose (SPECIFIED). In the console, SPECIFIED corresponds to any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." }, "ParDenominator": { "shape": "__integerMin1Max2147483647", @@ -5019,7 +5032,7 @@ "QualityTuningLevel": { "shape": "H265QualityTuningLevel", "locationName": "qualityTuningLevel", - "documentation": "Use Quality tuning level (H265QualityTuningLevel) to specifiy whether to use fast single-pass, high-quality singlepass, or high-quality multipass video encoding." + "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding." }, "QvbrSettings": { "shape": "H265QvbrSettings", @@ -5703,12 +5716,12 @@ "AudioSelectors": { "shape": "__mapOfAudioSelector", "locationName": "audioSelectors", - "documentation": "Use Audio selectors (AudioSelectors) to specify a track or set of tracks from the input that you will use in your outputs. You can use mutiple Audio selectors per input." + "documentation": "Use Audio selectors (AudioSelectors) to specify a track or set of tracks from the input that you will use in your outputs. You can use multiple Audio selectors per input." 
}, "CaptionSelectors": { "shape": "__mapOfCaptionSelector", "locationName": "captionSelectors", - "documentation": "Use Captions selectors (CaptionSelectors) to specify the captions data from the input that you will use in your outputs. You can use mutiple captions selectors per input." + "documentation": "Use Captions selectors (CaptionSelectors) to specify the captions data from the input that you will use in your outputs. You can use multiple captions selectors per input." }, "Crop": { "shape": "Rectangle", @@ -5718,7 +5731,7 @@ "DeblockFilter": { "shape": "InputDeblockFilter", "locationName": "deblockFilter", - "documentation": "Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. Default is disabled. Only manaully controllable for MPEG2 and uncompressed video inputs." + "documentation": "Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs." }, "DecryptionSettings": { "shape": "InputDecryptionSettings", @@ -5811,7 +5824,7 @@ }, "InputDeblockFilter": { "type": "string", - "documentation": "Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. Default is disabled. Only manaully controllable for MPEG2 and uncompressed video inputs.", + "documentation": "Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs.", "enum": [ "ENABLED", "DISABLED" @@ -5890,12 +5903,12 @@ "AudioSelectors": { "shape": "__mapOfAudioSelector", "locationName": "audioSelectors", - "documentation": "Use Audio selectors (AudioSelectors) to specify a track or set of tracks from the input that you will use in your outputs. You can use mutiple Audio selectors per input." + "documentation": "Use Audio selectors (AudioSelectors) to specify a track or set of tracks from the input that you will use in your outputs. 
You can use multiple Audio selectors per input." }, "CaptionSelectors": { "shape": "__mapOfCaptionSelector", "locationName": "captionSelectors", - "documentation": "Use Captions selectors (CaptionSelectors) to specify the captions data from the input that you will use in your outputs. You can use mutiple captions selectors per input." + "documentation": "Use Captions selectors (CaptionSelectors) to specify the captions data from the input that you will use in your outputs. You can use multiple captions selectors per input." }, "Crop": { "shape": "Rectangle", @@ -5905,7 +5918,7 @@ "DeblockFilter": { "shape": "InputDeblockFilter", "locationName": "deblockFilter", - "documentation": "Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. Default is disabled. Only manaully controllable for MPEG2 and uncompressed video inputs." + "documentation": "Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs." }, "DenoiseFilter": { "shape": "InputDenoiseFilter", @@ -7582,7 +7595,7 @@ }, "Mpeg2FramerateControl": { "type": "string", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job sepecification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. 
Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -7590,7 +7603,7 @@ }, "Mpeg2FramerateConversionAlgorithm": { "type": "string", - "documentation": "When set to INTERPOLATE, produces smoother motion during frame rate conversion.", + "documentation": "Optional. Specify how the transcoder performs framerate conversion. The default behavior is to use duplicate drop conversion.", "enum": [ "DUPLICATE_DROP", "INTERPOLATE" @@ -7628,7 +7641,7 @@ }, "Mpeg2ParControl": { "type": "string", - "documentation": "Using the API, enable ParFollowSource if you want the service to use the pixel aspect ratio from the input. Using the console, do this by choosing Follow source for Pixel aspect ratio.", + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To use a different PAR, choose (SPECIFIED). 
In the console, SPECIFIED corresponds to any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -7636,7 +7649,7 @@ }, "Mpeg2QualityTuningLevel": { "type": "string", - "documentation": "Use Quality tuning level (Mpeg2QualityTuningLevel) to specifiy whether to use single-pass or multipass video encoding.", + "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding.", "enum": [ "SINGLE_PASS", "MULTI_PASS" @@ -7689,12 +7702,12 @@ "FramerateControl": { "shape": "Mpeg2FramerateControl", "locationName": "framerateControl", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job sepecification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. 
If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." }, "FramerateConversionAlgorithm": { "shape": "Mpeg2FramerateConversionAlgorithm", "locationName": "framerateConversionAlgorithm", - "documentation": "When set to INTERPOLATE, produces smoother motion during frame rate conversion." + "documentation": "Optional. Specify how the transcoder performs framerate conversion. The default behavior is to use duplicate drop conversion." }, "FramerateDenominator": { "shape": "__integerMin1Max1001", @@ -7759,7 +7772,7 @@ "ParControl": { "shape": "Mpeg2ParControl", "locationName": "parControl", - "documentation": "Using the API, enable ParFollowSource if you want the service to use the pixel aspect ratio from the input. Using the console, do this by choosing Follow source for Pixel aspect ratio." + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To use a different PAR, choose (SPECIFIED). In the console, SPECIFIED corresponds to any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." 
}, "ParDenominator": { "shape": "__integerMin1Max2147483647", @@ -7774,7 +7787,7 @@ "QualityTuningLevel": { "shape": "Mpeg2QualityTuningLevel", "locationName": "qualityTuningLevel", - "documentation": "Use Quality tuning level (Mpeg2QualityTuningLevel) to specifiy whether to use single-pass or multipass video encoding." + "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding." }, "RateControlMode": { "shape": "Mpeg2RateControlMode", @@ -7979,6 +7992,15 @@ }, "documentation": "Settings for your Nielsen configuration. If you don't do Nielsen measurement and analytics, ignore these settings. When you enable Nielsen configuration (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs in the job. To enable Nielsen configuration programmatically, include an instance of nielsenConfiguration in your JSON job specification. Even if you don't include any children of nielsenConfiguration, you still enable the setting." }, + "NoiseFilterPostTemporalSharpening": { + "type": "string", + "documentation": "Optional. When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), you can optionally use this setting to apply additional sharpening. The default behavior, Auto (AUTO) allows the transcoder to determine whether to apply filtering, depending on input type and quality.", + "enum": [ + "DISABLED", + "ENABLED", + "AUTO" + ] + }, "NoiseReducer": { "type": "structure", "members": { @@ -8059,6 +8081,11 @@ "locationName": "aggressiveMode", "documentation": "Use Aggressive mode for content that has complex motion. Higher values produce stronger temporal filtering. This filters highly complex scenes more aggressively and creates better VQ for low bitrate outputs." 
}, + "PostTemporalSharpening": { + "shape": "NoiseFilterPostTemporalSharpening", + "locationName": "postTemporalSharpening", + "documentation": "Optional. When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), you can optionally use this setting to apply additional sharpening. The default behavior, Auto (AUTO) allows the transcoder to determine whether to apply filtering, depending on input type and quality." + }, "Speed": { "shape": "__integerMinNegative1Max3", "locationName": "speed", @@ -8086,6 +8113,27 @@ }, "documentation": "The resource you requested doesn't exist." }, + "OpusSettings": { + "type": "structure", + "members": { + "Bitrate": { + "shape": "__integerMin32000Max192000", + "locationName": "bitrate", + "documentation": "Optional. Specify the average bitrate in bits per second. Valid values are multiples of 8000, from 32000 through 192000. The default value is 96000, which we recommend for quality and bandwidth." + }, + "Channels": { + "shape": "__integerMin1Max2", + "locationName": "channels", + "documentation": "Specify the number of channels in this output audio track. Choosing Mono on the console gives you 1 output channel; choosing Stereo gives you 2. In the API, valid values are 1 and 2." + }, + "SampleRate": { + "shape": "__integerMin16000Max48000", + "locationName": "sampleRate", + "documentation": "Optional. Sample rate in hz. Valid values are 16000, 24000, and 48000. The default value is 48000." + } + }, + "documentation": "Required when you set Codec, under AudioDescriptions>CodecSettings, to the value OPUS." + }, "Order": { "type": "string", "documentation": "Optional. When you request lists of resources, you can specify whether they are sorted in ASCENDING or DESCENDING order. Default varies by resource.", @@ -8115,7 +8163,7 @@ "Extension": { "shape": "__string", "locationName": "extension", - "documentation": "Use Extension (Extension) to specify the file extension for outputs in File output groups. 
If you do not specify a value, the service will use default extensions by container type as follows * MPEG-2 transport stream, m2ts * Quicktime, mov * MXF container, mxf * MPEG-4 container, mp4 * No Container, the service will use codec extensions (e.g. AAC, H265, H265, AC3)" + "documentation": "Use Extension (Extension) to specify the file extension for outputs in File output groups. If you do not specify a value, the service will use default extensions by container type as follows * MPEG-2 transport stream, m2ts * Quicktime, mov * MXF container, mxf * MPEG-4 container, mp4 * WebM container, webm * No Container, the service will use codec extensions (e.g. AAC, H264, H265, AC3)" }, "NameModifier": { "shape": "__stringMin1", @@ -8377,7 +8425,7 @@ }, "ProresFramerateControl": { "type": "string", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job sepecification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. 
The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -8385,7 +8433,7 @@ }, "ProresFramerateConversionAlgorithm": { "type": "string", - "documentation": "When set to INTERPOLATE, produces smoother motion during frame rate conversion.", + "documentation": "Optional. Specify how the transcoder performs framerate conversion. The default behavior is to use duplicate drop conversion.", "enum": [ "DUPLICATE_DROP", "INTERPOLATE" @@ -8404,7 +8452,7 @@ }, "ProresParControl": { "type": "string", - "documentation": "Use (ProresParControl) to specify how the service determines the pixel aspect ratio. Set to Follow source (INITIALIZE_FROM_SOURCE) to use the pixel aspect ratio from the input. To specify a different pixel aspect ratio: Using the console, choose it from the dropdown menu. Using the API, set ProresParControl to (SPECIFIED) and provide for (ParNumerator) and (ParDenominator).", + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To use a different PAR, choose (SPECIFIED). In the console, SPECIFIED corresponds to any value other than Follow source. 
When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -8421,12 +8469,12 @@ "FramerateControl": { "shape": "ProresFramerateControl", "locationName": "framerateControl", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job sepecification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. 
Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." }, "FramerateConversionAlgorithm": { "shape": "ProresFramerateConversionAlgorithm", "locationName": "framerateConversionAlgorithm", - "documentation": "When set to INTERPOLATE, produces smoother motion during frame rate conversion." + "documentation": "Optional. Specify how the transcoder performs framerate conversion. The default behavior is to use duplicate drop conversion." }, "FramerateDenominator": { "shape": "__integerMin1Max2147483647", @@ -8446,7 +8494,7 @@ "ParControl": { "shape": "ProresParControl", "locationName": "parControl", - "documentation": "Use (ProresParControl) to specify how the service determines the pixel aspect ratio. Set to Follow source (INITIALIZE_FROM_SOURCE) to use the pixel aspect ratio from the input. To specify a different pixel aspect ratio: Using the console, choose it from the dropdown menu. Using the API, set ProresParControl to (SPECIFIED) and provide for (ParNumerator) and (ParDenominator)." + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To use a different PAR, choose (SPECIFIED). In the console, SPECIFIED corresponds to any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." }, "ParDenominator": { "shape": "__integerMin1Max2147483647", @@ -9340,7 +9388,9 @@ "H_264", "H_265", "MPEG2", - "PRORES" + "PRORES", + "VP8", + "VP9" ] }, "VideoCodecSettings": { @@ -9380,9 +9430,19 @@ "shape": "ProresSettings", "locationName": "proresSettings", "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value PRORES." 
+ }, + "Vp8Settings": { + "shape": "Vp8Settings", + "locationName": "vp8Settings", + "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value VP8." + }, + "Vp9Settings": { + "shape": "Vp9Settings", + "locationName": "vp9Settings", + "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value VP9." } }, - "documentation": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * FRAME_CAPTURE, FrameCaptureSettings * AV1, Av1Settings * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings" + "documentation": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * FRAME_CAPTURE, FrameCaptureSettings * AV1, Av1Settings * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * VP8, Vp8Settings * VP9, Vp9Settings" }, "VideoDescription": { "type": "structure", @@ -9400,7 +9460,7 @@ "CodecSettings": { "shape": "VideoCodecSettings", "locationName": "codecSettings", - "documentation": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec (Codec). For each codec enum that you choose, define the corresponding settings object. 
The following lists the codec enum, settings object pairs. * FRAME_CAPTURE, FrameCaptureSettings * AV1, Av1Settings * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings" + "documentation": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * FRAME_CAPTURE, FrameCaptureSettings * AV1, Av1Settings * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * VP8, Vp8Settings * VP9, Vp9Settings" }, "ColorMetadata": { "shape": "ColorMetadata", @@ -9566,6 +9626,247 @@ "PIC_TIMING_SEI" ] }, + "VorbisSettings": { + "type": "structure", + "members": { + "Channels": { + "shape": "__integerMin1Max2", + "locationName": "channels", + "documentation": "Optional. Specify the number of channels in this output audio track. Choosing Mono on the console gives you 1 output channel; choosing Stereo gives you 2. In the API, valid values are 1 and 2. The default value is 2." + }, + "SampleRate": { + "shape": "__integerMin22050Max48000", + "locationName": "sampleRate", + "documentation": "Optional. Specify the audio sample rate in Hz. Valid values are 22050, 32000, 44100, and 48000. The default value is 48000." + }, + "VbrQuality": { + "shape": "__integerMinNegative1Max10", + "locationName": "vbrQuality", + "documentation": "Optional. Specify the variable audio quality of this Vorbis output from -1 (lowest quality, ~45 kbit/s) to 10 (highest quality, ~500 kbit/s). The default value is 4 (~128 kbit/s). Values 5 and 6 are approximately 160 and 192 kbit/s, respectively." + } + }, + "documentation": "Required when you set Codec, under AudioDescriptions>CodecSettings, to the value Vorbis." 
+ }, + "Vp8FramerateControl": { + "type": "string", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "enum": [ + "INITIALIZE_FROM_SOURCE", + "SPECIFIED" + ] + }, + "Vp8FramerateConversionAlgorithm": { + "type": "string", + "documentation": "Optional. Specify how the transcoder performs framerate conversion. The default behavior is to use Drop duplicate (DUPLICATE_DROP) conversion. When you choose Interpolate (INTERPOLATE) instead, the conversion produces smoother motion.", + "enum": [ + "DUPLICATE_DROP", + "INTERPOLATE" + ] + }, + "Vp8ParControl": { + "type": "string", + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. 
When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", + "enum": [ + "INITIALIZE_FROM_SOURCE", + "SPECIFIED" + ] + }, + "Vp8QualityTuningLevel": { + "type": "string", + "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, multi-pass encoding.", + "enum": [ + "MULTI_PASS", + "MULTI_PASS_HQ" + ] + }, + "Vp8RateControlMode": { + "type": "string", + "documentation": "With the VP8 codec, you can use only the variable bitrate (VBR) rate control mode.", + "enum": [ + "VBR" + ] + }, + "Vp8Settings": { + "type": "structure", + "members": { + "Bitrate": { + "shape": "__integerMin1000Max1152000000", + "locationName": "bitrate", + "documentation": "Target bitrate in bits/second. For example, enter five megabits per second as 5000000." + }, + "FramerateControl": { + "shape": "Vp8FramerateControl", + "locationName": "framerateControl", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." 
+ }, + "FramerateConversionAlgorithm": { + "shape": "Vp8FramerateConversionAlgorithm", + "locationName": "framerateConversionAlgorithm", + "documentation": "Optional. Specify how the transcoder performs framerate conversion. The default behavior is to use Drop duplicate (DUPLICATE_DROP) conversion. When you choose Interpolate (INTERPOLATE) instead, the conversion produces smoother motion." + }, + "FramerateDenominator": { + "shape": "__integerMin1Max2147483647", + "locationName": "framerateDenominator", + "documentation": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateDenominator to specify the denominator of this fraction. In this example, use 1001 for the value of FramerateDenominator. When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976." + }, + "FramerateNumerator": { + "shape": "__integerMin1Max2147483647", + "locationName": "framerateNumerator", + "documentation": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateNumerator to specify the numerator of this fraction. In this example, use 24000 for the value of FramerateNumerator. When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976." + }, + "GopSize": { + "shape": "__doubleMin0", + "locationName": "gopSize", + "documentation": "GOP Length (keyframe interval) in frames. Must be greater than zero." + }, + "HrdBufferSize": { + "shape": "__integerMin0Max47185920", + "locationName": "hrdBufferSize", + "documentation": "Optional. Size of buffer (HRD buffer model) in bits. For example, enter five megabits as 5000000." 
+ }, + "MaxBitrate": { + "shape": "__integerMin1000Max1152000000", + "locationName": "maxBitrate", + "documentation": "Ignore this setting unless you set qualityTuningLevel to MULTI_PASS. Optional. Specify the maximum bitrate in bits/second. For example, enter five megabits per second as 5000000. The default behavior uses twice the target bitrate as the maximum bitrate." + }, + "ParControl": { + "shape": "Vp8ParControl", + "locationName": "parControl", + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." + }, + "ParDenominator": { + "shape": "__integerMin1Max2147483647", + "locationName": "parDenominator", + "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33." + }, + "ParNumerator": { + "shape": "__integerMin1Max2147483647", + "locationName": "parNumerator", + "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. 
In this example, the value for parNumerator is 40." + }, + "QualityTuningLevel": { + "shape": "Vp8QualityTuningLevel", + "locationName": "qualityTuningLevel", + "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, multi-pass encoding." + }, + "RateControlMode": { + "shape": "Vp8RateControlMode", + "locationName": "rateControlMode", + "documentation": "With the VP8 codec, you can use only the variable bitrate (VBR) rate control mode." + } + }, + "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value VP8." + }, + "Vp9FramerateControl": { + "type": "string", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "enum": [ + "INITIALIZE_FROM_SOURCE", + "SPECIFIED" + ] + }, + "Vp9FramerateConversionAlgorithm": { + "type": "string", + "documentation": "Optional. Specify how the transcoder performs framerate conversion. The default behavior is to use Drop duplicate (DUPLICATE_DROP) conversion. 
When you choose Interpolate (INTERPOLATE) instead, the conversion produces smoother motion.", + "enum": [ + "DUPLICATE_DROP", + "INTERPOLATE" + ] + }, + "Vp9ParControl": { + "type": "string", + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", + "enum": [ + "INITIALIZE_FROM_SOURCE", + "SPECIFIED" + ] + }, + "Vp9QualityTuningLevel": { + "type": "string", + "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, multi-pass encoding.", + "enum": [ + "MULTI_PASS", + "MULTI_PASS_HQ" + ] + }, + "Vp9RateControlMode": { + "type": "string", + "documentation": "With the VP9 codec, you can use only the variable bitrate (VBR) rate control mode.", + "enum": [ + "VBR" + ] + }, + "Vp9Settings": { + "type": "structure", + "members": { + "Bitrate": { + "shape": "__integerMin1000Max480000000", + "locationName": "bitrate", + "documentation": "Target bitrate in bits/second. For example, enter five megabits per second as 5000000." + }, + "FramerateControl": { + "shape": "Vp9FramerateControl", + "locationName": "framerateControl", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. 
The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + }, + "FramerateConversionAlgorithm": { + "shape": "Vp9FramerateConversionAlgorithm", + "locationName": "framerateConversionAlgorithm", + "documentation": "Optional. Specify how the transcoder performs framerate conversion. The default behavior is to use Drop duplicate (DUPLICATE_DROP) conversion. When you choose Interpolate (INTERPOLATE) instead, the conversion produces smoother motion." + }, + "FramerateDenominator": { + "shape": "__integerMin1Max2147483647", + "locationName": "framerateDenominator", + "documentation": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateDenominator to specify the denominator of this fraction. In this example, use 1001 for the value of FramerateDenominator. When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976." + }, + "FramerateNumerator": { + "shape": "__integerMin1Max2147483647", + "locationName": "framerateNumerator", + "documentation": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateNumerator to specify the numerator of this fraction. In this example, use 24000 for the value of FramerateNumerator. 
When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976." + }, + "GopSize": { + "shape": "__doubleMin0", + "locationName": "gopSize", + "documentation": "GOP Length (keyframe interval) in frames. Must be greater than zero." + }, + "HrdBufferSize": { + "shape": "__integerMin0Max47185920", + "locationName": "hrdBufferSize", + "documentation": "Size of buffer (HRD buffer model) in bits. For example, enter five megabits as 5000000." + }, + "MaxBitrate": { + "shape": "__integerMin1000Max480000000", + "locationName": "maxBitrate", + "documentation": "Ignore this setting unless you set qualityTuningLevel to MULTI_PASS. Optional. Specify the maximum bitrate in bits/second. For example, enter five megabits per second as 5000000. The default behavior uses twice the target bitrate as the maximum bitrate." + }, + "ParControl": { + "shape": "Vp9ParControl", + "locationName": "parControl", + "documentation": "Optional. Specify how the service determines the pixel aspect ratio for this output. The default behavior is to use the same pixel aspect ratio as your input video." + }, + "ParDenominator": { + "shape": "__integerMin1Max2147483647", + "locationName": "parDenominator", + "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33." + }, + "ParNumerator": { + "shape": "__integerMin1Max2147483647", + "locationName": "parNumerator", + "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. 
When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40." + }, + "QualityTuningLevel": { + "shape": "Vp9QualityTuningLevel", + "locationName": "qualityTuningLevel", + "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, multi-pass encoding." + }, + "RateControlMode": { + "shape": "Vp9RateControlMode", + "locationName": "rateControlMode", + "documentation": "With the VP9 codec, you can use only the variable bitrate (VBR) rate control mode." + } + }, + "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value VP9." + }, "WavFormat": { "type": "string", "documentation": "The service defaults to using RIFF for WAV outputs. 
If your output audio is likely to exceed 4 GB in file size, or if you otherwise need the extended support of the RF64 format, set your output WAV file format to RF64.", @@ -9790,6 +10091,11 @@ "min": 1000, "max": 300000000 }, + "__integerMin1000Max480000000": { + "type": "integer", + "min": 1000, + "max": 480000000 + }, "__integerMin10Max48": { "type": "integer", "min": 10, @@ -9800,6 +10106,11 @@ "min": 16000, "max": 320000 }, + "__integerMin16000Max48000": { + "type": "integer", + "min": 16000, + "max": 48000 + }, "__integerMin16Max24": { "type": "integer", "min": 16, @@ -9910,6 +10221,11 @@ "min": 2, "max": 2147483647 }, + "__integerMin32000Max192000": { + "type": "integer", + "min": 32000, + "max": 192000 + }, "__integerMin32000Max384000": { "type": "integer", "min": 32000, @@ -9980,6 +10296,11 @@ "min": -180, "max": 180 }, + "__integerMinNegative1Max10": { + "type": "integer", + "min": -1, + "max": 10 + }, "__integerMinNegative1Max3": { "type": "integer", "min": -1, @@ -10406,14 +10727,14 @@ "type": "string", "pattern": "^s3:\\/\\/.*\\/(ASSETMAP.xml)?$" }, - "__stringPatternS3MM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEEHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE": { - "type": "string", - "pattern": 
"^((s3://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[aA][aA][cC]|[aA][iI][fF][fF]|[mM][pP]2|[aA][cC]3|[eE][cC]3|[dD][tT][sS][eE]))))|(https?://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[aA][aA][cC]|[aA][iI][fF][fF]|[mM][pP]2|[aA][cC]3|[eE][cC]3|[dD][tT][sS][eE])))(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" - }, "__stringPatternS3MM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLL": { "type": "string", "pattern": 
"^((s3://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL]))))|(https?://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL])))(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" }, + "__stringPatternS3WWEEBBMMMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEEHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE": { + "type": "string", + "pattern": 
"^((s3://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([wW][eE][bB][mM]|[mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[aA][aA][cC]|[aA][iI][fF][fF]|[mM][pP]2|[aA][cC]3|[eE][cC]3|[dD][tT][sS][eE]))))|(https?://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[aA][aA][cC]|[aA][iI][fF][fF]|[mM][pP]2|[aA][cC]3|[eE][cC]3|[dD][tT][sS][eE])))(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" + }, "__stringPatternSNManifestConfirmConditionNotificationNS": { "type": "string", "pattern": "^\\s*<(.|\\n)*ManifestConfirmConditionNotification(.|\\n)*>\\s*$" diff --git a/botocore/data/mediapackage-vod/2018-11-07/service-2.json b/botocore/data/mediapackage-vod/2018-11-07/service-2.json index 8e99301c..70c31d3c 100644 --- a/botocore/data/mediapackage-vod/2018-11-07/service-2.json +++ b/botocore/data/mediapackage-vod/2018-11-07/service-2.json @@ -446,7 +446,7 @@ } }, "ListTagsForResource": { - "documentation": "List tags for a given MediaPackage VOD resource", + "documentation": "Returns a list of the tags assigned to the specified 
resource.", "errors": [], "http": { "method": "GET", @@ -463,7 +463,7 @@ } }, "TagResource": { - "documentation": "Set tags for a given MediaPackage VOD resource", + "documentation": "Adds tags to the specified resource. You can specify one or more tags to add.", "errors": [], "http": { "method": "POST", @@ -476,7 +476,7 @@ "name": "TagResource" }, "UntagResource": { - "documentation": "Delete tags for a given MediaPackage VOD resource", + "documentation": "Removes tags from the specified resource. You can specify one or more tags to remove.", "errors": [], "http": { "method": "DELETE", @@ -487,6 +487,42 @@ "shape": "UntagResourceRequest" }, "name": "UntagResource" + }, + "UpdatePackagingGroup": { + "documentation": "Updates a specific packaging group. You can't change the id attribute or any other system-generated attributes.", + "errors": [ + { + "shape": "UnprocessableEntityException" + }, + { + "shape": "InternalServerErrorException" + }, + { + "shape": "ForbiddenException" + }, + { + "shape": "NotFoundException" + }, + { + "shape": "ServiceUnavailableException" + }, + { + "shape": "TooManyRequestsException" + } + ], + "http": { + "method": "PUT", + "requestUri": "/packaging_groups/{id}", + "responseCode": 200 + }, + "input": { + "shape": "UpdatePackagingGroupRequest" + }, + "name": "UpdatePackagingGroup", + "output": { + "documentation": "The updated MediaPackage VOD PackagingGroup resource.", + "shape": "UpdatePackagingGroupResponse" + } } }, "shapes": { @@ -650,6 +686,26 @@ }, "type": "structure" }, + "Authorization": { + "documentation": "CDN Authorization credentials", + "members": { + "CdnIdentifierSecret": { + "documentation": "The Amazon Resource Name (ARN) for the secret in AWS Secrets Manager that is used for CDN authorization.", + "locationName": "cdnIdentifierSecret", + "shape": "__string" + }, + "SecretsRoleArn": { + "documentation": "The Amazon Resource Name (ARN) for the IAM role that allows MediaPackage to communicate with AWS Secrets Manager.", 
+ "locationName": "secretsRoleArn", + "shape": "__string" + } + }, + "required": [ + "SecretsRoleArn", + "CdnIdentifierSecret" + ], + "type": "structure" + }, "CmafEncryption": { "documentation": "A CMAF encryption configuration.", "members": { @@ -859,6 +915,10 @@ "CreatePackagingGroupRequest": { "documentation": "A new MediaPackage VOD PackagingGroup resource configuration.", "members": { + "Authorization": { + "locationName": "authorization", + "shape": "Authorization" + }, "Id": { "documentation": "The ID of the PackagingGroup.", "locationName": "id", @@ -881,6 +941,10 @@ "locationName": "arn", "shape": "__string" }, + "Authorization": { + "locationName": "authorization", + "shape": "Authorization" + }, "DomainName": { "documentation": "The fully qualified domain name for Assets in the PackagingGroup.", "locationName": "domainName", @@ -1166,6 +1230,10 @@ "locationName": "arn", "shape": "__string" }, + "Authorization": { + "locationName": "authorization", + "shape": "Authorization" + }, "DomainName": { "documentation": "The fully qualified domain name for Assets in the PackagingGroup.", "locationName": "domainName", @@ -1431,6 +1499,7 @@ "ListTagsForResourceRequest": { "members": { "ResourceArn": { + "documentation": "The Amazon Resource Name (ARN) for the resource. 
You can get this from the response to any request to the resource.", "location": "uri", "locationName": "resource-arn", "shape": "__string" @@ -1444,6 +1513,7 @@ "ListTagsForResourceResponse": { "members": { "Tags": { + "documentation": "A collection of tags associated with a resource", "locationName": "tags", "shape": "__mapOf__string" } @@ -1632,6 +1702,10 @@ "locationName": "arn", "shape": "__string" }, + "Authorization": { + "locationName": "authorization", + "shape": "Authorization" + }, "DomainName": { "documentation": "The fully qualified domain name for Assets in the PackagingGroup.", "locationName": "domainName", @@ -1652,6 +1726,10 @@ "PackagingGroupCreateParameters": { "documentation": "Parameters used to create a new MediaPackage VOD PackagingGroup resource.", "members": { + "Authorization": { + "locationName": "authorization", + "shape": "Authorization" + }, "Id": { "documentation": "The ID of the PackagingGroup.", "locationName": "id", @@ -1683,6 +1761,16 @@ }, "type": "structure" }, + "PackagingGroupUpdateParameters": { + "documentation": "Parameters used to update a MediaPackage packaging group.", + "members": { + "Authorization": { + "locationName": "authorization", + "shape": "Authorization" + } + }, + "type": "structure" + }, "Profile": { "enum": [ "NONE", @@ -1770,11 +1858,13 @@ "TagResourceRequest": { "members": { "ResourceArn": { + "documentation": "The Amazon Resource Name (ARN) for the resource. 
You can get this from the response to any request to the resource.", "location": "uri", "locationName": "resource-arn", "shape": "__string" }, "Tags": { + "documentation": "A collection of tags associated with a resource", "locationName": "tags", "shape": "__mapOf__string" } @@ -1798,6 +1888,7 @@ "TagsModel": { "members": { "Tags": { + "documentation": "A collection of tags associated with a resource", "locationName": "tags", "shape": "__mapOf__string" } @@ -1838,12 +1929,13 @@ "UntagResourceRequest": { "members": { "ResourceArn": { + "documentation": "The Amazon Resource Name (ARN) for the resource. You can get this from the response to any request to the resource.", "location": "uri", "locationName": "resource-arn", "shape": "__string" }, "TagKeys": { - "documentation": "The key(s) of tag to be deleted", + "documentation": "A comma-separated list of the tag keys to remove from the resource.", "location": "querystring", "locationName": "tagKeys", "shape": "__listOf__string" @@ -1855,6 +1947,53 @@ ], "type": "structure" }, + "UpdatePackagingGroupRequest": { + "documentation": "A MediaPackage VOD PackagingGroup resource configuration.", + "members": { + "Authorization": { + "locationName": "authorization", + "shape": "Authorization" + }, + "Id": { + "documentation": "The ID of a MediaPackage VOD PackagingGroup resource.", + "location": "uri", + "locationName": "id", + "shape": "__string" + } + }, + "required": [ + "Id" + ], + "type": "structure" + }, + "UpdatePackagingGroupResponse": { + "members": { + "Arn": { + "documentation": "The ARN of the PackagingGroup.", + "locationName": "arn", + "shape": "__string" + }, + "Authorization": { + "locationName": "authorization", + "shape": "Authorization" + }, + "DomainName": { + "documentation": "The fully qualified domain name for Assets in the PackagingGroup.", + "locationName": "domainName", + "shape": "__string" + }, + "Id": { + "documentation": "The ID of the PackagingGroup.", + "locationName": "id", + "shape": 
"__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" + } + }, + "type": "structure" + }, "__PeriodTriggersElement": { "enum": [ "ADS" diff --git a/botocore/data/meteringmarketplace/2016-01-14/service-2.json b/botocore/data/meteringmarketplace/2016-01-14/service-2.json index 4b72bca7..d613dc33 100644 --- a/botocore/data/meteringmarketplace/2016-01-14/service-2.json +++ b/botocore/data/meteringmarketplace/2016-01-14/service-2.json @@ -442,5 +442,5 @@ }, "errorMessage":{"type":"string"} }, - "documentation":"AWS Marketplace Metering Service

This reference provides descriptions of the low-level AWS Marketplace Metering Service API.

AWS Marketplace sellers can use this API to submit usage data for custom usage dimensions.

Submitting Metering Records

Accepting New Customers

Entitlement and Metering for Paid Container Products

BatchMeterUsage API calls are captured by AWS CloudTrail. You can use Cloudtrail to verify that the SaaS metering records that you sent are accurate by searching for records with the eventName of BatchMeterUsage. You can also use CloudTrail to audit records over time. For more information, see the AWS CloudTrail User Guide .

" + "documentation":"AWS Marketplace Metering Service

This reference provides descriptions of the low-level AWS Marketplace Metering Service API.

AWS Marketplace sellers can use this API to submit usage data for custom usage dimensions.

For information on the permissions you need to use this API, see AWS Marketplace metering and entitlement API permissions in the AWS Marketplace Seller Guide.

Submitting Metering Records

Accepting New Customers

Entitlement and Metering for Paid Container Products

BatchMeterUsage API calls are captured by AWS CloudTrail. You can use CloudTrail to verify that the SaaS metering records that you sent are accurate by searching for records with the eventName of BatchMeterUsage. You can also use CloudTrail to audit records over time. For more information, see the AWS CloudTrail User Guide .

" } diff --git a/botocore/data/personalize-runtime/2018-05-22/service-2.json b/botocore/data/personalize-runtime/2018-05-22/service-2.json index 6e9b32c4..789470f0 100644 --- a/botocore/data/personalize-runtime/2018-05-22/service-2.json +++ b/botocore/data/personalize-runtime/2018-05-22/service-2.json @@ -124,6 +124,10 @@ "context":{ "shape":"Context", "documentation":"

The contextual metadata to use when getting recommendations. Contextual metadata includes any interaction information that might be relevant when getting a user's recommendations, such as the user's current location or device type.

" + }, + "filterArn":{ + "shape":"Arn", + "documentation":"

The ARN of the filter to apply to the returned recommendations. For more information, see Using Filters with Amazon Personalize.

" } } }, @@ -170,7 +174,7 @@ }, "score":{ "shape":"Score", - "documentation":"

A numeric representation of the model's certainty in the item's suitability. For more information on scoring logic, see how-scores-work.

" + "documentation":"

A numeric representation of the model's certainty that the item will be the next user selection. For more information on scoring logic, see how-scores-work.

" } }, "documentation":"

An object that identifies an item.

The and APIs return a list of PredictedItems.

" diff --git a/botocore/data/personalize/2018-05-22/service-2.json b/botocore/data/personalize/2018-05-22/service-2.json index f9d3a0bb..200ea427 100644 --- a/botocore/data/personalize/2018-05-22/service-2.json +++ b/botocore/data/personalize/2018-05-22/service-2.json @@ -116,6 +116,22 @@ "documentation":"

Creates an event tracker that you use when sending event data to the specified dataset group using the PutEvents API.

When Amazon Personalize creates an event tracker, it also creates an event-interactions dataset in the dataset group associated with the event tracker. The event-interactions dataset stores the event data from the PutEvents call. The contents of this dataset are not available to the user.

Only one event tracker can be associated with a dataset group. You will get an error if you call CreateEventTracker using the same dataset group as an existing event tracker.

When you send event data you include your tracking ID. The tracking ID identifies the customer and authorizes the customer to send the data.

The event tracker can be in one of the following states:

To get the status of the event tracker, call DescribeEventTracker.

The event tracker must be in the ACTIVE state before using the tracking ID.

Related APIs

", "idempotent":true }, + "CreateFilter":{ + "name":"CreateFilter", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateFilterRequest"}, + "output":{"shape":"CreateFilterResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Creates a recommendation filter. For more information, see Using Filters with Amazon Personalize.

" + }, "CreateSchema":{ "name":"CreateSchema", "http":{ @@ -224,6 +240,19 @@ "documentation":"

Deletes the event tracker. Does not delete the event-interactions dataset from the associated dataset group. For more information on event trackers, see CreateEventTracker.

", "idempotent":true }, + "DeleteFilter":{ + "name":"DeleteFilter", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteFilterRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Deletes a filter.

" + }, "DeleteSchema":{ "name":"DeleteSchema", "http":{ @@ -374,6 +403,21 @@ "documentation":"

Describes the given feature transformation.

", "idempotent":true }, + "DescribeFilter":{ + "name":"DescribeFilter", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFilterRequest"}, + "output":{"shape":"DescribeFilterResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Describes a filter's properties.

", + "idempotent":true + }, "DescribeRecipe":{ "name":"DescribeRecipe", "http":{ @@ -538,6 +582,21 @@ "documentation":"

Returns the list of event trackers associated with the account. The response provides the properties for each event tracker, including the Amazon Resource Name (ARN) and tracking ID. For more information on event trackers, see CreateEventTracker.

", "idempotent":true }, + "ListFilters":{ + "name":"ListFilters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListFiltersRequest"}, + "output":{"shape":"ListFiltersResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

Lists all filters that belong to a given dataset group.

", + "idempotent":true + }, "ListRecipes":{ "name":"ListRecipes", "http":{ @@ -729,6 +788,10 @@ "shape":"Arn", "documentation":"

The Amazon Resource Name (ARN) of the batch inference job.

" }, + "filterArn":{ + "shape":"Arn", + "documentation":"

The ARN of the filter used on the batch inference job.

" + }, "failureReason":{ "shape":"FailureReason", "documentation":"

If the batch inference job failed, the reason for the failure.

" @@ -1011,6 +1074,10 @@ "shape":"Arn", "documentation":"

The Amazon Resource Name (ARN) of the solution version that will be used to generate the batch inference recommendations.

" }, + "filterArn":{ + "shape":"Arn", + "documentation":"

The ARN of the filter to apply to the batch inference job. For more information on using filters, see Using Filters with Amazon Personalize.

" + }, "numResults":{ "shape":"NumBatchResults", "documentation":"

The number of recommendations to retrieve.

" @@ -1198,6 +1265,37 @@ } } }, + "CreateFilterRequest":{ + "type":"structure", + "required":[ + "name", + "datasetGroupArn", + "filterExpression" + ], + "members":{ + "name":{ + "shape":"Name", + "documentation":"

The name of the filter to create.

" + }, + "datasetGroupArn":{ + "shape":"Arn", + "documentation":"

The ARN of the dataset group that the filter will belong to.

" + }, + "filterExpression":{ + "shape":"FilterExpression", + "documentation":"

The filter expression that designates the interaction types that the filter will filter out. A filter expression must follow the following format:

EXCLUDE itemId WHERE INTERACTIONS.event_type in (\"EVENT_TYPE\")

Where \"EVENT_TYPE\" is the type of event to filter out. To filter out all items with any interactions history, set \"*\" as the EVENT_TYPE. For more information, see Using Filters with Amazon Personalize.

" + } + } + }, + "CreateFilterResponse":{ + "type":"structure", + "members":{ + "filterArn":{ + "shape":"Arn", + "documentation":"

The ARN of the new filter.

" + } + } + }, "CreateSchemaRequest":{ "type":"structure", "required":[ @@ -1714,6 +1812,16 @@ } } }, + "DeleteFilterRequest":{ + "type":"structure", + "required":["filterArn"], + "members":{ + "filterArn":{ + "shape":"Arn", + "documentation":"

The ARN of the filter to delete.

" + } + } + }, "DeleteSchemaRequest":{ "type":"structure", "required":["schemaArn"], @@ -1886,6 +1994,25 @@ } } }, + "DescribeFilterRequest":{ + "type":"structure", + "required":["filterArn"], + "members":{ + "filterArn":{ + "shape":"Arn", + "documentation":"

The ARN of the filter to describe.

" + } + } + }, + "DescribeFilterResponse":{ + "type":"structure", + "members":{ + "filter":{ + "shape":"Filter", + "documentation":"

The filter's details.

" + } + } + }, "DescribeRecipeRequest":{ "type":"structure", "required":["recipeArn"], @@ -2088,6 +2215,89 @@ "value":{"shape":"ParameterValue"}, "max":100 }, + "Filter":{ + "type":"structure", + "members":{ + "name":{ + "shape":"Name", + "documentation":"

The name of the filter.

" + }, + "filterArn":{ + "shape":"Arn", + "documentation":"

The ARN of the filter.

" + }, + "creationDateTime":{ + "shape":"Date", + "documentation":"

The time at which the filter was created.

" + }, + "lastUpdatedDateTime":{ + "shape":"Date", + "documentation":"

The time at which the filter was last updated.

" + }, + "datasetGroupArn":{ + "shape":"Arn", + "documentation":"

The ARN of the dataset group to which the filter belongs.

" + }, + "failureReason":{ + "shape":"FailureReason", + "documentation":"

If the filter failed, the reason for its failure.

" + }, + "filterExpression":{ + "shape":"FilterExpression", + "documentation":"

Specifies the type of item interactions to filter out of recommendation results. The filter expression must follow the following format:

EXCLUDE itemId WHERE INTERACTIONS.event_type in (\"EVENT_TYPE\")

Where \"EVENT_TYPE\" is the type of event to filter out. For more information, see Using Filters with Amazon Personalize.

" + }, + "status":{ + "shape":"Status", + "documentation":"

The status of the filter.

" + } + }, + "documentation":"

Contains information on a recommendation filter, including its ARN, status, and filter expression.

" + }, + "FilterExpression":{ + "type":"string", + "max":2500, + "min":1, + "sensitive":true + }, + "FilterSummary":{ + "type":"structure", + "members":{ + "name":{ + "shape":"Name", + "documentation":"

The name of the filter.

" + }, + "filterArn":{ + "shape":"Arn", + "documentation":"

The ARN of the filter.

" + }, + "creationDateTime":{ + "shape":"Date", + "documentation":"

The time at which the filter was created.

" + }, + "lastUpdatedDateTime":{ + "shape":"Date", + "documentation":"

The time at which the filter was last updated.

" + }, + "datasetGroupArn":{ + "shape":"Arn", + "documentation":"

The ARN of the dataset group to which the filter belongs.

" + }, + "failureReason":{ + "shape":"FailureReason", + "documentation":"

If the filter failed, the reason for the failure.

" + }, + "status":{ + "shape":"Status", + "documentation":"

The status of the filter.

" + } + }, + "documentation":"

A short summary of a filter's attributes.

" + }, + "Filters":{ + "type":"list", + "member":{"shape":"FilterSummary"}, + "max":100 + }, "GetSolutionMetricsRequest":{ "type":"structure", "required":["solutionVersionArn"], @@ -2425,6 +2635,36 @@ } } }, + "ListFiltersRequest":{ + "type":"structure", + "members":{ + "datasetGroupArn":{ + "shape":"Arn", + "documentation":"

The ARN of the dataset group that contains the filters.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A token returned from the previous call to ListFilters for getting the next set of filters (if they exist).

" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of filters to return.

" + } + } + }, + "ListFiltersResponse":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"Filters", + "documentation":"

A list of returned filters.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A token for getting the next set of filters (if they exist).

" + } + } + }, "ListRecipesRequest":{ "type":"structure", "members":{ diff --git a/botocore/data/pinpoint/2016-12-01/service-2.json b/botocore/data/pinpoint/2016-12-01/service-2.json index 65366d79..018e0faf 100644 --- a/botocore/data/pinpoint/2016-12-01/service-2.json +++ b/botocore/data/pinpoint/2016-12-01/service-2.json @@ -5692,6 +5692,10 @@ "Activity": { "type": "structure", "members": { + "CUSTOM": { + "shape": "CustomMessageActivity", + "documentation": "

The settings for a custom message activity. This type of activity calls an AWS Lambda function or web hook that sends messages to participants.

" + }, "ConditionalSplit": { "shape": "ConditionalSplitActivity", "documentation": "

The settings for a yes/no split activity. This type of activity sends participants down one of two paths in a journey, based on conditions that you specify.

" @@ -5712,10 +5716,18 @@ "shape": "MultiConditionalSplitActivity", "documentation": "

The settings for a multivariate split activity. This type of activity sends participants down one of as many as five paths (including a default Else path) in a journey, based on conditions that you specify.

" }, + "PUSH": { + "shape": "PushMessageActivity", + "documentation": "

The settings for a push notification activity. This type of activity sends a push notification to participants.

" + }, "RandomSplit": { "shape": "RandomSplitActivity", "documentation": "

The settings for a random split activity. This type of activity randomly sends specified percentages of participants down one of as many as five paths in a journey, based on conditions that you specify.

" }, + "SMS": { + "shape": "SMSMessageActivity", + "documentation": "

The settings for an SMS activity. This type of activity sends a text message to participants.

" + }, "Wait": { "shape": "WaitActivity", "documentation": "

The settings for a wait activity. This type of activity waits for a certain amount of time or until a specific date and time before moving participants to the next activity in a journey.

" @@ -6434,7 +6446,7 @@ }, "MessageType": { "shape": "MessageType", - "documentation": "

The type of SMS message. Valid values are: TRANSACTIONAL, the message is critical or time-sensitive, such as a one-time password that supports a customer transaction; and, PROMOTIONAL, the message isn't critical or time-sensitive, such as a marketing message.

" + "documentation": "

The SMS message type. Valid values are TRANSACTIONAL (for messages that are critical or time-sensitive, such as one-time passwords) and PROMOTIONAL (for messages that aren't critical or time-sensitive, such as marketing messages).

" }, "SenderId": { "shape": "__string", @@ -6526,6 +6538,7 @@ "ChannelType": { "type": "string", "enum": [ + "PUSH", "GCM", "APNS", "APNS_SANDBOX", @@ -7014,6 +7027,36 @@ "DeliveryUri" ] }, + "CustomMessageActivity": { + "type": "structure", + "members": { + "DeliveryUri": { + "shape": "__string", + "documentation": "

The destination to send the custom message to. This value can be one of the following:

" + }, + "EndpointTypes": { + "shape": "ListOf__EndpointTypesElement", + "documentation": "

The types of endpoints to send the custom message to. Each valid value maps to a type of channel that you can associate with an endpoint by using the ChannelType property of an endpoint.

" + }, + "MessageConfig": { + "shape": "JourneyCustomMessage", + "documentation": "

Specifies the message data included in a custom channel message that's sent to participants in a journey.

" + }, + "NextActivity": { + "shape": "__string", + "documentation": "

The unique identifier for the next activity to perform, after Amazon Pinpoint calls the AWS Lambda function or web hook.

" + }, + "TemplateName": { + "shape": "__string", + "documentation": "

The name of the custom message template to use for the message. If specified, this value must match the name of an existing message template.

" + }, + "TemplateVersion": { + "shape": "__string", + "documentation": "

The unique identifier for the version of the message template to use for the message. If specified, this value must match the identifier for an existing template version. To retrieve a list of versions and version identifiers for a template, use the Template Versions resource.

If you don't specify a value for this property, Amazon Pinpoint uses the active version of the template. The active version is typically the version of a template that's been most recently reviewed and approved for use, depending on your workflow. It isn't necessarily the latest version of a template.

" + } + }, + "documentation": "

The settings for a custom message activity. This type of activity calls an AWS Lambda function or web hook that sends messages to participants.

" + }, "DefaultMessage": { "type": "structure", "members": { @@ -7927,7 +7970,7 @@ "members": { "MessageConfig": { "shape": "JourneyEmailMessage", - "documentation": "

The \"From\" address to use for the message.

" + "documentation": "

Specifies the sender address for an email message that's sent to participants in the journey.

" }, "NextActivity": { "shape": "__string", @@ -7935,7 +7978,7 @@ }, "TemplateName": { "shape": "__string", - "documentation": "

The name of the email template to use for the message.

" + "documentation": "

The name of the email message template to use for the message. If specified, this value must match the name of an existing message template.

" }, "TemplateVersion": { "shape": "__string", @@ -10712,6 +10755,16 @@ "FAILED" ] }, + "JourneyCustomMessage": { + "type": "structure", + "members": { + "Data": { + "shape": "__string", + "documentation": "

The message content that's passed to an AWS Lambda function or to a web hook.

" + } + }, + "documentation": "

Specifies the message content for a custom channel message that's sent to participants in a journey.

" + }, "JourneyDateRangeKpiResponse": { "type": "structure", "members": { @@ -10848,6 +10901,16 @@ }, "documentation": "

Specifies limits on the messages that a journey can send and the number of times participants can enter a journey.

" }, + "JourneyPushMessage": { + "type": "structure", + "members": { + "TimeToLive": { + "shape": "__string", + "documentation": "

The number of seconds that the push notification service should keep the message, if the service is unable to deliver the notification the first time. This value is converted to an expiration value when it's sent to a push-notification service. If this value is 0, the service treats the notification as if it expires immediately and the service doesn't store or try to deliver the notification again.

This value doesn't apply to messages that are sent through the Amazon Device Messaging (ADM) service.

" + } + }, + "documentation": "

Specifies the message configuration for a push notification that's sent to participants in a journey.

" + }, "JourneyResponse": { "type": "structure", "members": { @@ -10920,6 +10983,20 @@ "ApplicationId" ] }, + "JourneySMSMessage": { + "type": "structure", + "members": { + "MessageType": { + "shape": "MessageType", + "documentation": "

The SMS message type. Valid values are TRANSACTIONAL (for messages that are critical or time-sensitive, such as one-time passwords) and PROMOTIONAL (for messages that aren't critical or time-sensitive, such as marketing messages).

" + }, + "SenderId": { + "shape": "__string", + "documentation": "

The sender ID to display as the sender of the message on a recipient's device. Support for sender IDs varies by country or region. For more information, see Supported Countries and Regions in the Amazon Pinpoint User Guide.

" + } + }, + "documentation": "

Specifies the sender ID and message type for an SMS message that's sent to participants in a journey.

" + }, "JourneySchedule": { "type": "structure", "members": { @@ -11601,6 +11678,28 @@ }, "documentation": "

Specifies the properties and attributes of an endpoint that's associated with an event.

" }, + "PushMessageActivity": { + "type": "structure", + "members": { + "MessageConfig": { + "shape": "JourneyPushMessage", + "documentation": "

Specifies the time to live (TTL) value for push notifications that are sent to participants in a journey.

" + }, + "NextActivity": { + "shape": "__string", + "documentation": "

The unique identifier for the next activity to perform, after the message is sent.

" + }, + "TemplateName": { + "shape": "__string", + "documentation": "

The name of the push notification template to use for the message. If specified, this value must match the name of an existing message template.

" + }, + "TemplateVersion": { + "shape": "__string", + "documentation": "

The unique identifier for the version of the push notification template to use for the message. If specified, this value must match the identifier for an existing template version. To retrieve a list of versions and version identifiers for a template, use the Template Versions resource.

If you don't specify a value for this property, Amazon Pinpoint uses the active version of the template. The active version is typically the version of a template that's been most recently reviewed and approved for use, depending on your workflow. It isn't necessarily the latest version of a template.

" + } + }, + "documentation": "

Specifies the settings for a push notification activity in a journey. This type of activity sends a push notification to participants.

" + }, "PushNotificationTemplateRequest": { "type": "structure", "members": { @@ -12095,7 +12194,7 @@ }, "MessageType": { "shape": "MessageType", - "documentation": "

The SMS message type. Valid values are: TRANSACTIONAL, the message is critical or time-sensitive, such as a one-time password that supports a customer transaction; and, PROMOTIONAL, the message is not critical or time-sensitive, such as a marketing message.

" + "documentation": "

The SMS message type. Valid values are TRANSACTIONAL (for messages that are critical or time-sensitive, such as one-time passwords) and PROMOTIONAL (for messages that aren't critical or time-sensitive, such as marketing messages).

" }, "OriginationNumber": { "shape": "__string", @@ -12112,6 +12211,28 @@ }, "documentation": "

Specifies the default settings for a one-time SMS message that's sent directly to an endpoint.

" }, + "SMSMessageActivity": { + "type": "structure", + "members": { + "MessageConfig": { + "shape": "JourneySMSMessage", + "documentation": "

Specifies the sender ID and message type for an SMS message that's sent to participants in a journey.

" + }, + "NextActivity": { + "shape": "__string", + "documentation": "

The unique identifier for the next activity to perform, after the message is sent.

" + }, + "TemplateName": { + "shape": "__string", + "documentation": "

The name of the SMS message template to use for the message. If specified, this value must match the name of an existing message template.

" + }, + "TemplateVersion": { + "shape": "__string", + "documentation": "

The unique identifier for the version of the SMS template to use for the message. If specified, this value must match the identifier for an existing template version. To retrieve a list of versions and version identifiers for a template, use the Template Versions resource.

If you don't specify a value for this property, Amazon Pinpoint uses the active version of the template. The active version is typically the version of a template that's been most recently reviewed and approved for use, depending on your workflow. It isn't necessarily the latest version of a template.

" + } + }, + "documentation": "

Specifies the settings for an SMS activity in a journey. This type of activity sends a text message to participants.

" + }, "SMSTemplateRequest": { "type": "structure", "members": { @@ -14352,6 +14473,7 @@ "__EndpointTypesElement": { "type": "string", "enum": [ + "PUSH", "GCM", "APNS", "APNS_SANDBOX", diff --git a/botocore/data/polly/2016-06-10/service-2.json b/botocore/data/polly/2016-06-10/service-2.json index a3c4055f..314149d3 100644 --- a/botocore/data/polly/2016-06-10/service-2.json +++ b/botocore/data/polly/2016-06-10/service-2.json @@ -1035,6 +1035,7 @@ "Justin", "Karl", "Kendra", + "Kevin", "Kimberly", "Lea", "Liv", diff --git a/botocore/data/qldb/2019-01-02/service-2.json b/botocore/data/qldb/2019-01-02/service-2.json index 3268ab5c..e60515db 100644 --- a/botocore/data/qldb/2019-01-02/service-2.json +++ b/botocore/data/qldb/2019-01-02/service-2.json @@ -128,7 +128,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ResourcePreconditionNotMetException"} ], - "documentation":"

Returns a journal block object at a specified address in a ledger. Also returns a proof of the specified block for verification if DigestTipAddress is provided.

If the specified ledger doesn't exist or is in DELETING status, then throws ResourceNotFoundException.

If the specified ledger is in CREATING status, then throws ResourcePreconditionNotMetException.

If no block exists with the specified address, then throws InvalidParameterException.

" + "documentation":"

Returns a block object at a specified address in a journal. Also returns a proof of the specified block for verification if DigestTipAddress is provided.

For information about the data contents in a block, see Journal contents in the Amazon QLDB Developer Guide.

If the specified ledger doesn't exist or is in DELETING status, then throws ResourceNotFoundException.

If the specified ledger is in CREATING status, then throws ResourcePreconditionNotMetException.

If no block exists with the specified address, then throws InvalidParameterException.

" }, "GetDigest":{ "name":"GetDigest", @@ -232,7 +232,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ResourcePreconditionNotMetException"} ], - "documentation":"

Creates a stream for a given Amazon QLDB ledger that delivers the journal data to a specified Amazon Kinesis Data Streams resource. The stream captures every document revision that is committed to your journal and sends it to the Kinesis data stream.

" + "documentation":"

Creates a journal stream for a given Amazon QLDB ledger. The stream captures every document revision that is committed to the ledger's journal and delivers the data to a specified Amazon Kinesis Data Streams resource.

" }, "TagResource":{ "name":"TagResource", @@ -323,7 +323,7 @@ "members":{ "Name":{ "shape":"LedgerName", - "documentation":"

The name of the ledger that you want to create. The name must be unique among all of your ledgers in the current AWS Region.

" + "documentation":"

The name of the ledger that you want to create. The name must be unique among all of your ledgers in the current AWS Region.

Naming constraints for ledger names are defined in Quotas in Amazon QLDB in the Amazon QLDB Developer Guide.

" }, "Tags":{ "shape":"Tags", @@ -788,7 +788,7 @@ }, "AggregationEnabled":{ "shape":"Boolean", - "documentation":"

Enables QLDB to publish multiple stream records in a single Kinesis Data Streams record. To learn more, see KPL Key Concepts in the Amazon Kinesis Data Streams Developer Guide.

" + "documentation":"

Enables QLDB to publish multiple data records in a single Kinesis Data Streams record. To learn more, see KPL Key Concepts in the Amazon Kinesis Data Streams Developer Guide.

" } }, "documentation":"

The configuration settings of the Amazon Kinesis Data Streams destination for your Amazon QLDB journal stream.

" @@ -1171,7 +1171,7 @@ }, "ExclusiveEndTime":{ "shape":"Timestamp", - "documentation":"

The exclusive date and time that specifies when the stream ends. If you keep this parameter blank, the stream runs indefinitely until you cancel it.

The ExclusiveEndTime must be in ISO 8601 date and time format and in Universal Coordinated Time (UTC). For example: 2019-06-13T21:36:34Z

" + "documentation":"

The exclusive date and time that specifies when the stream ends. If you don't define this parameter, the stream runs indefinitely until you cancel it.

The ExclusiveEndTime must be in ISO 8601 date and time format and in Universal Coordinated Time (UTC). For example: 2019-06-13T21:36:34Z

" }, "KinesisConfiguration":{ "shape":"KinesisConfiguration", @@ -1179,7 +1179,7 @@ }, "StreamName":{ "shape":"StreamName", - "documentation":"

The name that you want to assign to the QLDB journal stream. User-defined names can help identify and indicate the purpose of a stream.

Your stream name must be unique among other active streams for a given ledger. If you try to create a stream with the same name and configuration of an active, existing stream for the same ledger, QLDB simply returns the existing stream. Stream names have the same naming constraints as ledger names, as defined in Quotas in Amazon QLDB in the Amazon QLDB Developer Guide.

" + "documentation":"

The name that you want to assign to the QLDB journal stream. User-defined names can help identify and indicate the purpose of a stream.

Your stream name must be unique among other active streams for a given ledger. Stream names have the same naming constraints as ledger names, as defined in Quotas in Amazon QLDB in the Amazon QLDB Developer Guide.

" } } }, @@ -1337,7 +1337,7 @@ "documentation":"

An Amazon Ion plaintext value contained in a ValueHolder structure.

" } }, - "documentation":"

A structure that can contain an Amazon Ion value in multiple encoding formats.

", + "documentation":"

A structure that can contain a value in multiple encoding formats.

", "sensitive":true } }, diff --git a/botocore/data/route53/2013-04-01/service-2.json b/botocore/data/route53/2013-04-01/service-2.json index 6d992829..0e834958 100644 --- a/botocore/data/route53/2013-04-01/service-2.json +++ b/botocore/data/route53/2013-04-01/service-2.json @@ -31,7 +31,8 @@ {"shape":"InvalidInput"}, {"shape":"PublicZoneVPCAssociation"}, {"shape":"ConflictingDomainExists"}, - {"shape":"LimitsExceeded"} + {"shape":"LimitsExceeded"}, + {"shape":"PriorRequestNotComplete"} ], "documentation":"

Associates an Amazon VPC with a private hosted zone.

To perform the association, the VPC and the private hosted zone must already exist. Also, you can't convert a public hosted zone into a private hosted zone.

If you want to associate a VPC that was created by one AWS account with a private hosted zone that was created by a different account, do one of the following:

" }, diff --git a/botocore/data/sagemaker-runtime/2017-05-13/service-2.json b/botocore/data/sagemaker-runtime/2017-05-13/service-2.json index 41224d5f..a12c74b6 100644 --- a/botocore/data/sagemaker-runtime/2017-05-13/service-2.json +++ b/botocore/data/sagemaker-runtime/2017-05-13/service-2.json @@ -32,7 +32,7 @@ "shapes":{ "BodyBlob":{ "type":"blob", - "max":5242880, + "max":6291456, "sensitive":true }, "CustomAttributesHeader":{ @@ -77,7 +77,7 @@ }, "Body":{ "shape":"BodyBlob", - "documentation":"

Provides input data, in the format specified in the ContentType request header. Amazon SageMaker passes all of the data in the body to the model.

For information about the format of the request body, see Common Data Formats—Inference.

" + "documentation":"

Provides input data, in the format specified in the ContentType request header. Amazon SageMaker passes all of the data in the body to the model.

For information about the format of the request body, see Common Data Formats-Inference.

" }, "ContentType":{ "shape":"Header", @@ -99,9 +99,15 @@ }, "TargetModel":{ "shape":"TargetModelHeader", - "documentation":"

Specifies the model to be requested for an inference when invoking a multi-model endpoint.

", + "documentation":"

The model to request for inference when invoking a multi-model endpoint.

", "location":"header", "locationName":"X-Amzn-SageMaker-Target-Model" + }, + "TargetVariant":{ + "shape":"TargetVariantHeader", + "documentation":"

Specify the production variant to send the inference request to when invoking an endpoint that is running two or more variants. Note that this parameter overrides the default behavior for the endpoint, which is to distribute the invocation traffic based on the variant weights.

", + "location":"header", + "locationName":"X-Amzn-SageMaker-Target-Variant" } }, "payload":"Body" @@ -112,7 +118,7 @@ "members":{ "Body":{ "shape":"BodyBlob", - "documentation":"

Includes the inference provided by the model.

For information about the format of the response body, see Common Data Formats—Inference.

" + "documentation":"

Includes the inference provided by the model.

For information about the format of the response body, see Common Data Formats-Inference.

" }, "ContentType":{ "shape":"Header", @@ -179,6 +185,11 @@ "min":1, "pattern":"\\A\\S[\\p{Print}]*\\z" }, + "TargetVariantHeader":{ + "type":"string", + "max":63, + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + }, "ValidationError":{ "type":"structure", "members":{ diff --git a/botocore/data/sagemaker/2017-07-24/service-2.json b/botocore/data/sagemaker/2017-07-24/service-2.json index 4dc1ddca..c8e59e22 100644 --- a/botocore/data/sagemaker/2017-07-24/service-2.json +++ b/botocore/data/sagemaker/2017-07-24/service-2.json @@ -60,7 +60,7 @@ {"shape":"ResourceLimitExceeded"}, {"shape":"ResourceInUse"} ], - "documentation":"

Creates a running App for the specified UserProfile. Supported Apps are JupyterServer, KernelGateway, and TensorBoard. This operation is automatically invoked by Amazon SageMaker Studio upon access to the associated Studio Domain, and when new kernel configurations are selected by the user. A user may have multiple Apps active simultaneously. Apps will automatically terminate and be deleted when stopped from within Studio, or when the DeleteApp API is manually called. UserProfiles are limited to 5 concurrently running Apps at a time.

" + "documentation":"

Creates a running App for the specified UserProfile. Supported Apps are JupyterServer, KernelGateway, and TensorBoard. This operation is automatically invoked by Amazon SageMaker Studio upon access to the associated Domain, and when new kernel configurations are selected by the user. A user may have multiple Apps active simultaneously. UserProfiles are limited to 5 concurrently running Apps at a time.

" }, "CreateAutoMLJob":{ "name":"CreateAutoMLJob", @@ -112,7 +112,7 @@ {"shape":"ResourceLimitExceeded"}, {"shape":"ResourceInUse"} ], - "documentation":"

Creates a Domain for Amazon SageMaker Studio, which can be accessed by end-users in a web browser. A Domain has an associated directory, list of authorized users, and a variety of security, application, policies, and Amazon Virtual Private Cloud configurations. An AWS account is limited to one Domain, per region. Users within a domain can share notebook files and other artifacts with each other. When a Domain is created, an Amazon Elastic File System (EFS) is also created for use by all of the users within the Domain. Each user receives a private home directory within the EFS for notebooks, Git repositories, and data files.

" + "documentation":"

Creates a Domain used by SageMaker Studio. A domain consists of an associated directory, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. An AWS account is limited to one domain per region. Users within a domain can share notebook files and other artifacts with each other.

When a domain is created, an Amazon Elastic File System (EFS) volume is also created for use by all of the users within the domain. Each user receives a private home directory within the EFS for notebooks, Git repositories, and data files.

All traffic between the domain and the EFS volume is communicated through the specified subnet IDs. All other traffic goes over the Internet through an Amazon SageMaker system VPC. The EFS traffic uses the NFS/TCP protocol over port 2049.

NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules in order to launch a SageMaker Studio app successfully.

" }, "CreateEndpoint":{ "name":"CreateEndpoint", @@ -125,7 +125,7 @@ "errors":[ {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API to deploy models using Amazon SageMaker hosting services.

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto 3)).

You must not delete an EndpointConfig that is in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

The endpoint name must be unique within an AWS Region in your AWS account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provided. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

" + "documentation":"

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API to deploy models using Amazon SageMaker hosting services.

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto 3)).

You must not delete an EndpointConfig that is in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

The endpoint name must be unique within an AWS Region in your AWS account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads , the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provided. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

" }, "CreateEndpointConfig":{ "name":"CreateEndpointConfig", @@ -138,7 +138,7 @@ "errors":[ {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Creates an endpoint configuration that Amazon SageMaker hosting services uses to deploy models. In the configuration, you identify one or more models, created using the CreateModel API, to deploy and the resources that you want Amazon SageMaker to provision. Then you call the CreateEndpoint API.

Use this API if you want to use Amazon SageMaker hosting services to deploy models into production.

In the request, you define a ProductionVariant, for each model that you want to deploy. Each ProductionVariant parameter also describes the resources that you want Amazon SageMaker to provision. This includes the number and type of ML compute instances to deploy.

If you are hosting multiple models, you also assign a VariantWeight to specify how much traffic you want to allocate to each model. For example, suppose that you want to host two models, A and B, and you assign traffic weight 2 for model A and 1 for model B. Amazon SageMaker distributes two-thirds of the traffic to Model A, and one-third to model B.

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto 3)).

" + "documentation":"

Creates an endpoint configuration that Amazon SageMaker hosting services uses to deploy models. In the configuration, you identify one or more models, created using the CreateModel API, to deploy and the resources that you want Amazon SageMaker to provision. Then you call the CreateEndpoint API.

Use this API if you want to use Amazon SageMaker hosting services to deploy models into production.

In the request, you define a ProductionVariant, for each model that you want to deploy. Each ProductionVariant parameter also describes the resources that you want Amazon SageMaker to provision. This includes the number and type of ML compute instances to deploy.

If you are hosting multiple models, you also assign a VariantWeight to specify how much traffic you want to allocate to each model. For example, suppose that you want to host two models, A and B, and you assign traffic weight 2 for model A and 1 for model B. Amazon SageMaker distributes two-thirds of the traffic to Model A, and one-third to model B.

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto 3)).

When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads , the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

" }, "CreateExperiment":{ "name":"CreateExperiment", @@ -283,7 +283,7 @@ "errors":[ {"shape":"ResourceNotFound"} ], - "documentation":"

Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to Amazon SageMaker Studio, and granted access to all of the Apps and files associated with that Amazon Elastic File System (EFS). This operation can only be called when AuthMode equals IAM.

" + "documentation":"

Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to Amazon SageMaker Studio, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System (EFS) volume. This operation can only be called when the authentication mode equals IAM.

" }, "CreatePresignedNotebookInstanceUrl":{ "name":"CreatePresignedNotebookInstanceUrl", @@ -293,7 +293,7 @@ }, "input":{"shape":"CreatePresignedNotebookInstanceUrlInput"}, "output":{"shape":"CreatePresignedNotebookInstanceUrlOutput"}, - "documentation":"

Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the Amazon SageMaker console, when you choose Open next to a notebook instance, Amazon SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page.

IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the notebook instance.For example, you can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. Use the NotIpAddress condition operator and the aws:SourceIP condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address.

The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you try to use the URL after the 5-minute limit expires, you are directed to the AWS console sign-in page.

" + "documentation":"

Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the Amazon SageMaker console, when you choose Open next to a notebook instance, Amazon SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page.

The IAM role or user used to call this API defines the permissions to access the notebook instance. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the notebook instance.

You can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. Use the NotIpAddress condition operator and the aws:SourceIP condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address.

The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you try to use the URL after the 5-minute limit expires, you are directed to the AWS console sign-in page.

" }, "CreateProcessingJob":{ "name":"CreateProcessingJob", @@ -379,7 +379,7 @@ {"shape":"ResourceLimitExceeded"}, {"shape":"ResourceInUse"} ], - "documentation":"

Creates a user profile. A user profile represents a single user within a Domain, and is the main way to reference a \"person\" for the purposes of sharing, reporting and other user-oriented features. This entity is created during on-boarding to Amazon SageMaker Studio. If an administrator invites a person by email or imports them from SSO, a UserProfile is automatically created.

This entity is the primary holder of settings for an individual user and, through the domain, has a reference to the user's private Amazon Elastic File System (EFS) home directory.

" + "documentation":"

Creates a user profile. A user profile represents a single user within a domain, and is the main way to reference a \"person\" for the purposes of sharing, reporting, and other user-oriented features. This entity is created when a user onboards to Amazon SageMaker Studio. If an administrator invites a person by email or imports them from SSO, a user profile is automatically created. A user profile is the primary holder of settings for an individual user and has a reference to the user's private Amazon Elastic File System (EFS) home directory.

" }, "CreateWorkteam":{ "name":"CreateWorkteam", @@ -437,7 +437,7 @@ {"shape":"ResourceInUse"}, {"shape":"ResourceNotFound"} ], - "documentation":"

Used to delete a domain. Use with caution. If RetentionPolicy is set to Delete, all of the members of the domain will lose access to their EFS volume, including data, notebooks, and other artifacts.

" + "documentation":"

Used to delete a domain. If you onboarded with IAM mode, you will need to delete your domain to onboard again using SSO. Use with caution. All of the members of the domain will lose access to their EFS volume, including data, notebooks, and other artifacts.

" }, "DeleteEndpoint":{ "name":"DeleteEndpoint", @@ -455,7 +455,7 @@ "requestUri":"/" }, "input":{"shape":"DeleteEndpointConfigInput"}, - "documentation":"

Deletes an endpoint configuration. The DeleteEndpointConfig API deletes only the specified configuration. It does not delete endpoints created using the configuration.

" + "documentation":"

Deletes an endpoint configuration. The DeleteEndpointConfig API deletes only the specified configuration. It does not delete endpoints created using the configuration.

You must not delete an EndpointConfig in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. If you delete the EndpointConfig of an endpoint that is active or being created or updated you may lose visibility into the instance type the endpoint is using. The endpoint must be deleted in order to stop incurring charges.

" }, "DeleteExperiment":{ "name":"DeleteExperiment", @@ -578,7 +578,7 @@ {"shape":"ResourceInUse"}, {"shape":"ResourceNotFound"} ], - "documentation":"

Deletes a user profile.

" + "documentation":"

Deletes a user profile. When a user profile is deleted, the user loses access to their EFS volume, including data, notebooks, and other artifacts.

" }, "DeleteWorkteam":{ "name":"DeleteWorkteam", @@ -663,7 +663,7 @@ "errors":[ {"shape":"ResourceNotFound"} ], - "documentation":"

The desciption of the domain.

" + "documentation":"

The description of the domain.

" }, "DescribeEndpoint":{ "name":"DescribeEndpoint", @@ -889,7 +889,7 @@ "errors":[ {"shape":"ResourceNotFound"} ], - "documentation":"

Describes the user profile.

" + "documentation":"

Describes a user profile. For more information, see CreateUserProfile.

" }, "DescribeWorkforce":{ "name":"DescribeWorkforce", @@ -1431,7 +1431,7 @@ {"shape":"ResourceInUse"}, {"shape":"ResourceNotFound"} ], - "documentation":"

Updates a domain. Changes will impact all of the people in the domain.

" + "documentation":"

Updates the default settings for new user profiles in the domain.

" }, "UpdateEndpoint":{ "name":"UpdateEndpoint", @@ -1444,7 +1444,7 @@ "errors":[ {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Deploys the new EndpointConfig specified in the request, switches to using newly created endpoint, and then deletes resources provisioned for the endpoint using the previous EndpointConfig (there is no availability loss).

When Amazon SageMaker receives the request, it sets the endpoint status to Updating. After updating the endpoint, it sets the status to InService. To check the status of an endpoint, use the DescribeEndpoint API.

You must not delete an EndpointConfig in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

" + "documentation":"

Deploys the new EndpointConfig specified in the request, switches to using newly created endpoint, and then deletes resources provisioned for the endpoint using the previous EndpointConfig (there is no availability loss).

When Amazon SageMaker receives the request, it sets the endpoint status to Updating. After updating the endpoint, it sets the status to InService. To check the status of an endpoint, use the DescribeEndpoint API.

You must not delete an EndpointConfig in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

If you delete the EndpointConfig of an endpoint that is active or being created or updated you may lose visibility into the instance type the endpoint is using. The endpoint must be deleted in order to stop incurring charges.

" }, "UpdateEndpointWeightsAndCapacities":{ "name":"UpdateEndpointWeightsAndCapacities", @@ -1804,7 +1804,7 @@ "members":{ "AnnotationConsolidationLambdaArn":{ "shape":"LambdaFunctionArn", - "documentation":"

The Amazon Resource Name (ARN) of a Lambda function implements the logic for annotation consolidation.

For the built-in bounding box, image classification, semantic segmentation, and text classification task types, Amazon SageMaker Ground Truth provides the following Lambda functions:

For more information, see Annotation Consolidation.

" + "documentation":"

The Amazon Resource Name (ARN) of a Lambda function that implements the logic for annotation consolidation.

For the built-in bounding box, image classification, semantic segmentation, and text classification task types, Amazon SageMaker Ground Truth provides the following Lambda functions:

Bounding box - Finds the most similar boxes from different workers based on the Jaccard index of the boxes.

Image classification - Uses a variant of the Expectation Maximization approach to estimate the true class of an image based on annotations from individual workers.

Multi-label image classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of an image based on annotations from individual workers.

Semantic segmentation - Treats each pixel in an image as a multi-class classification and treats pixel annotations from workers as \"votes\" for the correct label.

Text classification - Uses a variant of the Expectation Maximization approach to estimate the true class of text based on annotations from individual workers.

Multi-label text classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of text based on annotations from individual workers.

Named entity recognition - Groups similar selections and calculates aggregate boundaries, resolving to most-assigned label.

Bounding box verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgement for bounding box labels based on annotations from individual workers.

Semantic segmentation verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgment for semantic segmentation labels based on annotations from individual workers.

Bounding box adjustment - Finds the most similar boxes from different workers based on the Jaccard index of the adjusted annotations.

Semantic segmentation adjustment - Treats each pixel in an image as a multi-class classification and treats pixel adjusted annotations from workers as \"votes\" for the correct label.

For more information, see Annotation Consolidation.

" } }, "documentation":"

Configures how labels are consolidated across human workers.

" @@ -3127,7 +3127,7 @@ "members":{ "AppArn":{ "shape":"AppArn", - "documentation":"

The app's Amazon Resource Name (ARN).

" + "documentation":"

The App's Amazon Resource Name (ARN).

" } } }, @@ -3273,7 +3273,7 @@ }, "AuthMode":{ "shape":"AuthMode", - "documentation":"

The mode of authentication that member use to access the domain.

" + "documentation":"

The mode of authentication that members use to access the domain.

" }, "DefaultUserSettings":{ "shape":"UserSettings", @@ -3281,15 +3281,15 @@ }, "SubnetIds":{ "shape":"Subnets", - "documentation":"

Security setting to limit to a set of subnets.

" + "documentation":"

The VPC subnets to use for communication with the EFS volume.

" }, "VpcId":{ "shape":"VpcId", - "documentation":"

Security setting to limit the domain's communication to a Amazon Virtual Private Cloud.

" + "documentation":"

The ID of the Amazon Virtual Private Cloud (VPC) to use for communication with the EFS volume.

" }, "Tags":{ "shape":"TagList", - "documentation":"

Each tag consists of a key and an optional value. Tag keys must be unique per resource.

" + "documentation":"

Tags to associate with the Domain. Each tag consists of a key and an optional value. Tag keys must be unique per resource. Tags are searchable using the Search API.

" }, "HomeEfsFileSystemKmsKeyId":{ "shape":"KmsKeyId", @@ -3506,7 +3506,7 @@ }, "TrainingJobDefinitions":{ "shape":"HyperParameterTrainingJobDefinitions", - "documentation":"

" + "documentation":"

A list of the HyperParameterTrainingJobDefinition objects launched for this tuning job.

" }, "WarmStartConfig":{ "shape":"HyperParameterTuningJobWarmStartConfig", @@ -5475,7 +5475,7 @@ }, "TrainingJobDefinitions":{ "shape":"HyperParameterTrainingJobDefinitions", - "documentation":"

" + "documentation":"

A list of the HyperParameterTrainingJobDefinition objects launched for this tuning job.

" }, "HyperParameterTuningJobStatus":{ "shape":"HyperParameterTuningJobStatus", @@ -6446,7 +6446,7 @@ "members":{ "DomainId":{ "shape":"DomainId", - "documentation":"

The domain ID.

" + "documentation":"

The ID of the domain that contains the profile.

" }, "UserProfileArn":{ "shape":"UserProfileArn", @@ -6458,7 +6458,7 @@ }, "HomeEfsFileSystemUid":{ "shape":"EfsUid", - "documentation":"

The home Amazon Elastic File System (EFS) Uid.

" + "documentation":"

The ID of the user's profile in the Amazon Elastic File System (EFS) volume.

" }, "Status":{ "shape":"UserProfileStatus", @@ -7438,7 +7438,7 @@ }, "PreHumanTaskLambdaArn":{ "shape":"LambdaFunctionArn", - "documentation":"

The Amazon Resource Name (ARN) of a Lambda function that is run before a data object is sent to a human worker. Use this function to provide input to a custom labeling job.

For the built-in bounding box, image classification, semantic segmentation, and text classification task types, Amazon SageMaker Ground Truth provides the following Lambda functions:

US East (Northern Virginia) (us-east-1):

US East (Ohio) (us-east-2):

US West (Oregon) (us-west-2):

Canada (Central) (ca-central-1):

EU (Ireland) (eu-west-1):

EU (London) (eu-west-2):

EU Frankfurt (eu-central-1):

Asia Pacific (Tokyo) (ap-northeast-1):

Asia Pacific (Seoul) (ap-northeast-2):

Asia Pacific (Mumbai) (ap-south-1):

Asia Pacific (Singapore) (ap-southeast-1):

Asia Pacific (Sydney) (ap-southeast-2):

" + "documentation":"

The Amazon Resource Name (ARN) of a Lambda function that is run before a data object is sent to a human worker. Use this function to provide input to a custom labeling job.

For the built-in bounding box, image classification, semantic segmentation, and text classification task types, Amazon SageMaker Ground Truth provides the following Lambda functions:

Bounding box - Finds the most similar boxes from different workers based on the Jaccard index of the boxes.

Image classification - Uses a variant of the Expectation Maximization approach to estimate the true class of an image based on annotations from individual workers.

Multi-label image classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of an image based on annotations from individual workers.

Semantic segmentation - Treats each pixel in an image as a multi-class classification and treats pixel annotations from workers as \"votes\" for the correct label.

Text classification - Uses a variant of the Expectation Maximization approach to estimate the true class of text based on annotations from individual workers.

Multi-label text classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of text based on annotations from individual workers.

Named entity recognition - Groups similar selections and calculates aggregate boundaries, resolving to most-assigned label.

Bounding box verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgement for bounding box labels based on annotations from individual workers.

Bounding box adjustment - Finds the most similar boxes from different workers based on the Jaccard index of the adjusted annotations.

Semantic segmentation verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgment for semantic segmentation labels based on annotations from individual workers.

Semantic segmentation adjustment - Treats each pixel in an image as a multi-class classification and treats pixel adjusted annotations from workers as \"votes\" for the correct label.

" }, "TaskKeywords":{ "shape":"TaskKeywords", @@ -10209,7 +10209,7 @@ "documentation":"

The path of the S3 object that contains the model artifacts. For example, s3://bucket-name/keynameprefix/model.tar.gz.

" } }, - "documentation":"

Provides information about the location that is configured for storing model artifacts.

" + "documentation":"

Provides information about the location that is configured for storing model artifacts.

Model artifacts are the output that results from training a model, and typically consist of trained parameters, a model definition that describes how to compute inferences, and other metadata.

" }, "ModelName":{ "type":"string", @@ -11960,6 +11960,10 @@ "RoleArn":{ "shape":"RoleArn", "documentation":"

The Amazon Resource Name (ARN) that has access to the S3 objects that are used by the template.

" + }, + "HumanTaskUiArn":{ + "shape":"HumanTaskUiArn", + "documentation":"

The HumanTaskUiArn of the worker UI that you want to render. Do not provide a HumanTaskUiArn if you use the UiTemplate parameter.

" } } }, @@ -12121,7 +12125,7 @@ "documentation":"

The instance type.

" } }, - "documentation":"

The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. The ARN is stored as metadata in Amazon SageMaker Studio notebooks.

" + "documentation":"

The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. The ARN is stored as metadata in SageMaker Studio notebooks.

" }, "ResourceType":{ "type":"string", @@ -12457,18 +12461,18 @@ "members":{ "NotebookOutputOption":{ "shape":"NotebookOutputOption", - "documentation":"

The notebook output option.

" + "documentation":"

Whether to include the notebook cell output when sharing the notebook. The default is Disabled.

" }, "S3OutputPath":{ "shape":"S3Uri", - "documentation":"

The Amazon S3 output path.

" + "documentation":"

When NotebookOutputOption is Allowed, the Amazon S3 bucket used to save the notebook cell output. If S3OutputPath isn't specified, a default bucket is used.

" }, "S3KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The AWS Key Management Service encryption key ID.

" + "documentation":"

When NotebookOutputOption is Allowed, the AWS Key Management Service (KMS) encryption key ID used to encrypt the notebook cell output in the Amazon S3 bucket.

" } }, - "documentation":"

The sharing settings.

" + "documentation":"

Specifies options when sharing an Amazon SageMaker Studio notebook. These settings are specified as part of DefaultUserSettings when the CreateDomain API is called, and as part of UserSettings when the CreateUserProfile API is called.

" }, "ShuffleConfig":{ "type":"structure", @@ -12890,7 +12894,7 @@ }, "TaskTimeLimitInSeconds":{ "type":"integer", - "max":28800, + "max":604800, "min":30 }, "TaskTitle":{ @@ -14035,7 +14039,11 @@ "members":{ "UiTemplateS3Uri":{ "shape":"S3Uri", - "documentation":"

The Amazon S3 bucket location of the UI template. For more information about the contents of a UI template, see Creating Your Custom Labeling Task Template.

" + "documentation":"

The Amazon S3 bucket location of the UI template, or worker task template. This is the template used to render the worker UI and tools for labeling job tasks. For more information about the contents of a UI template, see Creating Your Custom Labeling Task Template.

" + }, + "HumanTaskUiArn":{ + "shape":"HumanTaskUiArn", + "documentation":"

The ARN of the worker task template used to render the worker UI and tools for labeling job tasks. Do not use this parameter if you use UiTemplateS3Uri.

" } }, "documentation":"

Provided configuration information for the worker UI for a labeling job.

" @@ -14095,7 +14103,7 @@ "members":{ "DomainId":{ "shape":"DomainId", - "documentation":"

The domain ID.

" + "documentation":"

The ID of the domain to be updated.

" }, "DefaultUserSettings":{ "shape":"UserSettings", @@ -14108,7 +14116,7 @@ "members":{ "DomainArn":{ "shape":"DomainArn", - "documentation":"

The domain Amazon Resource Name (ARN).

" + "documentation":"

The Amazon Resource Name (ARN) of the domain.

" } } }, diff --git a/botocore/data/servicecatalog/2015-12-10/service-2.json b/botocore/data/servicecatalog/2015-12-10/service-2.json index 47c01ef0..06ca301a 100644 --- a/botocore/data/servicecatalog/2015-12-10/service-2.json +++ b/botocore/data/servicecatalog/2015-12-10/service-2.json @@ -71,7 +71,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Associates the specified product with the specified portfolio.

" + "documentation":"

Associates the specified product with the specified portfolio.

A delegated admin is authorized to invoke this command.

" }, "AssociateServiceActionWithProvisioningArtifact":{ "name":"AssociateServiceActionWithProvisioningArtifact", @@ -160,7 +160,7 @@ {"shape":"LimitExceededException"}, {"shape":"DuplicateResourceException"} ], - "documentation":"

Creates a constraint.

" + "documentation":"

Creates a constraint.

A delegated admin is authorized to invoke this command.

" }, "CreatePortfolio":{ "name":"CreatePortfolio", @@ -175,7 +175,7 @@ {"shape":"LimitExceededException"}, {"shape":"TagOptionNotMigratedException"} ], - "documentation":"

Creates a portfolio.

" + "documentation":"

Creates a portfolio.

A delegated admin is authorized to invoke this command.

" }, "CreatePortfolioShare":{ "name":"CreatePortfolioShare", @@ -192,7 +192,7 @@ {"shape":"OperationNotSupportedException"}, {"shape":"InvalidStateException"} ], - "documentation":"

Shares the specified portfolio with the specified account or organization node. Shares to an organization node can only be created by the master account of an Organization. AWSOrganizationsAccess must be enabled in order to create a portfolio share to an organization node.

" + "documentation":"

Shares the specified portfolio with the specified account or organization node. Shares to an organization node can only be created by the master account of an organization or by a delegated administrator. You can share portfolios to an organization, an organizational unit, or a specific account.

Note that if a delegated admin is de-registered, they can no longer create portfolio shares.

AWSOrganizationsAccess must be enabled in order to create a portfolio share to an organization node.

" }, "CreateProduct":{ "name":"CreateProduct", @@ -207,7 +207,7 @@ {"shape":"LimitExceededException"}, {"shape":"TagOptionNotMigratedException"} ], - "documentation":"

Creates a product.

" + "documentation":"

Creates a product.

A delegated admin is authorized to invoke this command.

" }, "CreateProvisionedProductPlan":{ "name":"CreateProvisionedProductPlan", @@ -280,7 +280,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParametersException"} ], - "documentation":"

Deletes the specified constraint.

" + "documentation":"

Deletes the specified constraint.

A delegated admin is authorized to invoke this command.

" }, "DeletePortfolio":{ "name":"DeletePortfolio", @@ -296,7 +296,7 @@ {"shape":"ResourceInUseException"}, {"shape":"TagOptionNotMigratedException"} ], - "documentation":"

Deletes the specified portfolio.

You cannot delete a portfolio if it was shared with you or if it has associated products, users, constraints, or shared accounts.

" + "documentation":"

Deletes the specified portfolio.

You cannot delete a portfolio if it was shared with you or if it has associated products, users, constraints, or shared accounts.

A delegated admin is authorized to invoke this command.

" }, "DeletePortfolioShare":{ "name":"DeletePortfolioShare", @@ -312,7 +312,7 @@ {"shape":"OperationNotSupportedException"}, {"shape":"InvalidStateException"} ], - "documentation":"

Stops sharing the specified portfolio with the specified account or organization node. Shares to an organization node can only be deleted by the master account of an Organization.

" + "documentation":"

Stops sharing the specified portfolio with the specified account or organization node. Shares to an organization node can only be deleted by the master account of an organization or by a delegated administrator.

Note that if a delegated admin is de-registered, portfolio shares created from that account are removed.

" }, "DeleteProduct":{ "name":"DeleteProduct", @@ -328,7 +328,7 @@ {"shape":"InvalidParametersException"}, {"shape":"TagOptionNotMigratedException"} ], - "documentation":"

Deletes the specified product.

You cannot delete a product if it was shared with you or is associated with a portfolio.

" + "documentation":"

Deletes the specified product.

You cannot delete a product if it was shared with you or is associated with a portfolio.

A delegated admin is authorized to invoke this command.

" }, "DeleteProvisionedProductPlan":{ "name":"DeleteProvisionedProductPlan", @@ -425,7 +425,7 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"

Gets information about the specified portfolio.

" + "documentation":"

Gets information about the specified portfolio.

A delegated admin is authorized to invoke this command.

" }, "DescribePortfolioShareStatus":{ "name":"DescribePortfolioShareStatus", @@ -440,7 +440,7 @@ {"shape":"InvalidParametersException"}, {"shape":"OperationNotSupportedException"} ], - "documentation":"

Gets the status of the specified portfolio share operation. This API can only be called by the master account in the organization.

" + "documentation":"

Gets the status of the specified portfolio share operation. This API can only be called by the master account in the organization or by a delegated admin.

" }, "DescribeProduct":{ "name":"DescribeProduct", @@ -465,7 +465,8 @@ "input":{"shape":"DescribeProductAsAdminInput"}, "output":{"shape":"DescribeProductAsAdminOutput"}, "errors":[ - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParametersException"} ], "documentation":"

Gets information about the specified product. This operation is run with administrator access.

" }, @@ -519,7 +520,8 @@ "input":{"shape":"DescribeProvisioningArtifactInput"}, "output":{"shape":"DescribeProvisioningArtifactOutput"}, "errors":[ - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParametersException"} ], "documentation":"

Gets information about the specified provisioning artifact (also known as a version) for the specified product.

" }, @@ -604,7 +606,7 @@ {"shape":"InvalidStateException"}, {"shape":"OperationNotSupportedException"} ], - "documentation":"

Disable portfolio sharing through AWS Organizations feature. This feature will not delete your current shares but it will prevent you from creating new shares throughout your organization. Current shares will not be in sync with your organization structure if it changes after calling this API. This API can only be called by the master account in the organization.

" + "documentation":"

Disable portfolio sharing through AWS Organizations feature. This feature will not delete your current shares but it will prevent you from creating new shares throughout your organization. Current shares will not be in sync with your organization structure if it changes after calling this API. This API can only be called by the master account in the organization.

This API can't be invoked if there are active delegated administrators in the organization.

Note that a delegated administrator is not authorized to invoke DisableAWSOrganizationsAccess.

" }, "DisassociateBudgetFromResource":{ "name":"DisassociateBudgetFromResource", @@ -646,7 +648,7 @@ {"shape":"ResourceInUseException"}, {"shape":"InvalidParametersException"} ], - "documentation":"

Disassociates the specified product from the specified portfolio.

" + "documentation":"

Disassociates the specified product from the specified portfolio.

A delegated admin is authorized to invoke this command.

" }, "DisassociateServiceActionFromProvisioningArtifact":{ "name":"DisassociateServiceActionFromProvisioningArtifact", @@ -688,7 +690,7 @@ {"shape":"InvalidStateException"}, {"shape":"OperationNotSupportedException"} ], - "documentation":"

Enable portfolio sharing feature through AWS Organizations. This API will allow Service Catalog to receive updates on your organization in order to sync your shares with the current structure. This API can only be called by the master account in the organization.

By calling this API Service Catalog will make a call to organizations:EnableAWSServiceAccess on your behalf so that your shares can be in sync with any changes in your AWS Organizations structure.

" + "documentation":"

Enable portfolio sharing feature through AWS Organizations. This API will allow Service Catalog to receive updates on your organization in order to sync your shares with the current structure. This API can only be called by the master account in the organization.

By calling this API Service Catalog will make a call to organizations:EnableAWSServiceAccess on your behalf so that your shares can be in sync with any changes in your AWS Organizations structure.

Note that a delegated administrator is not authorized to invoke EnableAWSOrganizationsAccess.

" }, "ExecuteProvisionedProductPlan":{ "name":"ExecuteProvisionedProductPlan", @@ -732,7 +734,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"OperationNotSupportedException"} ], - "documentation":"

Get the Access Status for AWS Organization portfolio share feature. This API can only be called by the master account in the organization.

" + "documentation":"

Get the Access Status for AWS Organization portfolio share feature. This API can only be called by the master account in the organization or by a delegated admin.

" }, "ListAcceptedPortfolioShares":{ "name":"ListAcceptedPortfolioShares", @@ -803,7 +805,7 @@ {"shape":"InvalidParametersException"}, {"shape":"OperationNotSupportedException"} ], - "documentation":"

Lists the organization nodes that have access to the specified portfolio. This API can only be called by the master account in the organization.

" + "documentation":"

Lists the organization nodes that have access to the specified portfolio. This API can only be called by the master account in the organization or by a delegated admin.

If a delegated admin is de-registered, they can no longer perform this operation.

" }, "ListPortfolioAccess":{ "name":"ListPortfolioAccess", @@ -817,7 +819,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParametersException"} ], - "documentation":"

Lists the account IDs that have access to the specified portfolio.

" + "documentation":"

Lists the account IDs that have access to the specified portfolio.

A delegated admin can list the accounts that have access to the shared portfolio. Note that if a delegated admin is de-registered, they can no longer perform this operation.

" }, "ListPortfolios":{ "name":"ListPortfolios", @@ -1988,7 +1990,7 @@ }, "Definition":{ "shape":"ServiceActionDefinitionMap", - "documentation":"

The self-service action definition. Can be one of the following:

Name

The name of the AWS Systems Manager Document. For example, AWS-RestartEC2Instance.

Version

The AWS Systems Manager automation document version. For example, \"Version\": \"1\"

AssumeRole

The Amazon Resource Name (ARN) of the role that performs the self-service actions on your behalf. For example, \"AssumeRole\": \"arn:aws:iam::12345678910:role/ActionRole\".

To reuse the provisioned product launch role, set to \"AssumeRole\": \"LAUNCH_ROLE\".

Parameters

The list of parameters in JSON format.

For example: [{\\\"Name\\\":\\\"InstanceId\\\",\\\"Type\\\":\\\"TARGET\\\"}] or [{\\\"Name\\\":\\\"InstanceId\\\",\\\"Type\\\":\\\"TEXT_VALUE\\\"}].

" + "documentation":"

The self-service action definition. Can be one of the following:

Name

The name of the AWS Systems Manager document (SSM document). For example, AWS-RestartEC2Instance.

If you are using a shared SSM document, you must provide the ARN instead of the name.

Version

The AWS Systems Manager automation document version. For example, \"Version\": \"1\"

AssumeRole

The Amazon Resource Name (ARN) of the role that performs the self-service actions on your behalf. For example, \"AssumeRole\": \"arn:aws:iam::12345678910:role/ActionRole\".

To reuse the provisioned product launch role, set to \"AssumeRole\": \"LAUNCH_ROLE\".

Parameters

The list of parameters in JSON format.

For example: [{\\\"Name\\\":\\\"InstanceId\\\",\\\"Type\\\":\\\"TARGET\\\"}] or [{\\\"Name\\\":\\\"InstanceId\\\",\\\"Type\\\":\\\"TEXT_VALUE\\\"}].

" }, "Description":{ "shape":"ServiceActionDescription", @@ -2348,7 +2350,6 @@ }, "DescribeProductAsAdminInput":{ "type":"structure", - "required":["Id"], "members":{ "AcceptLanguage":{ "shape":"AcceptLanguage", @@ -2357,6 +2358,10 @@ "Id":{ "shape":"Id", "documentation":"

The product identifier.

" + }, + "Name":{ + "shape":"ProductViewName", + "documentation":"

The product name.

" } } }, @@ -2387,7 +2392,6 @@ }, "DescribeProductInput":{ "type":"structure", - "required":["Id"], "members":{ "AcceptLanguage":{ "shape":"AcceptLanguage", @@ -2396,6 +2400,10 @@ "Id":{ "shape":"Id", "documentation":"

The product identifier.

" + }, + "Name":{ + "shape":"ProductViewName", + "documentation":"

The product name.

" } } }, @@ -2413,6 +2421,10 @@ "Budgets":{ "shape":"Budgets", "documentation":"

Information about the associated budgets.

" + }, + "LaunchPaths":{ + "shape":"LaunchPaths", + "documentation":"

Information about the associated launch paths.

" } } }, @@ -2511,10 +2523,6 @@ }, "DescribeProvisioningArtifactInput":{ "type":"structure", - "required":[ - "ProvisioningArtifactId", - "ProductId" - ], "members":{ "AcceptLanguage":{ "shape":"AcceptLanguage", @@ -2528,6 +2536,14 @@ "shape":"Id", "documentation":"

The product identifier.

" }, + "ProvisioningArtifactName":{ + "shape":"ProvisioningArtifactName", + "documentation":"

The provisioning artifact name.

" + }, + "ProductName":{ + "shape":"ProductViewName", + "documentation":"

The product name.

" + }, "Verbose":{ "shape":"Verbose", "documentation":"

Indicates whether a verbose level of detail is enabled.

" @@ -3075,6 +3091,20 @@ "exception":true }, "LastRequestId":{"type":"string"}, + "LaunchPath":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"Id", + "documentation":"

The identifier of the launch path.

" + }, + "Name":{ + "shape":"PortfolioName", + "documentation":"

The name of the launch path.

" + } + }, + "documentation":"

A launch path object.

" + }, "LaunchPathSummaries":{ "type":"list", "member":{"shape":"LaunchPathSummary"} @@ -3101,6 +3131,10 @@ }, "documentation":"

Summary information about a product path for a user.

" }, + "LaunchPaths":{ + "type":"list", + "member":{"shape":"LaunchPath"} + }, "LimitExceededException":{ "type":"structure", "members":{ @@ -4501,7 +4535,10 @@ }, "ProvisioningArtifactActive":{"type":"boolean"}, "ProvisioningArtifactCreatedTime":{"type":"timestamp"}, - "ProvisioningArtifactDescription":{"type":"string"}, + "ProvisioningArtifactDescription":{ + "type":"string", + "max":8192 + }, "ProvisioningArtifactDetail":{ "type":"structure", "members":{ @@ -4556,7 +4593,10 @@ }, "ProvisioningArtifactInfoKey":{"type":"string"}, "ProvisioningArtifactInfoValue":{"type":"string"}, - "ProvisioningArtifactName":{"type":"string"}, + "ProvisioningArtifactName":{ + "type":"string", + "max":8192 + }, "ProvisioningArtifactParameter":{ "type":"structure", "members":{ @@ -5912,7 +5952,7 @@ }, "ProvisionedProductProperties":{ "shape":"ProvisionedProductProperties", - "documentation":"

A map that contains the provisioned product properties to be updated.

The OWNER key only accepts user ARNs. The owner is the user that is allowed to see, update, terminate, and execute service actions in the provisioned product.

The administrator can change the owner of a provisioned product to another IAM user within the same account. Both end user owners and administrators can see ownership history of the provisioned product using the ListRecordHistory API. The new owner can describe all past records for the provisioned product using the DescribeRecord API. The previous owner can no longer use DescribeRecord, but can still see the product's history from when he was an owner using ListRecordHistory.

If a provisioned product ownership is assigned to an end user, they can see and perform any action through the API or Service Catalog console such as update, terminate, and execute service actions. If an end user provisions a product and the owner is updated to someone else, they will no longer be able to see or perform any actions through API or the Service Catalog console on that provisioned product.

" + "documentation":"

A map that contains the provisioned product properties to be updated.

The OWNER key accepts user ARNs and role ARNs. The owner is the user that is allowed to see, update, terminate, and execute service actions in the provisioned product.

The administrator can change the owner of a provisioned product to another IAM user within the same account. Both end user owners and administrators can see ownership history of the provisioned product using the ListRecordHistory API. The new owner can describe all past records for the provisioned product using the DescribeRecord API. The previous owner can no longer use DescribeRecord, but can still see the product's history from when he was an owner using ListRecordHistory.

If a provisioned product ownership is assigned to an end user, they can see and perform any action through the API or Service Catalog console such as update, terminate, and execute service actions. If an end user provisions a product and the owner is updated to someone else, they will no longer be able to see or perform any actions through API or the Service Catalog console on that provisioned product.

" }, "IdempotencyToken":{ "shape":"IdempotencyToken", diff --git a/botocore/data/servicediscovery/2017-03-14/service-2.json b/botocore/data/servicediscovery/2017-03-14/service-2.json index 26fdf271..21f6d107 100644 --- a/botocore/data/servicediscovery/2017-03-14/service-2.json +++ b/botocore/data/servicediscovery/2017-03-14/service-2.json @@ -25,7 +25,8 @@ {"shape":"InvalidInput"}, {"shape":"NamespaceAlreadyExists"}, {"shape":"ResourceLimitExceeded"}, - {"shape":"DuplicateRequest"} + {"shape":"DuplicateRequest"}, + {"shape":"TooManyTagsException"} ], "documentation":"

Creates an HTTP namespace. Service instances that you register using an HTTP namespace can be discovered using a DiscoverInstances request but can't be discovered using DNS.

For the current limit on the number of namespaces that you can create using the same AWS account, see AWS Cloud Map Limits in the AWS Cloud Map Developer Guide.

" }, @@ -41,7 +42,8 @@ {"shape":"InvalidInput"}, {"shape":"NamespaceAlreadyExists"}, {"shape":"ResourceLimitExceeded"}, - {"shape":"DuplicateRequest"} + {"shape":"DuplicateRequest"}, + {"shape":"TooManyTagsException"} ], "documentation":"

Creates a private namespace based on DNS, which will be visible only inside a specified Amazon VPC. The namespace defines your service naming scheme. For example, if you name your namespace example.com and name your service backend, the resulting DNS name for the service will be backend.example.com. For the current limit on the number of namespaces that you can create using the same AWS account, see AWS Cloud Map Limits in the AWS Cloud Map Developer Guide.

" }, @@ -57,7 +59,8 @@ {"shape":"InvalidInput"}, {"shape":"NamespaceAlreadyExists"}, {"shape":"ResourceLimitExceeded"}, - {"shape":"DuplicateRequest"} + {"shape":"DuplicateRequest"}, + {"shape":"TooManyTagsException"} ], "documentation":"

Creates a public namespace based on DNS, which will be visible on the internet. The namespace defines your service naming scheme. For example, if you name your namespace example.com and name your service backend, the resulting DNS name for the service will be backend.example.com. For the current limit on the number of namespaces that you can create using the same AWS account, see AWS Cloud Map Limits in the AWS Cloud Map Developer Guide.

" }, @@ -73,7 +76,8 @@ {"shape":"InvalidInput"}, {"shape":"ResourceLimitExceeded"}, {"shape":"NamespaceNotFound"}, - {"shape":"ServiceAlreadyExists"} + {"shape":"ServiceAlreadyExists"}, + {"shape":"TooManyTagsException"} ], "documentation":"

Creates a service, which defines the configuration for the following entities:

After you create the service, you can submit a RegisterInstance request, and AWS Cloud Map uses the values in the configuration to create the specified entities.

For the current limit on the number of instances that you can register using the same namespace and using the same service, see AWS Cloud Map Limits in the AWS Cloud Map Developer Guide.

" }, @@ -136,7 +140,8 @@ "errors":[ {"shape":"ServiceNotFound"}, {"shape":"NamespaceNotFound"}, - {"shape":"InvalidInput"} + {"shape":"InvalidInput"}, + {"shape":"RequestLimitExceeded"} ], "documentation":"

Discovers registered instances for a specified namespace and service. You can use DiscoverInstances to discover instances for any type of namespace. For public and private DNS namespaces, you can also use DNS queries to discover instances.

", "endpoint":{"hostPrefix":"data-"} @@ -266,6 +271,20 @@ ], "documentation":"

Lists summary information for all the services that are associated with one or more specified namespaces.

" }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidInput"} + ], + "documentation":"

Lists tags for the specified resource.

" + }, "RegisterInstance":{ "name":"RegisterInstance", "http":{ @@ -283,6 +302,35 @@ ], "documentation":"

Creates or updates one or more records and, optionally, creates a health check based on the settings in a specified service. When you submit a RegisterInstance request, the following occurs:

One RegisterInstance request must complete before you can submit another request and specify the same service ID and instance ID.

For more information, see CreateService.

When AWS Cloud Map receives a DNS query for the specified DNS name, it returns the applicable value:

For the current limit on the number of instances that you can register using the same namespace and using the same service, see AWS Cloud Map Limits in the AWS Cloud Map Developer Guide.

" }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyTagsException"}, + {"shape":"InvalidInput"} + ], + "documentation":"

Adds one or more tags to the specified resource.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidInput"} + ], + "documentation":"

Removes one or more tags from the specified resource.

" + }, "UpdateInstanceCustomHealthStatus":{ "name":"UpdateInstanceCustomHealthStatus", "http":{ @@ -315,17 +363,24 @@ } }, "shapes":{ + "AmazonResourceName":{ + "type":"string", + "max":1011, + "min":1 + }, "Arn":{ "type":"string", "max":255 }, "AttrKey":{ "type":"string", - "max":255 + "max":255, + "pattern":"^[a-zA-Z0-9!-~]+$" }, "AttrValue":{ "type":"string", - "max":1024 + "max":1024, + "pattern":"^([a-zA-Z0-9!-~][ \\ta-zA-Z0-9!-~]*){0,1}[a-zA-Z0-9!-~]{0,1}$" }, "Attributes":{ "type":"map", @@ -349,6 +404,10 @@ "Description":{ "shape":"ResourceDescription", "documentation":"

A description for the namespace.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to add to the namespace. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" } } }, @@ -384,6 +443,10 @@ "Vpc":{ "shape":"ResourceId", "documentation":"

The ID of the Amazon VPC that you want to associate the namespace with.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to add to the namespace. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" } } }, @@ -412,6 +475,10 @@ "Description":{ "shape":"ResourceDescription", "documentation":"

A description for the namespace.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to add to the namespace. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" } } }, @@ -456,6 +523,10 @@ "HealthCheckCustomConfig":{ "shape":"HealthCheckCustomConfig", "documentation":"

A complex type that contains information about an optional custom health check.

If you specify a health check configuration, you can specify either HealthCheckCustomConfig or HealthCheckConfig but not both.

You can't add, update, or delete a HealthCheckCustomConfig configuration from an existing service.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to add to the service. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" } } }, @@ -559,7 +630,7 @@ "documentation":"

The name of the service that you specified when you registered the instance.

" }, "MaxResults":{ - "shape":"MaxResults", + "shape":"DiscoverMaxResults", "documentation":"

The maximum number of instances that you want AWS Cloud Map to return in the response to a DiscoverInstances request. If you don't specify a value for MaxResults, AWS Cloud Map returns up to 100 instances.

" }, "QueryParameters":{ @@ -581,6 +652,11 @@ } } }, + "DiscoverMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, "DnsConfig":{ "type":"structure", "required":["DnsRecords"], @@ -1075,6 +1151,25 @@ } } }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceARN"], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) of the resource that you want to retrieve tags for.

" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

The tags that are assigned to the resource.

" + } + } + }, "MaxResults":{ "type":"integer", "max":100, @@ -1419,7 +1514,7 @@ }, "Attributes":{ "shape":"Attributes", - "documentation":"

A string map that contains the following information for the service that you specify in ServiceId:

Supported attribute keys include the following:

AWS_ALIAS_DNS_NAME

If you want AWS Cloud Map to create an Amazon Route 53 alias record that routes traffic to an Elastic Load Balancing load balancer, specify the DNS name that is associated with the load balancer. For information about how to get the DNS name, see \"DNSName\" in the topic AliasTarget in the Route 53 API Reference.

Note the following:

AWS_INIT_HEALTH_STATUS

If the service configuration includes HealthCheckCustomConfig, you can optionally use AWS_INIT_HEALTH_STATUS to specify the initial status of the custom health check, HEALTHY or UNHEALTHY. If you don't specify a value for AWS_INIT_HEALTH_STATUS, the initial status is HEALTHY.

AWS_INSTANCE_CNAME

If the service configuration includes a CNAME record, the domain name that you want Route 53 to return in response to DNS queries, for example, example.com.

This value is required if the service specified by ServiceId includes settings for an CNAME record.

AWS_INSTANCE_IPV4

If the service configuration includes an A record, the IPv4 address that you want Route 53 to return in response to DNS queries, for example, 192.0.2.44.

This value is required if the service specified by ServiceId includes settings for an A record. If the service includes settings for an SRV record, you must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both.

AWS_INSTANCE_IPV6

If the service configuration includes an AAAA record, the IPv6 address that you want Route 53 to return in response to DNS queries, for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345.

This value is required if the service specified by ServiceId includes settings for an AAAA record. If the service includes settings for an SRV record, you must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both.

AWS_INSTANCE_PORT

If the service includes an SRV record, the value that you want Route 53 to return for the port.

If the service includes HealthCheckConfig, the port on the endpoint that you want Route 53 to send requests to.

This value is required if you specified settings for an SRV record or a Route 53 health check when you created the service.

Custom attributes

You can add up to 30 custom attributes. For each key-value pair, the maximum length of the attribute name is 255 characters, and the maximum length of the attribute value is 1,024 characters.

" + "documentation":"

A string map that contains the following information for the service that you specify in ServiceId:

Supported attribute keys include the following:

AWS_ALIAS_DNS_NAME

If you want AWS Cloud Map to create an Amazon Route 53 alias record that routes traffic to an Elastic Load Balancing load balancer, specify the DNS name that is associated with the load balancer. For information about how to get the DNS name, see \"DNSName\" in the topic AliasTarget in the Route 53 API Reference.

Note the following:

  • The configuration for the service that is specified by ServiceId must include settings for an A record, an AAAA record, or both.

  • In the service that is specified by ServiceId, the value of RoutingPolicy must be WEIGHTED.

  • If the service that is specified by ServiceId includes HealthCheckConfig settings, AWS Cloud Map will create the Route 53 health check, but it won't associate the health check with the alias record.

  • Auto naming currently doesn't support creating alias records that route traffic to AWS resources other than ELB load balancers.

  • If you specify a value for AWS_ALIAS_DNS_NAME, don't specify values for any of the AWS_INSTANCE attributes.

AWS_INIT_HEALTH_STATUS

If the service configuration includes HealthCheckCustomConfig, you can optionally use AWS_INIT_HEALTH_STATUS to specify the initial status of the custom health check, HEALTHY or UNHEALTHY. If you don't specify a value for AWS_INIT_HEALTH_STATUS, the initial status is HEALTHY.

AWS_INSTANCE_CNAME

If the service configuration includes a CNAME record, the domain name that you want Route 53 to return in response to DNS queries, for example, example.com.

This value is required if the service specified by ServiceId includes settings for a CNAME record.

AWS_INSTANCE_IPV4

If the service configuration includes an A record, the IPv4 address that you want Route 53 to return in response to DNS queries, for example, 192.0.2.44.

This value is required if the service specified by ServiceId includes settings for an A record. If the service includes settings for an SRV record, you must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both.

AWS_INSTANCE_IPV6

If the service configuration includes an AAAA record, the IPv6 address that you want Route 53 to return in response to DNS queries, for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345.

This value is required if the service specified by ServiceId includes settings for an AAAA record. If the service includes settings for an SRV record, you must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both.

AWS_INSTANCE_PORT

If the service includes an SRV record, the value that you want Route 53 to return for the port.

If the service includes HealthCheckConfig, the port on the endpoint that you want Route 53 to send requests to.

This value is required if you specified settings for an SRV record or a Route 53 health check when you created the service.

Custom attributes

You can add up to 30 custom attributes. For each key-value pair, the maximum length of the attribute name is 255 characters, and the maximum length of the attribute value is 1,024 characters. Total size of all provided attributes (sum of all keys and values) must not exceed 5,000 characters.

" } } }, @@ -1432,6 +1527,14 @@ } } }, + "RequestLimitExceeded":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The operation can't be completed because you've reached the limit on the number of requests.

", + "exception":true + }, "ResourceCount":{"type":"integer"}, "ResourceDescription":{ "type":"string", @@ -1457,6 +1560,14 @@ "documentation":"

The resource can't be created because you've reached the limit on the number of resources.

", "exception":true }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The operation can't be completed because the resource was not found.

", + "exception":true + }, "ResourcePath":{ "type":"string", "max":255 @@ -1536,7 +1647,6 @@ }, "ServiceChange":{ "type":"structure", - "required":["DnsConfig"], "members":{ "Description":{ "shape":"ResourceDescription", @@ -1629,7 +1739,103 @@ }, "documentation":"

A complex type that contains information about a specified service.

" }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

The key identifier, or name, of the tag.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

The string value that's associated with the key of the tag. You can set the value of a tag to an empty string, but you can't set the value of a tag to null.

" + } + }, + "documentation":"

A custom key-value pair associated with a resource.

" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "Tags" + ], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) of the resource that you want to add tags to.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to add to the specified resource. Specifying the tag key is required. You can set the value of a tag to an empty string, but you can't set the value of a tag to null.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, "Timestamp":{"type":"timestamp"}, + "TooManyTagsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "ResourceName":{ + "shape":"AmazonResourceName", + "documentation":"

The name of the resource.

" + } + }, + "documentation":"

The list of tags on the resource is over the limit. The maximum number of tags that can be applied to a resource is 50.

", + "exception":true + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "TagKeys" + ], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) of the resource that you want to remove tags from.

" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

The tag keys to remove from the specified resource.

" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateInstanceCustomHealthStatusRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/shield/2016-06-02/service-2.json b/botocore/data/shield/2016-06-02/service-2.json index c9226f46..a37e6df3 100644 --- a/botocore/data/shield/2016-06-02/service-2.json +++ b/botocore/data/shield/2016-06-02/service-2.json @@ -31,7 +31,7 @@ {"shape":"OptimisticLockException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Authorizes the DDoS Response team (DRT) to access the specified Amazon S3 bucket containing your AWS WAF logs. You can associate up to 10 Amazon S3 buckets with your subscription.

To use the services of the DRT and make an AssociateDRTLogBucket request, you must be subscribed to the Business Support plan or the Enterprise Support plan.

" + "documentation":"

Authorizes the DDoS Response Team (DRT) to access the specified Amazon S3 bucket containing your AWS WAF logs. You can associate up to 10 Amazon S3 buckets with your subscription.

To use the services of the DRT and make an AssociateDRTLogBucket request, you must be subscribed to the Business Support plan or the Enterprise Support plan.

" }, "AssociateDRTRole":{ "name":"AssociateDRTRole", @@ -49,7 +49,7 @@ {"shape":"OptimisticLockException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Authorizes the DDoS Response team (DRT), using the specified role, to access your AWS account to assist with DDoS attack mitigation during potential attacks. This enables the DRT to inspect your AWS WAF configuration and create or update AWS WAF rules and web ACLs.

You can associate only one RoleArn with your subscription. If you submit an AssociateDRTRole request for an account that already has an associated role, the new RoleArn will replace the existing RoleArn.

Prior to making the AssociateDRTRole request, you must attach the AWSShieldDRTAccessPolicy managed policy to the role you will specify in the request. For more information see Attaching and Detaching IAM Policies. The role must also trust the service principal drt.shield.amazonaws.com. For more information, see IAM JSON Policy Elements: Principal.

The DRT will have access only to your AWS WAF and Shield resources. By submitting this request, you authorize the DRT to inspect your AWS WAF and Shield configuration and create and update AWS WAF rules and web ACLs on your behalf. The DRT takes these actions only if explicitly authorized by you.

You must have the iam:PassRole permission to make an AssociateDRTRole request. For more information, see Granting a User Permissions to Pass a Role to an AWS Service.

To use the services of the DRT and make an AssociateDRTRole request, you must be subscribed to the Business Support plan or the Enterprise Support plan.

" + "documentation":"

Authorizes the DDoS Response Team (DRT), using the specified role, to access your AWS account to assist with DDoS attack mitigation during potential attacks. This enables the DRT to inspect your AWS WAF configuration and create or update AWS WAF rules and web ACLs.

You can associate only one RoleArn with your subscription. If you submit an AssociateDRTRole request for an account that already has an associated role, the new RoleArn will replace the existing RoleArn.

Prior to making the AssociateDRTRole request, you must attach the AWSShieldDRTAccessPolicy managed policy to the role you will specify in the request. For more information see Attaching and Detaching IAM Policies. The role must also trust the service principal drt.shield.amazonaws.com. For more information, see IAM JSON Policy Elements: Principal.

The DRT will have access only to your AWS WAF and Shield resources. By submitting this request, you authorize the DRT to inspect your AWS WAF and Shield configuration and create and update AWS WAF rules and web ACLs on your behalf. The DRT takes these actions only if explicitly authorized by you.

You must have the iam:PassRole permission to make an AssociateDRTRole request. For more information, see Granting a User Permissions to Pass a Role to an AWS Service.

To use the services of the DRT and make an AssociateDRTRole request, you must be subscribed to the Business Support plan or the Enterprise Support plan.

" }, "AssociateHealthCheck":{ "name":"AssociateHealthCheck", @@ -68,6 +68,23 @@ ], "documentation":"

Adds health-based detection to the Shield Advanced protection for a resource. Shield Advanced health-based detection uses the health of your AWS resource to improve responsiveness and accuracy in attack detection and mitigation.

You define the health check in Route 53 and then associate it with your Shield Advanced protection. For more information, see Shield Advanced Health-Based Detection in the AWS WAF and AWS Shield Developer Guide.

" }, + "AssociateProactiveEngagementDetails":{ + "name":"AssociateProactiveEngagementDetails", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateProactiveEngagementDetailsRequest"}, + "output":{"shape":"AssociateProactiveEngagementDetailsResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"InvalidOperationException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OptimisticLockException"} + ], + "documentation":"

Initializes proactive engagement and sets the list of contacts for the DDoS Response Team (DRT) to use. You must provide at least one phone number in the emergency contact list.

After you have initialized proactive engagement using this call, to disable or enable proactive engagement, use the calls DisableProactiveEngagement and EnableProactiveEngagement.

This call defines the list of email addresses and phone numbers that the DDoS Response Team (DRT) can use to contact you for escalations to the DRT and to initiate proactive customer support.

The contacts that you provide in the request replace any contacts that were already defined. If you already have contacts defined and want to use them, retrieve the list using DescribeEmergencyContactSettings and then provide it to this call.

" + }, "CreateProtection":{ "name":"CreateProtection", "http":{ @@ -99,7 +116,7 @@ {"shape":"InternalErrorException"}, {"shape":"ResourceAlreadyExistsException"} ], - "documentation":"

Activates AWS Shield Advanced for an account.

As part of this request you can specify EmergencySettings that automaticaly grant the DDoS response team (DRT) needed permissions to assist you during a suspected DDoS attack. For more information see Authorize the DDoS Response Team to Create Rules and Web ACLs on Your Behalf.

To use the services of the DRT, you must be subscribed to the Business Support plan or the Enterprise Support plan.

When you initally create a subscription, your subscription is set to be automatically renewed at the end of the existing subscription period. You can change this by submitting an UpdateSubscription request.

" + "documentation":"

Activates AWS Shield Advanced for an account.

When you initially create a subscription, your subscription is set to be automatically renewed at the end of the existing subscription period. You can change this by submitting an UpdateSubscription request.

" }, "DeleteProtection":{ "name":"DeleteProtection", @@ -158,7 +175,7 @@ {"shape":"InternalErrorException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Returns the current role and list of Amazon S3 log buckets used by the DDoS Response team (DRT) to access your AWS account while assisting with attack mitigation.

" + "documentation":"

Returns the current role and list of Amazon S3 log buckets used by the DDoS Response Team (DRT) to access your AWS account while assisting with attack mitigation.

" }, "DescribeEmergencyContactSettings":{ "name":"DescribeEmergencyContactSettings", @@ -172,7 +189,7 @@ {"shape":"InternalErrorException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Lists the email addresses that the DRT can use to contact you during a suspected attack.

" + "documentation":"

A list of email addresses and phone numbers that the DDoS Response Team (DRT) can use to contact you if you have proactive engagement enabled, for escalations to the DRT and to initiate proactive customer support.

" }, "DescribeProtection":{ "name":"DescribeProtection", @@ -203,6 +220,23 @@ ], "documentation":"

Provides details about the AWS Shield Advanced subscription for an account.

" }, + "DisableProactiveEngagement":{ + "name":"DisableProactiveEngagement", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableProactiveEngagementRequest"}, + "output":{"shape":"DisableProactiveEngagementResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"InvalidOperationException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OptimisticLockException"} + ], + "documentation":"

Removes authorization from the DDoS Response Team (DRT) to notify contacts about escalations to the DRT and to initiate proactive customer support.

" + }, "DisassociateDRTLogBucket":{ "name":"DisassociateDRTLogBucket", "http":{ @@ -219,7 +253,7 @@ {"shape":"OptimisticLockException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Removes the DDoS Response team's (DRT) access to the specified Amazon S3 bucket containing your AWS WAF logs.

To make a DisassociateDRTLogBucket request, you must be subscribed to the Business Support plan or the Enterprise Support plan. However, if you are not subscribed to one of these support plans, but had been previously and had granted the DRT access to your account, you can submit a DisassociateDRTLogBucket request to remove this access.

" + "documentation":"

Removes the DDoS Response Team's (DRT) access to the specified Amazon S3 bucket containing your AWS WAF logs.

To make a DisassociateDRTLogBucket request, you must be subscribed to the Business Support plan or the Enterprise Support plan. However, if you are not subscribed to one of these support plans, but had been previously and had granted the DRT access to your account, you can submit a DisassociateDRTLogBucket request to remove this access.

" }, "DisassociateDRTRole":{ "name":"DisassociateDRTRole", @@ -235,7 +269,7 @@ {"shape":"OptimisticLockException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Removes the DDoS Response team's (DRT) access to your AWS account.

To make a DisassociateDRTRole request, you must be subscribed to the Business Support plan or the Enterprise Support plan. However, if you are not subscribed to one of these support plans, but had been previously and had granted the DRT access to your account, you can submit a DisassociateDRTRole request to remove this access.

" + "documentation":"

Removes the DDoS Response Team's (DRT) access to your AWS account.

To make a DisassociateDRTRole request, you must be subscribed to the Business Support plan or the Enterprise Support plan. However, if you are not subscribed to one of these support plans, but had been previously and had granted the DRT access to your account, you can submit a DisassociateDRTRole request to remove this access.

" }, "DisassociateHealthCheck":{ "name":"DisassociateHealthCheck", @@ -253,6 +287,23 @@ ], "documentation":"

Removes health-based detection from the Shield Advanced protection for a resource. Shield Advanced health-based detection uses the health of your AWS resource to improve responsiveness and accuracy in attack detection and mitigation.

You define the health check in Route 53 and then associate or disassociate it with your Shield Advanced protection. For more information, see Shield Advanced Health-Based Detection in the AWS WAF and AWS Shield Developer Guide.

" }, + "EnableProactiveEngagement":{ + "name":"EnableProactiveEngagement", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableProactiveEngagementRequest"}, + "output":{"shape":"EnableProactiveEngagementResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"InvalidOperationException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OptimisticLockException"} + ], + "documentation":"

Authorizes the DDoS Response Team (DRT) to use email and phone to notify contacts about escalations to the DRT and to initiate proactive customer support.

" + }, "GetSubscriptionState":{ "name":"GetSubscriptionState", "http":{ @@ -310,7 +361,7 @@ {"shape":"OptimisticLockException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Updates the details of the list of email addresses that the DRT can use to contact you during a suspected attack.

" + "documentation":"

Updates the details of the list of email addresses and phone numbers that the DDoS Response Team (DRT) can use to contact you if you have proactive engagement enabled, for escalations to the DRT and to initiate proactive customer support.

" }, "UpdateSubscription":{ "name":"UpdateSubscription", @@ -344,7 +395,7 @@ "members":{ "message":{"shape":"errorMessage"} }, - "documentation":"

In order to grant the necessary access to the DDoS Response Team, the user submitting the request must have the iam:PassRole permission. This error indicates the user did not have the appropriate permissions. For more information, see Granting a User Permissions to Pass a Role to an AWS Service.

", + "documentation":"

In order to grant the necessary access to the DDoS Response Team (DRT), the user submitting the request must have the iam:PassRole permission. This error indicates the user did not have the appropriate permissions. For more information, see Granting a User Permissions to Pass a Role to an AWS Service.

", "exception":true }, "AssociateDRTLogBucketRequest":{ @@ -399,6 +450,21 @@ "members":{ } }, + "AssociateProactiveEngagementDetailsRequest":{ + "type":"structure", + "required":["EmergencyContactList"], + "members":{ + "EmergencyContactList":{ + "shape":"EmergencyContactList", + "documentation":"

A list of email addresses and phone numbers that the DDoS Response Team (DRT) can use to contact you for escalations to the DRT and to initiate proactive customer support.

To enable proactive engagement, the contact list must include at least one phone number.

The contacts that you provide here replace any contacts that were already defined. If you already have contacts defined and want to use them, retrieve the list using DescribeEmergencyContactSettings and then provide it here.

" + } + } + }, + "AssociateProactiveEngagementDetailsResponse":{ + "type":"structure", + "members":{ + } + }, "AttackDetail":{ "type":"structure", "members":{ @@ -546,6 +612,12 @@ "DISABLED" ] }, + "ContactNotes":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^[\\w\\s\\.\\-,:/()+@]*$" + }, "Contributor":{ "type":"structure", "members":{ @@ -670,7 +742,7 @@ "members":{ "EmergencyContactList":{ "shape":"EmergencyContactList", - "documentation":"

A list of email addresses that the DRT can use to contact you during a suspected attack.

" + "documentation":"

A list of email addresses and phone numbers that the DDoS Response Team (DRT) can use to contact you if you have proactive engagement enabled, for escalations to the DRT and to initiate proactive customer support.

" } } }, @@ -710,6 +782,16 @@ } } }, + "DisableProactiveEngagementRequest":{ + "type":"structure", + "members":{ + } + }, + "DisableProactiveEngagementResponse":{ + "type":"structure", + "members":{ + } + }, "DisassociateDRTLogBucketRequest":{ "type":"structure", "required":["LogBucket"], @@ -774,10 +856,18 @@ "members":{ "EmailAddress":{ "shape":"EmailAddress", - "documentation":"

An email address that the DRT can use to contact you during a suspected attack.

" + "documentation":"

The email address for the contact.

" + }, + "PhoneNumber":{ + "shape":"PhoneNumber", + "documentation":"

The phone number for the contact.

" + }, + "ContactNotes":{ + "shape":"ContactNotes", + "documentation":"

Additional notes regarding the contact.

" } }, - "documentation":"

Contact information that the DRT can use to contact you during a suspected attack.

" + "documentation":"

Contact information that the DRT can use to contact you if you have proactive engagement enabled, for escalations to the DRT and to initiate proactive customer support.

" }, "EmergencyContactList":{ "type":"list", @@ -785,6 +875,16 @@ "max":10, "min":0 }, + "EnableProactiveEngagementRequest":{ + "type":"structure", + "members":{ + } + }, + "EnableProactiveEngagementResponse":{ + "type":"structure", + "members":{ + } + }, "GetSubscriptionStateRequest":{ "type":"structure", "members":{ @@ -1001,9 +1101,23 @@ "members":{ "message":{"shape":"errorMessage"} }, - "documentation":"

Exception that indicates that the protection state has been modified by another client. You can retry the request.

", + "documentation":"

Exception that indicates that the resource state has been modified by another client. Retrieve the resource and then retry your request.

", "exception":true }, + "PhoneNumber":{ + "type":"string", + "max":16, + "min":1, + "pattern":"^\\+[1-9]\\d{1,14}$" + }, + "ProactiveEngagementStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED", + "PENDING" + ] + }, "Protection":{ "type":"structure", "members":{ @@ -1130,6 +1244,10 @@ "Limits":{ "shape":"Limits", "documentation":"

Specifies how many protections of a given type you can create.

" + }, + "ProactiveEngagementStatus":{ + "shape":"ProactiveEngagementStatus", + "documentation":"

If ENABLED, the DDoS Response Team (DRT) will use email and phone to notify contacts about escalations to the DRT and to initiate proactive customer support.

If PENDING, you have requested proactive engagement and the request is pending. The status changes to ENABLED when your request is fully processed.

If DISABLED, the DRT will not proactively notify contacts about escalations or to initiate proactive customer support.

" } }, "documentation":"

Information about the AWS Shield Advanced subscription for an account.

" @@ -1233,7 +1351,7 @@ "members":{ "EmergencyContactList":{ "shape":"EmergencyContactList", - "documentation":"

A list of email addresses that the DRT can use to contact you during a suspected attack.

" + "documentation":"

A list of email addresses and phone numbers that the DDoS Response Team (DRT) can use to contact you if you have proactive engagement enabled, for escalations to the DRT and to initiate proactive customer support.

If you have proactive engagement enabled, the contact list must include at least one phone number.

" } } }, diff --git a/botocore/data/snowball/2016-06-30/service-2.json b/botocore/data/snowball/2016-06-30/service-2.json index 5b68ce4e..8f1fedb9 100644 --- a/botocore/data/snowball/2016-06-30/service-2.json +++ b/botocore/data/snowball/2016-06-30/service-2.json @@ -464,7 +464,7 @@ }, "SnowballType":{ "shape":"SnowballType", - "documentation":"

The type of AWS Snowball device to use for this cluster. Currently, the only supported device type for cluster jobs is EDGE.

For more information, see Snowball Edge Device Options in the Snowball Edge Developer Guide.

" + "documentation":"

The type of AWS Snowball device to use for this cluster.

For cluster jobs, AWS Snowball currently supports only the EDGE device type.

" }, "CreationDate":{ "shape":"Timestamp", @@ -580,11 +580,11 @@ }, "SnowballType":{ "shape":"SnowballType", - "documentation":"

The type of AWS Snowball device to use for this cluster. Currently, the only supported device type for cluster jobs is EDGE.

For more information, see Snowball Edge Device Options in the Snowball Edge Developer Guide.

" + "documentation":"

The type of AWS Snowball device to use for this cluster.

For cluster jobs, AWS Snowball currently supports only the EDGE device type.

" }, "ShippingOption":{ "shape":"ShippingOption", - "documentation":"

The shipping speed for each node in this cluster. This speed doesn't dictate how soon you'll get each Snowball Edge device, rather it represents how quickly each device moves to its destination while in transit. Regional shipping speeds are as follows:

  • In Australia, you have access to express shipping. Typically, devices shipped express are delivered in about a day.

  • In the European Union (EU), you have access to express shipping. Typically, Snowball Edges shipped express are delivered in about a day. In addition, most countries in the EU have access to standard shipping, which typically takes less than a week, one way.

  • In India, Snowball Edges are delivered in one to seven days.

  • In the US, you have access to one-day shipping and two-day shipping.

" + "documentation":"

The shipping speed for each node in this cluster. This speed doesn't dictate how soon you'll get each Snowball Edge device, rather it represents how quickly each device moves to its destination while in transit. Regional shipping speeds are as follows:

  • In Australia, you have access to express shipping. Typically, Snowballs shipped express are delivered in about a day.

  • In the European Union (EU), you have access to express shipping. Typically, Snowballs shipped express are delivered in about a day. In addition, most countries in the EU have access to standard shipping, which typically takes less than a week, one way.

  • In India, Snowballs are delivered in one to seven days.

  • In the United States of America (US), you have access to one-day shipping and two-day shipping.

" }, "Notification":{ "shape":"Notification", @@ -654,7 +654,7 @@ }, "SnowballType":{ "shape":"SnowballType", - "documentation":"

The type of AWS Snowball device to use for this job. Currently, the only supported device type for cluster jobs is EDGE.

For more information, see Snowball Edge Device Options in the Snowball Edge Developer Guide.

" + "documentation":"

The type of AWS Snowball device to use for this job.

For cluster jobs, AWS Snowball currently supports only the EDGE device type.

For more information, see Snowball Edge Device Options in the Snowball Edge Developer Guide.

" }, "ForwardingAddressId":{ "shape":"AddressId", @@ -663,6 +663,10 @@ "TaxDocuments":{ "shape":"TaxDocuments", "documentation":"

The tax documents required in your AWS Region.

" + }, + "DeviceConfiguration":{ + "shape":"DeviceConfiguration", + "documentation":"

Defines the device configuration for an AWS Snowcone job.

" } } }, @@ -784,6 +788,16 @@ } } }, + "DeviceConfiguration":{ + "type":"structure", + "members":{ + "SnowconeDeviceConfiguration":{ + "shape":"SnowconeDeviceConfiguration", + "documentation":"

Returns information about the device configuration for an AWS Snowcone job.

" + } + }, + "documentation":"

The container for SnowconeDeviceConfiguration.

" + }, "Ec2AmiResource":{ "type":"structure", "required":["AmiId"], @@ -928,7 +942,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

Job or cluster creation failed. One ore more inputs were invalid. Confirm that the CreateClusterRequest$SnowballType value supports your CreateJobRequest$JobType, and try again.

", + "documentation":"

Job or cluster creation failed. One or more inputs were invalid. Confirm that the CreateClusterRequest$SnowballType value supports your CreateJobRequest$JobType, and try again.

", "exception":true }, "InvalidJobStateException":{ @@ -1095,7 +1109,8 @@ "TaxDocuments":{ "shape":"TaxDocuments", "documentation":"

The metadata associated with the tax documents required in your AWS Region.

" - } + }, + "DeviceConfiguration":{"shape":"DeviceConfiguration"} }, "documentation":"

Contains information about a specific job including shipping information, job status, and other important metadata. This information is returned as a part of the response syntax of the DescribeJob action.

" }, @@ -1405,6 +1420,7 @@ "T100", "T42", "T98", + "T8", "NoPreference" ] }, @@ -1415,9 +1431,20 @@ "EDGE", "EDGE_C", "EDGE_CG", - "EDGE_S" + "EDGE_S", + "SNC1_HDD" ] }, + "SnowconeDeviceConfiguration":{ + "type":"structure", + "members":{ + "WirelessConnection":{ + "shape":"WirelessConnection", + "documentation":"

Configures the wireless connection for the AWS Snowcone device.

" + } + }, + "documentation":"

Specifies the device configuration for an AWS Snowcone job.

" + }, "SnsTopicARN":{ "type":"string", "max":255, @@ -1430,10 +1457,7 @@ "TaxDocuments":{ "type":"structure", "members":{ - "IND":{ - "shape":"INDTaxDocuments", - "documentation":"

The tax documents required in AWS Regions in India.

" - } + "IND":{"shape":"INDTaxDocuments"} }, "documentation":"

The tax documents required in your AWS Region.

" }, @@ -1535,6 +1559,16 @@ "type":"structure", "members":{ } + }, + "WirelessConnection":{ + "type":"structure", + "members":{ + "IsWifiEnabled":{ + "shape":"Boolean", + "documentation":"

Enables the Wi-Fi adapter on an AWS Snowcone device.

" + } + }, + "documentation":"

Configures the wireless connection on an AWS Snowcone device.

" } }, "documentation":"

AWS Snowball is a petabyte-scale data transport solution that uses secure devices to transfer large amounts of data between your on-premises data centers and Amazon Simple Storage Service (Amazon S3). The Snowball commands described here provide access to the same functionality that is available in the AWS Snowball Management Console, which enables you to create and manage jobs for Snowball. To transfer data locally with a Snowball device, you'll need to use the Snowball client or the Amazon S3 API adapter for Snowball. For more information, see the User Guide.

" diff --git a/botocore/data/ssm/2014-11-06/service-2.json b/botocore/data/ssm/2014-11-06/service-2.json index cc24bbf2..e6779a81 100644 --- a/botocore/data/ssm/2014-11-06/service-2.json +++ b/botocore/data/ssm/2014-11-06/service-2.json @@ -2066,6 +2066,7 @@ "documentation":"

Error returned if an attempt is made to register a patch group with a patch baseline that is already registered with a different patch baseline.

", "exception":true }, + "ApplyOnlyAtCronInterval":{"type":"boolean"}, "ApproveAfterDays":{ "type":"integer", "max":100, @@ -2227,6 +2228,10 @@ "SyncCompliance":{ "shape":"AssociationSyncCompliance", "documentation":"

The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT.

In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API action. In this case, compliance data is not managed by State Manager. It is managed by your direct call to the PutComplianceItems API action.

By default, all associations use AUTO mode.

" + }, + "ApplyOnlyAtCronInterval":{ + "shape":"ApplyOnlyAtCronInterval", + "documentation":"

By default, when you create a new association, the system runs it immediately after it is created and then according to the schedule you specified. Specify this option if you don't want an association to run immediately after you create it.

" } }, "documentation":"

Describes the parameters for a document.

" @@ -2628,6 +2633,10 @@ "SyncCompliance":{ "shape":"AssociationSyncCompliance", "documentation":"

The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT.

In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API action. In this case, compliance data is not managed by State Manager. It is managed by your direct call to the PutComplianceItems API action.

By default, all associations use AUTO mode.

" + }, + "ApplyOnlyAtCronInterval":{ + "shape":"ApplyOnlyAtCronInterval", + "documentation":"

By default, when you create a new association, the system runs it immediately after it is created and then according to the schedule you specified. Specify this option if you don't want an association to run immediately after you create it.

" } }, "documentation":"

Information about the association version.

" @@ -3929,6 +3938,10 @@ "SyncCompliance":{ "shape":"AssociationSyncCompliance", "documentation":"

The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT.

In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API action. In this case, compliance data is not managed by State Manager. It is managed by your direct call to the PutComplianceItems API action.

By default, all associations use AUTO mode.

" + }, + "ApplyOnlyAtCronInterval":{ + "shape":"ApplyOnlyAtCronInterval", + "documentation":"

By default, when you create a new association, the system runs it immediately after it is created and then according to the schedule you specified. Specify this option if you don't want an association to run immediately after you create it.

" } }, "documentation":"

Describes the association of a Systems Manager SSM document and an instance.

" @@ -4001,6 +4014,10 @@ "SyncCompliance":{ "shape":"AssociationSyncCompliance", "documentation":"

The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT.

In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API action. In this case, compliance data is not managed by State Manager. It is managed by your direct call to the PutComplianceItems API action.

By default, all associations use AUTO mode.

" + }, + "ApplyOnlyAtCronInterval":{ + "shape":"ApplyOnlyAtCronInterval", + "documentation":"

By default, when you create a new association, the system runs it immediately after it is created and then according to the schedule you specified. Specify this option if you don't want an association to run immediately after you create it.

" } } }, @@ -13476,6 +13493,10 @@ "SyncCompliance":{ "shape":"AssociationSyncCompliance", "documentation":"

The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT.

In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API action. In this case, compliance data is not managed by State Manager. It is managed by your direct call to the PutComplianceItems API action.

By default, all associations use AUTO mode.

" + }, + "ApplyOnlyAtCronInterval":{ + "shape":"ApplyOnlyAtCronInterval", + "documentation":"

By default, when you update an association, the system runs it immediately after it is updated and then according to the schedule you specified. Specify this option if you don't want an association to run immediately after you update it.

Also, if you specified this option when you created the association, you can reset it. To do so, specify the no-apply-only-at-cron-interval parameter when you update the association from the command line. This parameter forces the association to run immediately after updating it and according to the interval specified.

" } } }, diff --git a/botocore/data/storagegateway/2013-06-30/service-2.json b/botocore/data/storagegateway/2013-06-30/service-2.json index c63192e5..47386377 100644 --- a/botocore/data/storagegateway/2013-06-30/service-2.json +++ b/botocore/data/storagegateway/2013-06-30/service-2.json @@ -24,7 +24,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Activates the gateway you previously deployed on your host. In the activation process, you specify information such as the AWS Region that you want to use for storing snapshots or tapes, the time zone for scheduled snapshots the gateway snapshot schedule window, an activation key, and a name for your gateway. The activation process also associates your gateway with your account; for more information, see UpdateGatewayInformation.

You must turn on the gateway VM before you can activate your gateway.

" + "documentation":"

Activates the gateway you previously deployed on your host. In the activation process, you specify information such as the AWS Region that you want to use for storing snapshots or tapes, the time zone for scheduled snapshots, the gateway snapshot schedule window, an activation key, and a name for your gateway. The activation process also associates your gateway with your account. For more information, see UpdateGatewayInformation.

You must turn on the gateway VM before you can activate your gateway.

" }, "AddCache":{ "name":"AddCache", @@ -38,7 +38,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Configures one or more gateway local disks as cache for a gateway. This operation is only supported in the cached volume, tape and file gateway type (see Storage Gateway Concepts).

In the request, you specify the gateway Amazon Resource Name (ARN) to which you want to add cache, and one or more disk IDs that you want to configure as cache.

" + "documentation":"

Configures one or more gateway local disks as cache for a gateway. This operation is only supported in the cached volume, tape, and file gateway type (see How AWS Storage Gateway works (architecture)).

In the request, you specify the gateway Amazon Resource Name (ARN) to which you want to add cache, and one or more disk IDs that you want to configure as cache.

" }, "AddTagsToResource":{ "name":"AddTagsToResource", @@ -94,7 +94,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Assigns a tape to a tape pool for archiving. The tape assigned to a pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the S3 storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds to the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

" + "documentation":"

Assigns a tape to a tape pool for archiving. The tape assigned to a pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the S3 storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds to the pool.

Valid Values: GLACIER | DEEP_ARCHIVE

" }, "AttachVolume":{ "name":"AttachVolume", @@ -150,7 +150,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Creates a cached volume on a specified cached volume gateway. This operation is only supported in the cached volume gateway type.

Cache storage must be allocated to the gateway before you can create a cached volume. Use the AddCache operation to add cache storage to a gateway.

In the request, you must specify the gateway, size of the volume in bytes, the iSCSI target name, an IP address on which to expose the target, and a unique client token. In response, the gateway creates the volume and returns information about it. This information includes the volume Amazon Resource Name (ARN), its size, and the iSCSI target ARN that initiators can use to connect to the volume target.

Optionally, you can provide the ARN for an existing volume as the SourceVolumeARN for this cached volume, which creates an exact copy of the existing volume’s latest recovery point. The VolumeSizeInBytes value must be equal to or larger than the size of the copied volume, in bytes.

" + "documentation":"

Creates a cached volume on a specified cached volume gateway. This operation is only supported in the cached volume gateway type.

Cache storage must be allocated to the gateway before you can create a cached volume. Use the AddCache operation to add cache storage to a gateway.

In the request, you must specify the gateway, size of the volume in bytes, the iSCSI target name, an IP address on which to expose the target, and a unique client token. In response, the gateway creates the volume and returns information about it. This information includes the volume Amazon Resource Name (ARN), its size, and the iSCSI target ARN that initiators can use to connect to the volume target.

Optionally, you can provide the ARN for an existing volume as the SourceVolumeARN for this cached volume, which creates an exact copy of the existing volume’s latest recovery point. The VolumeSizeInBytes value must be equal to or larger than the size of the copied volume, in bytes.

" }, "CreateNFSFileShare":{ "name":"CreateNFSFileShare", @@ -164,7 +164,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Creates a Network File System (NFS) file share on an existing file gateway. In Storage Gateway, a file share is a file system mount point backed by Amazon S3 cloud storage. Storage Gateway exposes file shares using an NFS interface. This operation is only supported for file gateways.

File gateway requires AWS Security Token Service (AWS STS) to be activated to enable you to create a file share. Make sure AWS STS is activated in the AWS Region you are creating your file gateway in. If AWS STS is not activated in the AWS Region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateway does not support creating hard or symbolic links on a file share.

" + "documentation":"

Creates a Network File System (NFS) file share on an existing file gateway. In Storage Gateway, a file share is a file system mount point backed by Amazon S3 cloud storage. Storage Gateway exposes file shares using an NFS interface. This operation is only supported for file gateways.

File gateway requires AWS Security Token Service (AWS STS) to be activated to enable you to create a file share. Make sure AWS STS is activated in the AWS Region you are creating your file gateway in. If AWS STS is not activated in the AWS Region, activate it. For information about how to activate AWS STS, see Activating and deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateway does not support creating hard or symbolic links on a file share.

" }, "CreateSMBFileShare":{ "name":"CreateSMBFileShare", @@ -178,7 +178,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Creates a Server Message Block (SMB) file share on an existing file gateway. In Storage Gateway, a file share is a file system mount point backed by Amazon S3 cloud storage. Storage Gateway expose file shares using an SMB interface. This operation is only supported for file gateways.

File gateways require AWS Security Token Service (AWS STS) to be activated to enable you to create a file share. Make sure that AWS STS is activated in the AWS Region you are creating your file gateway in. If AWS STS is not activated in this AWS Region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateways don't support creating hard or symbolic links on a file share.

" + "documentation":"

Creates a Server Message Block (SMB) file share on an existing file gateway. In Storage Gateway, a file share is a file system mount point backed by Amazon S3 cloud storage. Storage Gateway exposes file shares using an SMB interface. This operation is only supported for file gateways.

File gateways require AWS Security Token Service (AWS STS) to be activated to enable you to create a file share. Make sure that AWS STS is activated in the AWS Region you are creating your file gateway in. If AWS STS is not activated in this AWS Region, activate it. For information about how to activate AWS STS, see Activating and deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateways don't support creating hard or symbolic links on a file share.

" }, "CreateSnapshot":{ "name":"CreateSnapshot", @@ -193,7 +193,7 @@ {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableError"} ], - "documentation":"

Initiates a snapshot of a volume.

AWS Storage Gateway provides the ability to back up point-in-time snapshots of your data to Amazon Simple Storage Service (Amazon S3) for durable off-site recovery, as well as import the data to an Amazon Elastic Block Store (EBS) volume in Amazon Elastic Compute Cloud (EC2). You can take snapshots of your gateway volume on a scheduled or ad hoc basis. This API enables you to take an ad hoc snapshot. For more information, see Editing a Snapshot Schedule.

In the CreateSnapshot request you identify the volume by providing its Amazon Resource Name (ARN). You must also provide description for the snapshot. When AWS Storage Gateway takes the snapshot of specified volume, the snapshot and description appears in the AWS Storage Gateway Console. In response, AWS Storage Gateway returns you a snapshot ID. You can use this snapshot ID to check the snapshot progress or later use it when you want to create a volume from a snapshot. This operation is only supported in stored and cached volume gateway type.

To list or delete a snapshot, you must use the Amazon EC2 API. For more information, see DescribeSnapshots or DeleteSnapshot in the EC2 API reference.

Volume and snapshot IDs are changing to a longer length ID format. For more information, see the important note on the Welcome page.

" + "documentation":"

Initiates a snapshot of a volume.

AWS Storage Gateway provides the ability to back up point-in-time snapshots of your data to Amazon Simple Storage Service (Amazon S3) for durable off-site recovery, as well as import the data to an Amazon Elastic Block Store (EBS) volume in Amazon Elastic Compute Cloud (EC2). You can take snapshots of your gateway volume on a scheduled or ad hoc basis. This API enables you to take an ad hoc snapshot. For more information, see Editing a snapshot schedule.

In the CreateSnapshot request you identify the volume by providing its Amazon Resource Name (ARN). You must also provide description for the snapshot. When AWS Storage Gateway takes the snapshot of specified volume, the snapshot and description appears in the AWS Storage Gateway Console. In response, AWS Storage Gateway returns you a snapshot ID. You can use this snapshot ID to check the snapshot progress or later use it when you want to create a volume from a snapshot. This operation is only supported in stored and cached volume gateway type.

To list or delete a snapshot, you must use the Amazon EC2 API. For more information, see DescribeSnapshots or DeleteSnapshot in the Amazon Elastic Compute Cloud API Reference.

Volume and snapshot IDs are changing to a longer length ID format. For more information, see the important note on the Welcome page.

" }, "CreateSnapshotFromVolumeRecoveryPoint":{ "name":"CreateSnapshotFromVolumeRecoveryPoint", @@ -208,7 +208,7 @@ {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableError"} ], - "documentation":"

Initiates a snapshot of a gateway from a volume recovery point. This operation is only supported in the cached volume gateway type.

A volume recovery point is a point in time at which all data of the volume is consistent and from which you can create a snapshot. To get a list of volume recovery point for cached volume gateway, use ListVolumeRecoveryPoints.

In the CreateSnapshotFromVolumeRecoveryPoint request, you identify the volume by providing its Amazon Resource Name (ARN). You must also provide a description for the snapshot. When the gateway takes a snapshot of the specified volume, the snapshot and its description appear in the AWS Storage Gateway console. In response, the gateway returns you a snapshot ID. You can use this snapshot ID to check the snapshot progress or later use it when you want to create a volume from a snapshot.

To list or delete a snapshot, you must use the Amazon EC2 API. For more information, in Amazon Elastic Compute Cloud API Reference.

" + "documentation":"

Initiates a snapshot of a gateway from a volume recovery point. This operation is only supported in the cached volume gateway type.

A volume recovery point is a point in time at which all data of the volume is consistent and from which you can create a snapshot. To get a list of volume recovery point for cached volume gateway, use ListVolumeRecoveryPoints.

In the CreateSnapshotFromVolumeRecoveryPoint request, you identify the volume by providing its Amazon Resource Name (ARN). You must also provide a description for the snapshot. When the gateway takes a snapshot of the specified volume, the snapshot and its description appear in the AWS Storage Gateway console. In response, the gateway returns you a snapshot ID. You can use this snapshot ID to check the snapshot progress or later use it when you want to create a volume from a snapshot.

To list or delete a snapshot, you must use the Amazon EC2 API. For more information, see DescribeSnapshots or DeleteSnapshot in the Amazon Elastic Compute Cloud API Reference.

" }, "CreateStorediSCSIVolume":{ "name":"CreateStorediSCSIVolume", @@ -236,7 +236,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Creates a virtual tape by using your own barcode. You write data to the virtual tape and then archive the tape. A barcode is unique and cannot be reused if it has already been used on a tape. This applies to barcodes used on deleted tapes. This operation is only supported in the tape gateway type.

Cache storage must be allocated to the gateway before you can create a virtual tape. Use the AddCache operation to add cache storage to a gateway.

" + "documentation":"

Creates a virtual tape by using your own barcode. You write data to the virtual tape and then archive the tape. A barcode is unique and cannot be reused if it has already been used on a tape. This applies to barcodes used on deleted tapes. This operation is only supported in the tape gateway type.

Cache storage must be allocated to the gateway before you can create a virtual tape. Use the AddCache operation to add cache storage to a gateway.

" }, "CreateTapes":{ "name":"CreateTapes", @@ -250,7 +250,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Creates one or more virtual tapes. You write data to the virtual tapes and then archive the tapes. This operation is only supported in the tape gateway type.

Cache storage must be allocated to the gateway before you can create virtual tapes. Use the AddCache operation to add cache storage to a gateway.

" + "documentation":"

Creates one or more virtual tapes. You write data to the virtual tapes and then archive the tapes. This operation is only supported in the tape gateway type.

Cache storage must be allocated to the gateway before you can create virtual tapes. Use the AddCache operation to add cache storage to a gateway.

" }, "DeleteAutomaticTapeCreationPolicy":{ "name":"DeleteAutomaticTapeCreationPolicy", @@ -264,7 +264,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Deletes the automatic tape creation policy of a gateway. If you delete this policy, new virtual tapes must be created manually. Use the Amazon Resource Name (ARN) of the gateway in your request to remove the policy.

" + "documentation":"

Deletes the automatic tape creation policy of a gateway. If you delete this policy, new virtual tapes must be created manually. Use the Amazon Resource Name (ARN) of the gateway in your request to remove the policy.

" }, "DeleteBandwidthRateLimit":{ "name":"DeleteBandwidthRateLimit", @@ -320,7 +320,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Deletes a gateway. To specify which gateway to delete, use the Amazon Resource Name (ARN) of the gateway in your request. The operation deletes the gateway; however, it does not delete the gateway virtual machine (VM) from your host computer.

After you delete a gateway, you cannot reactivate it. Completed snapshots of the gateway volumes are not deleted upon deleting the gateway, however, pending snapshots will not complete. After you delete a gateway, your next step is to remove it from your environment.

You no longer pay software charges after the gateway is deleted; however, your existing Amazon EBS snapshots persist and you will continue to be billed for these snapshots. You can choose to remove all remaining Amazon EBS snapshots by canceling your Amazon EC2 subscription.  If you prefer not to cancel your Amazon EC2 subscription, you can delete your snapshots using the Amazon EC2 console. For more information, see the AWS Storage Gateway Detail Page.

" + "documentation":"

Deletes a gateway. To specify which gateway to delete, use the Amazon Resource Name (ARN) of the gateway in your request. The operation deletes the gateway; however, it does not delete the gateway virtual machine (VM) from your host computer.

After you delete a gateway, you cannot reactivate it. Completed snapshots of the gateway volumes are not deleted upon deleting the gateway, however, pending snapshots will not complete. After you delete a gateway, your next step is to remove it from your environment.

You no longer pay software charges after the gateway is deleted; however, your existing Amazon EBS snapshots persist and you will continue to be billed for these snapshots. You can choose to remove all remaining Amazon EBS snapshots by canceling your Amazon EC2 subscription.  If you prefer not to cancel your Amazon EC2 subscription, you can delete your snapshots using the Amazon EC2 console. For more information, see the AWS Storage Gateway detail page.

" }, "DeleteSnapshotSchedule":{ "name":"DeleteSnapshotSchedule", @@ -334,7 +334,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Deletes a snapshot of a volume.

You can take snapshots of your gateway volumes on a scheduled or ad hoc basis. This API action enables you to delete a snapshot schedule for a volume. For more information, see Working with Snapshots. In the DeleteSnapshotSchedule request, you identify the volume by providing its Amazon Resource Name (ARN). This operation is only supported in stored and cached volume gateway types.

To list or delete a snapshot, you must use the Amazon EC2 API. For more information, go to DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.

" + "documentation":"

Deletes a snapshot of a volume.

You can take snapshots of your gateway volumes on a scheduled or ad hoc basis. This API action enables you to delete a snapshot schedule for a volume. For more information, see Backing up your volumes. In the DeleteSnapshotSchedule request, you identify the volume by providing its Amazon Resource Name (ARN). This operation is only supported in stored and cached volume gateway types.

To list or delete a snapshot, you must use the Amazon EC2 API. For more information, go to DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.

" }, "DeleteTape":{ "name":"DeleteTape", @@ -376,7 +376,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Deletes the specified storage volume that you previously created using the CreateCachediSCSIVolume or CreateStorediSCSIVolume API. This operation is only supported in the cached volume and stored volume types. For stored volume gateways, the local disk that was configured as the storage volume is not deleted. You can reuse the local disk to create another storage volume.

Before you delete a volume, make sure there are no iSCSI connections to the volume you are deleting. You should also make sure there is no snapshot in progress. You can use the Amazon Elastic Compute Cloud (Amazon EC2) API to query snapshots on the volume you are deleting and check the snapshot status. For more information, go to DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.

In the request, you must provide the Amazon Resource Name (ARN) of the storage volume you want to delete.

" + "documentation":"

Deletes the specified storage volume that you previously created using the CreateCachediSCSIVolume or CreateStorediSCSIVolume API. This operation is only supported in the cached volume and stored volume types. For stored volume gateways, the local disk that was configured as the storage volume is not deleted. You can reuse the local disk to create another storage volume.

Before you delete a volume, make sure there are no iSCSI connections to the volume you are deleting. You should also make sure there is no snapshot in progress. You can use the Amazon Elastic Compute Cloud (Amazon EC2) API to query snapshots on the volume you are deleting and check the snapshot status. For more information, go to DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.

In the request, you must provide the Amazon Resource Name (ARN) of the storage volume you want to delete.

" }, "DescribeAvailabilityMonitorTest":{ "name":"DescribeAvailabilityMonitorTest", @@ -404,7 +404,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Returns the bandwidth rate limits of a gateway. By default, these limits are not set, which means no bandwidth rate limiting is in effect. This operation is supported for the stored volume, cached volume and tape gateway types.'

This operation only returns a value for a bandwidth rate limit only if the limit is set. If no limits are set for the gateway, then this operation returns only the gateway ARN in the response body. To specify which gateway to describe, use the Amazon Resource Name (ARN) of the gateway in your request.

" + "documentation":"

Returns the bandwidth rate limits of a gateway. By default, these limits are not set, which means no bandwidth rate limiting is in effect. This operation is supported for the stored volume, cached volume, and tape gateway types.

This operation returns a value for a bandwidth rate limit only if the limit is set. If no limits are set for the gateway, then this operation returns only the gateway ARN in the response body. To specify which gateway to describe, use the Amazon Resource Name (ARN) of the gateway in your request.

" }, "DescribeCache":{ "name":"DescribeCache", @@ -544,7 +544,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Returns the description of the gateway volumes specified in the request. The list of gateway volumes in the request must be from one gateway. In the response AWS Storage Gateway returns volume information sorted by volume ARNs. This operation is only supported in stored volume gateway type.

" + "documentation":"

Returns the description of the gateway volumes specified in the request. The list of gateway volumes in the request must be from one gateway. In the response, AWS Storage Gateway returns volume information sorted by volume ARNs. This operation is only supported in stored volume gateway type.

" }, "DescribeTapeArchives":{ "name":"DescribeTapeArchives", @@ -600,7 +600,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Returns information about the upload buffer of a gateway. This operation is supported for the stored volume, cached volume and tape gateway types.

The response includes disk IDs that are configured as upload buffer space, and it includes the amount of upload buffer space allocated and used.

" + "documentation":"

Returns information about the upload buffer of a gateway. This operation is supported for the stored volume, cached volume, and tape gateway types.

The response includes disk IDs that are configured as upload buffer space, and it includes the amount of upload buffer space allocated and used.

" }, "DescribeVTLDevices":{ "name":"DescribeVTLDevices", @@ -684,7 +684,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Lists the automatic tape creation policies for a gateway. If there are no automatic tape creation policies for the gateway, it returns an empty list.

This operation is only supported for tape gateways.

" + "documentation":"

Lists the automatic tape creation policies for a gateway. If there are no automatic tape creation policies for the gateway, it returns an empty list.

This operation is only supported for tape gateways.

" }, "ListFileShares":{ "name":"ListFileShares", @@ -810,7 +810,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Sends you notification through CloudWatch Events when all files written to your file share have been uploaded to Amazon S3.

AWS Storage Gateway can send a notification through Amazon CloudWatch Events when all files written to your file share up to that point in time have been uploaded to Amazon S3. These files include files written to the file share up to the time that you make a request for notification. When the upload is done, Storage Gateway sends you notification through an Amazon CloudWatch Event. You can configure CloudWatch Events to send the notification through event targets such as Amazon SNS or AWS Lambda function. This operation is only supported for file gateways.

For more information, see Getting File Upload Notification in the Storage Gateway User Guide (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-upload-notification).

" + "documentation":"

Sends you notification through CloudWatch Events when all files written to your file share have been uploaded to Amazon S3.

AWS Storage Gateway can send a notification through Amazon CloudWatch Events when all files written to your file share up to that point in time have been uploaded to Amazon S3. These files include files written to the file share up to the time that you make a request for notification. When the upload is done, Storage Gateway sends you notification through an Amazon CloudWatch Event. You can configure CloudWatch Events to send the notification through event targets such as Amazon SNS or AWS Lambda function. This operation is only supported for file gateways.

For more information, see Getting file upload notification in the AWS Storage Gateway User Guide.

" }, "RefreshCache":{ "name":"RefreshCache", @@ -824,7 +824,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Refreshes the cache for the specified file share. This operation finds objects in the Amazon S3 bucket that were added, removed or replaced since the gateway last listed the bucket's contents and cached the results. This operation is only supported in the file gateway type. You can subscribe to be notified through an Amazon CloudWatch event when your RefreshCache operation completes. For more information, see Getting Notified About File Operations.

When this API is called, it only initiates the refresh operation. When the API call completes and returns a success code, it doesn't necessarily mean that the file refresh has completed. You should use the refresh-complete notification to determine that the operation has completed before you check for new files on the gateway file share. You can subscribe to be notified through an CloudWatch event when your RefreshCache operation completes.

Throttle limit: This API is asynchronous so the gateway will accept no more than two refreshes at any time. We recommend using the refresh-complete CloudWatch event notification before issuing additional requests. For more information, see Getting Notified About File Operations.

If you invoke the RefreshCache API when two requests are already being processed, any new request will cause an InvalidGatewayRequestException error because too many requests were sent to the server.

For more information, see \"https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-notification\".

" + "documentation":"

Refreshes the cache for the specified file share. This operation finds objects in the Amazon S3 bucket that were added, removed or replaced since the gateway last listed the bucket's contents and cached the results. This operation is only supported in the file gateway type. You can subscribe to be notified through an Amazon CloudWatch event when your RefreshCache operation completes. For more information, see Getting notified about file operations in the AWS Storage Gateway User Guide.

When this API is called, it only initiates the refresh operation. When the API call completes and returns a success code, it doesn't necessarily mean that the file refresh has completed. You should use the refresh-complete notification to determine that the operation has completed before you check for new files on the gateway file share. You can subscribe to be notified through a CloudWatch event when your RefreshCache operation completes.

Throttle limit: This API is asynchronous so the gateway will accept no more than two refreshes at any time. We recommend using the refresh-complete CloudWatch event notification before issuing additional requests. For more information, see Getting notified about file operations in the AWS Storage Gateway User Guide.

If you invoke the RefreshCache API when two requests are already being processed, any new request will cause an InvalidGatewayRequestException error because too many requests were sent to the server.

For more information, see Getting notified about file operations in the AWS Storage Gateway User Guide.

" }, "RemoveTagsFromResource":{ "name":"RemoveTagsFromResource", @@ -922,7 +922,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Shuts down a gateway. To specify which gateway to shut down, use the Amazon Resource Name (ARN) of the gateway in the body of your request.

The operation shuts down the gateway service component running in the gateway's virtual machine (VM) and not the host VM.

If you want to shut down the VM, it is recommended that you first shut down the gateway component in the VM to avoid unpredictable conditions.

After the gateway is shutdown, you cannot call any other API except StartGateway, DescribeGatewayInformation and ListGateways. For more information, see ActivateGateway. Your applications cannot read from or write to the gateway's storage volumes, and there are no snapshots taken.

When you make a shutdown request, you will get a 200 OK success response immediately. However, it might take some time for the gateway to shut down. You can call the DescribeGatewayInformation API to check the status. For more information, see ActivateGateway.

If do not intend to use the gateway again, you must delete the gateway (using DeleteGateway) to no longer pay software charges associated with the gateway.

" + "documentation":"

Shuts down a gateway. To specify which gateway to shut down, use the Amazon Resource Name (ARN) of the gateway in the body of your request.

The operation shuts down the gateway service component running in the gateway's virtual machine (VM) and not the host VM.

If you want to shut down the VM, it is recommended that you first shut down the gateway component in the VM to avoid unpredictable conditions.

After the gateway is shutdown, you cannot call any other API except StartGateway, DescribeGatewayInformation, and ListGateways. For more information, see ActivateGateway. Your applications cannot read from or write to the gateway's storage volumes, and there are no snapshots taken.

When you make a shutdown request, you will get a 200 OK success response immediately. However, it might take some time for the gateway to shut down. You can call the DescribeGatewayInformation API to check the status. For more information, see ActivateGateway.

If you do not intend to use the gateway again, you must delete the gateway (using DeleteGateway) to no longer pay software charges associated with the gateway.

" }, "StartAvailabilityMonitorTest":{ "name":"StartAvailabilityMonitorTest", @@ -936,7 +936,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Start a test that verifies that the specified gateway is configured for High Availability monitoring in your host environment. This request only initiates the test and that a successful response only indicates that the test was started. It doesn't indicate that the test passed. For the status of the test, invoke the DescribeAvailabilityMonitorTest API.

Starting this test will cause your gateway to go offline for a brief period.

" + "documentation":"

Start a test that verifies that the specified gateway is configured for High Availability monitoring in your host environment. This request only initiates the test, and a successful response only indicates that the test was started. It doesn't indicate that the test passed. For the status of the test, invoke the DescribeAvailabilityMonitorTest API.

Starting this test will cause your gateway to go offline for a brief period.

" }, "StartGateway":{ "name":"StartGateway", @@ -978,7 +978,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Updates the bandwidth rate limits of a gateway. You can update both the upload and download bandwidth rate limit or specify only one of the two. If you don't set a bandwidth rate limit, the existing rate limit remains. This operation is supported for the stored volume, cached volume and tape gateway types.'

By default, a gateway's bandwidth rate limits are not set. If you don't set any limit, the gateway does not have any limitations on its bandwidth usage and could potentially use the maximum available bandwidth.

To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in your request.

" + "documentation":"

Updates the bandwidth rate limits of a gateway. You can update both the upload and download bandwidth rate limit or specify only one of the two. If you don't set a bandwidth rate limit, the existing rate limit remains. This operation is supported for the stored volume, cached volume, and tape gateway types.

By default, a gateway's bandwidth rate limits are not set. If you don't set any limit, the gateway does not have any limitations on its bandwidth usage and could potentially use the maximum available bandwidth.

To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in your request.

" }, "UpdateChapCredentials":{ "name":"UpdateChapCredentials", @@ -1020,7 +1020,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Updates the gateway virtual machine (VM) software. The request immediately triggers the software update.

When you make this request, you get a 200 OK success response immediately. However, it might take some time for the update to complete. You can call DescribeGatewayInformation to verify the gateway is in the STATE_RUNNING state.

A software update forces a system restart of your gateway. You can minimize the chance of any disruption to your applications by increasing your iSCSI Initiators' timeouts. For more information about increasing iSCSI Initiator timeouts for Windows and Linux, see Customizing Your Windows iSCSI Settings and Customizing Your Linux iSCSI Settings, respectively.

" + "documentation":"

Updates the gateway virtual machine (VM) software. The request immediately triggers the software update.

When you make this request, you get a 200 OK success response immediately. However, it might take some time for the update to complete. You can call DescribeGatewayInformation to verify the gateway is in the STATE_RUNNING state.

A software update forces a system restart of your gateway. You can minimize the chance of any disruption to your applications by increasing your iSCSI Initiators' timeouts. For more information about increasing iSCSI Initiator timeouts for Windows and Linux, see Customizing your Windows iSCSI settings and Customizing your Linux iSCSI settings, respectively.

" }, "UpdateMaintenanceStartTime":{ "name":"UpdateMaintenanceStartTime", @@ -1062,7 +1062,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Updates a Server Message Block (SMB) file share.

To leave a file share field unchanged, set the corresponding input field to null. This operation is only supported for file gateways.

File gateways require AWS Security Token Service (AWS STS) to be activated to enable you to create a file share. Make sure that AWS STS is activated in the AWS Region you are creating your file gateway in. If AWS STS is not activated in this AWS Region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateways don't support creating hard or symbolic links on a file share.

" + "documentation":"

Updates a Server Message Block (SMB) file share.

To leave a file share field unchanged, set the corresponding input field to null. This operation is only supported for file gateways.

File gateways require AWS Security Token Service (AWS STS) to be activated to enable you to create a file share. Make sure that AWS STS is activated in the AWS Region you are creating your file gateway in. If AWS STS is not activated in this AWS Region, activate it. For information about how to activate AWS STS, see Activating and deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateways don't support creating hard or symbolic links on a file share.

" }, "UpdateSMBSecurityStrategy":{ "name":"UpdateSMBSecurityStrategy", @@ -1119,7 +1119,7 @@ "members":{ "ActivationKey":{ "shape":"ActivationKey", - "documentation":"

Your gateway activation key. You can obtain the activation key by sending an HTTP GET request with redirects enabled to the gateway IP address (port 80). The redirect URL returned in the response provides you the activation key for your gateway in the query string parameter activationKey. It may also include other activation-related parameters, however, these are merely defaults -- the arguments you pass to the ActivateGateway API call determine the actual configuration of your gateway.

For more information, see https://docs.aws.amazon.com/storagegateway/latest/userguide/get-activation-key.html in the Storage Gateway User Guide.

" + "documentation":"

Your gateway activation key. You can obtain the activation key by sending an HTTP GET request with redirects enabled to the gateway IP address (port 80). The redirect URL returned in the response provides you the activation key for your gateway in the query string parameter activationKey. It may also include other activation-related parameters, however, these are merely defaults -- the arguments you pass to the ActivateGateway API call determine the actual configuration of your gateway.

For more information, see Getting activation key in the AWS Storage Gateway User Guide.

" }, "GatewayName":{ "shape":"GatewayName", @@ -1131,26 +1131,26 @@ }, "GatewayRegion":{ "shape":"RegionId", - "documentation":"

A value that indicates the AWS Region where you want to store your data. The gateway AWS Region specified must be the same AWS Region as the AWS Region in your Host header in the request. For more information about available AWS Regions and endpoints for AWS Storage Gateway, see Regions and Endpoints in the Amazon Web Services Glossary.

Valid Values: See AWS Storage Gateway Regions and Endpoints in the AWS General Reference.

" + "documentation":"

A value that indicates the AWS Region where you want to store your data. The gateway AWS Region specified must be the same AWS Region as the AWS Region in your Host header in the request. For more information about available AWS Regions and endpoints for AWS Storage Gateway, see AWS Storage Gateway endpoints and quotas in the AWS General Reference.

Valid Values: See AWS Storage Gateway endpoints and quotas in the AWS General Reference.

" }, "GatewayType":{ "shape":"GatewayType", - "documentation":"

A value that defines the type of gateway to activate. The type specified is critical to all later functions of the gateway and cannot be changed after activation. The default value is CACHED.

Valid Values: \"STORED\", \"CACHED\", \"VTL\", \"FILE_S3\"

" + "documentation":"

A value that defines the type of gateway to activate. The type specified is critical to all later functions of the gateway and cannot be changed after activation. The default value is CACHED.

Valid Values: STORED | CACHED | VTL | FILE_S3

" }, "TapeDriveType":{ "shape":"TapeDriveType", - "documentation":"

The value that indicates the type of tape drive to use for tape gateway. This field is optional.

Valid Values: \"IBM-ULT3580-TD5\"

" + "documentation":"

The value that indicates the type of tape drive to use for tape gateway. This field is optional.

Valid Values: IBM-ULT3580-TD5

" }, "MediumChangerType":{ "shape":"MediumChangerType", - "documentation":"

The value that indicates the type of medium changer to use for tape gateway. This field is optional.

Valid Values: \"STK-L700\", \"AWS-Gateway-VTL\"

" + "documentation":"

The value that indicates the type of medium changer to use for tape gateway. This field is optional.

Valid Values: STK-L700 | AWS-Gateway-VTL

" }, "Tags":{ "shape":"Tags", "documentation":"

A list of up to 50 tags that you can assign to the gateway. Each tag is a key-value pair.

Valid characters for key and value are letters, spaces, and numbers that can be represented in UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length of a tag's key is 128 characters, and the maximum length for a tag's value is 256 characters.

" } }, - "documentation":"

A JSON object containing one or more of the following fields:

" + "documentation":"

A JSON object containing one or more of the following fields:

" }, "ActivateGatewayOutput":{ "type":"structure", @@ -1279,7 +1279,7 @@ }, "PoolId":{ "shape":"PoolId", - "documentation":"

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds to the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

" + "documentation":"

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds to the pool.

Valid Values: GLACIER | DEEP_ARCHIVE

" } } }, @@ -1314,7 +1314,7 @@ }, "NetworkInterfaceId":{ "shape":"NetworkInterfaceId", - "documentation":"

The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a list of the network interfaces available on a gateway.

Valid Values: A valid IP address.

" + "documentation":"

The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a list of the network interfaces available on a gateway.

Valid Values: A valid IP address.

" }, "DiskId":{ "shape":"DiskId", @@ -1343,7 +1343,7 @@ }, "Authentication":{ "type":"string", - "documentation":"

The authentication method of the file share.

Valid values are ActiveDirectory or GuestAccess. The default is ActiveDirectory.

", + "documentation":"

The authentication method of the file share. The default is ActiveDirectory.

Valid Values: ActiveDirectory | GuestAccess

", "max":15, "min":5 }, @@ -1377,7 +1377,7 @@ }, "PoolId":{ "shape":"PoolId", - "documentation":"

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the Amazon S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds to the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

" + "documentation":"

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the Amazon S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds to the pool.

Valid Values: GLACIER | DEEP_ARCHIVE

" }, "TapeSizeInBytes":{ "shape":"TapeSize", @@ -1439,7 +1439,7 @@ }, "VolumeAttachmentStatus":{ "shape":"VolumeAttachmentStatus", - "documentation":"

A value that indicates whether a storage volume is attached to or detached from a gateway. For more information, see Moving Your Volumes to a Different Gateway.

" + "documentation":"

A value that indicates whether a storage volume is attached to or detached from a gateway. For more information, see Moving your volumes to a different gateway.

" }, "VolumeSizeInBytes":{ "shape":"long", @@ -1536,7 +1536,7 @@ "members":{ "TargetARN":{ "shape":"TargetARN", - "documentation":"

The Amazon Resource Name (ARN) of the volume.

Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).

" + "documentation":"

The Amazon Resource Name (ARN) of the volume.

Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).

" }, "SecretToAuthenticateInitiator":{ "shape":"ChapSecret", @@ -1585,7 +1585,7 @@ }, "SnapshotId":{ "shape":"SnapshotId", - "documentation":"

The snapshot ID (e.g. \"snap-1122aabb\") of the snapshot to restore as the new cached volume. Specify this field if you want to create the iSCSI storage volume from a snapshot otherwise do not include this field. To list snapshots for your account use DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.

" + "documentation":"

The snapshot ID (e.g. \"snap-1122aabb\") of the snapshot to restore as the new cached volume. Specify this field if you want to create the iSCSI storage volume from a snapshot; otherwise, do not include this field. To list snapshots for your account use DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.

" }, "TargetName":{ "shape":"TargetName", @@ -1597,7 +1597,7 @@ }, "NetworkInterfaceId":{ "shape":"NetworkInterfaceId", - "documentation":"

The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a list of the network interfaces available on a gateway.

Valid Values: A valid IP address.

" + "documentation":"

The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a list of the network interfaces available on a gateway.

Valid Values: A valid IP address.

" }, "ClientToken":{ "shape":"ClientToken", @@ -1605,11 +1605,11 @@ }, "KMSEncrypted":{ "shape":"Boolean", - "documentation":"

True to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" + "documentation":"

Set to true to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

Valid Values: true | false

" }, "KMSKey":{ "shape":"KMSKey", - "documentation":"

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + "documentation":"

The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric CMKs. This value can only be set when KMSEncrypted is true. Optional.

" }, "Tags":{ "shape":"Tags", @@ -1653,47 +1653,47 @@ }, "KMSEncrypted":{ "shape":"Boolean", - "documentation":"

True to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" + "documentation":"

Set to true to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

Valid Values: true | false

" }, "KMSKey":{ "shape":"KMSKey", - "documentation":"

The Amazon Resource Name (ARN) AWS KMS key used for Amazon S3 server-side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + "documentation":"

The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric CMKs. This value can only be set when KMSEncrypted is true. Optional.

" }, "Role":{ "shape":"Role", - "documentation":"

The ARN of the AWS Identity and Access Management (IAM) role that a file gateway assumes when it accesses the underlying storage.

" + "documentation":"

The ARN of the AWS Identity and Access Management (IAM) role that a file gateway assumes when it accesses the underlying storage.

" }, "LocationARN":{ "shape":"LocationARN", - "documentation":"

The ARN of the backed storage used for storing file data.

" + "documentation":"

The ARN of the backed storage used for storing file data.

" }, "DefaultStorageClass":{ "shape":"StorageClass", - "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA, or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" + "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. The default value is S3_INTELLIGENT_TIERING. Optional.

Valid Values: S3_STANDARD | S3_INTELLIGENT_TIERING | S3_STANDARD_IA | S3_ONEZONE_IA

" }, "ObjectACL":{ "shape":"ObjectACL", - "documentation":"

A value that sets the access control list permission for objects in the S3 bucket that a file gateway puts objects into. The default value is \"private\".

" + "documentation":"

A value that sets the access control list (ACL) permission for objects in the S3 bucket that a file gateway puts objects into. The default value is private.

" }, "ClientList":{ "shape":"FileShareClientList", - "documentation":"

The list of clients that are allowed to access the file gateway. The list must contain either valid IP addresses or valid CIDR blocks.

" + "documentation":"

The list of clients that are allowed to access the file gateway. The list must contain either valid IP addresses or valid CIDR blocks.

" }, "Squash":{ "shape":"Squash", - "documentation":"

A value that maps a user to anonymous user. Valid options are the following:

  • RootSquash - Only root is mapped to anonymous user.

  • NoSquash - No one is mapped to anonymous user

  • AllSquash - Everyone is mapped to anonymous user.

" + "documentation":"

A value that maps a user to anonymous user.

Valid values are the following:

  • RootSquash: Only root is mapped to anonymous user.

  • NoSquash: No one is mapped to anonymous user.

  • AllSquash: Everyone is mapped to anonymous user.

" }, "ReadOnly":{ "shape":"Boolean", - "documentation":"

A value that sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" + "documentation":"

A value that sets the write status of a file share. Set this value to true to set the write status to read-only, otherwise set to false.

Valid Values: true | false

" }, "GuessMIMETypeEnabled":{ "shape":"Boolean", - "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" + "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, otherwise set to false. The default value is true.

Valid Values: true | false

" }, "RequesterPays":{ "shape":"Boolean", - "documentation":"

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs. Otherwise the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

" + "documentation":"

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs; otherwise, the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

Valid Values: true | false

" }, "Tags":{ "shape":"Tags", @@ -1707,7 +1707,7 @@ "members":{ "FileShareARN":{ "shape":"FileShareARN", - "documentation":"

The Amazon Resource Name (ARN) of the newly created file share.

" + "documentation":"

The Amazon Resource Name (ARN) of the newly created file share.

" } }, "documentation":"

CreateNFSFileShareOutput

" @@ -1731,51 +1731,51 @@ }, "KMSEncrypted":{ "shape":"Boolean", - "documentation":"

True to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" + "documentation":"

Set to true to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

Valid Values: true | false

" }, "KMSKey":{ "shape":"KMSKey", - "documentation":"

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + "documentation":"

The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric CMKs. This value can only be set when KMSEncrypted is true. Optional.

" }, "Role":{ "shape":"Role", - "documentation":"

The ARN of the AWS Identity and Access Management (IAM) role that a file gateway assumes when it accesses the underlying storage.

" + "documentation":"

The ARN of the AWS Identity and Access Management (IAM) role that a file gateway assumes when it accesses the underlying storage.

" }, "LocationARN":{ "shape":"LocationARN", - "documentation":"

The ARN of the backed storage used for storing file data.

" + "documentation":"

The ARN of the backed storage used for storing file data.

" }, "DefaultStorageClass":{ "shape":"StorageClass", - "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA, or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" + "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. The default value is S3_INTELLIGENT_TIERING. Optional.

Valid Values: S3_STANDARD | S3_INTELLIGENT_TIERING | S3_STANDARD_IA | S3_ONEZONE_IA

" }, "ObjectACL":{ "shape":"ObjectACL", - "documentation":"

A value that sets the access control list permission for objects in the S3 bucket that a file gateway puts objects into. The default value is \"private\".

" + "documentation":"

A value that sets the access control list (ACL) permission for objects in the S3 bucket that a file gateway puts objects into. The default value is private.

" }, "ReadOnly":{ "shape":"Boolean", - "documentation":"

A value that sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" + "documentation":"

A value that sets the write status of a file share. Set this value to true to set the write status to read-only, otherwise set to false.

Valid Values: true | false

" }, "GuessMIMETypeEnabled":{ "shape":"Boolean", - "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" + "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, otherwise set to false. The default value is true.

Valid Values: true | false

" }, "RequesterPays":{ "shape":"Boolean", - "documentation":"

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs. Otherwise the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

" + "documentation":"

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs; otherwise, the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

Valid Values: true | false

" }, "SMBACLEnabled":{ "shape":"Boolean", - "documentation":"

Set this value to \"true to enable ACL (access control list) on the SMB file share. Set it to \"false\" to map file and directory permissions to the POSIX permissions.

For more information, see https://docs.aws.amazon.com/storagegateway/latest/userguide/smb-acl.html in the Storage Gateway User Guide.

" + "documentation":"

Set this value to true to enable access control list (ACL) on the SMB file share. Set it to false to map file and directory permissions to the POSIX permissions.

For more information, see Using Microsoft Windows ACLs to control access to an SMB file share in the AWS Storage Gateway User Guide.

Valid Values: true | false

" }, "AdminUserList":{ "shape":"FileShareUserList", - "documentation":"

A list of users in the Active Directory that will be granted administrator privileges on the file share. These users can do all file operations as the super-user.

Use this option very carefully, because any user in this list can do anything they like on the file share, regardless of file permissions.

" + "documentation":"

A list of users in the Active Directory that will be granted administrator privileges on the file share. These users can do all file operations as the super-user.

Use this option very carefully, because any user in this list can do anything they like on the file share, regardless of file permissions.

" }, "ValidUserList":{ "shape":"FileShareUserList", - "documentation":"

A list of users or groups in the Active Directory that are allowed to access the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.

" + "documentation":"

A list of users or groups in the Active Directory that are allowed to access the file share. A group must be prefixed with the @ character. For example, @group1. Can only be set if Authentication is set to ActiveDirectory.

" }, "InvalidUserList":{ "shape":"FileShareUserList", @@ -1787,7 +1787,7 @@ }, "Authentication":{ "shape":"Authentication", - "documentation":"

The authentication method that users use to access the file share.

Valid values are ActiveDirectory or GuestAccess. The default is ActiveDirectory.

" + "documentation":"

The authentication method that users use to access the file share. The default is ActiveDirectory.

Valid Values: ActiveDirectory | GuestAccess

" }, "Tags":{ "shape":"Tags", @@ -1801,7 +1801,7 @@ "members":{ "FileShareARN":{ "shape":"FileShareARN", - "documentation":"

The Amazon Resource Name (ARN) of the newly created file share.

" + "documentation":"

The Amazon Resource Name (ARN) of the newly created file share.

" } }, "documentation":"

CreateSMBFileShareOutput

" @@ -1819,7 +1819,7 @@ }, "SnapshotDescription":{ "shape":"SnapshotDescription", - "documentation":"

Textual description of the snapshot that appears in the Amazon EC2 console, Elastic Block Store snapshots panel in the Description field, and in the AWS Storage Gateway snapshot Details pane, Description field

" + "documentation":"

Textual description of the snapshot that appears in the Amazon EC2 console, Elastic Block Store snapshots panel in the Description field, and in the AWS Storage Gateway snapshot Details pane, Description field.

" }, "Tags":{ "shape":"Tags", @@ -1857,7 +1857,7 @@ }, "SnapshotDescription":{ "shape":"SnapshotDescription", - "documentation":"

Textual description of the snapshot that appears in the Amazon EC2 console, Elastic Block Store snapshots panel in the Description field, and in the AWS Storage Gateway snapshot Details pane, Description field

" + "documentation":"

Textual description of the snapshot that appears in the Amazon EC2 console, Elastic Block Store snapshots panel in the Description field, and in the AWS Storage Gateway snapshot Details pane, Description field.

" }, "Tags":{ "shape":"Tags", @@ -1897,11 +1897,11 @@ }, "SnapshotId":{ "shape":"SnapshotId", - "documentation":"

The snapshot ID (e.g. \"snap-1122aabb\") of the snapshot to restore as the new stored volume. Specify this field if you want to create the iSCSI storage volume from a snapshot otherwise do not include this field. To list snapshots for your account use DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.

" + "documentation":"

The snapshot ID (e.g. \"snap-1122aabb\") of the snapshot to restore as the new stored volume. Specify this field if you want to create the iSCSI storage volume from a snapshot; otherwise, do not include this field. To list snapshots for your account use DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.

" }, "PreserveExistingData":{ "shape":"boolean", - "documentation":"

Specify this field as true if you want to preserve the data on the local disk. Otherwise, specifying this field as false creates an empty volume.

Valid Values: true, false

" + "documentation":"

Set to true if you want to preserve the data on the local disk. Otherwise, set to false to create an empty volume.

Valid Values: true | false

" }, "TargetName":{ "shape":"TargetName", @@ -1909,15 +1909,15 @@ }, "NetworkInterfaceId":{ "shape":"NetworkInterfaceId", - "documentation":"

The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a list of the network interfaces available on a gateway.

Valid Values: A valid IP address.

" + "documentation":"

The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a list of the network interfaces available on a gateway.

Valid Values: A valid IP address.

" }, "KMSEncrypted":{ "shape":"Boolean", - "documentation":"

True to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" + "documentation":"

Set to true to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

Valid Values: true | false

" }, "KMSKey":{ "shape":"KMSKey", - "documentation":"

The Amazon Resource Name (ARN) of the KMS key used for Amazon S3 server-side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + "documentation":"

The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric CMKs. This value can only be set when KMSEncrypted is true. Optional.

" }, "Tags":{ "shape":"Tags", @@ -1966,15 +1966,15 @@ }, "KMSEncrypted":{ "shape":"Boolean", - "documentation":"

True to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" + "documentation":"

Set to true to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

Valid Values: true | false

" }, "KMSKey":{ "shape":"KMSKey", - "documentation":"

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + "documentation":"

The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric CMKs. This value can only be set when KMSEncrypted is true. Optional.

" }, "PoolId":{ "shape":"PoolId", - "documentation":"

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds to the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

" + "documentation":"

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds to the pool.

Valid Values: GLACIER | DEEP_ARCHIVE

" }, "Tags":{ "shape":"Tags", @@ -2025,15 +2025,15 @@ }, "KMSEncrypted":{ "shape":"Boolean", - "documentation":"

True to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" + "documentation":"

Set to true to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

Valid Values: true | false

" }, "KMSKey":{ "shape":"KMSKey", - "documentation":"

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + "documentation":"

The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric CMKs. This value can only be set when KMSEncrypted is true. Optional.

" }, "PoolId":{ "shape":"PoolId", - "documentation":"

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds to the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

" + "documentation":"

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds to the pool.

Valid Values: GLACIER | DEEP_ARCHIVE

" }, "Tags":{ "shape":"Tags", @@ -2086,7 +2086,7 @@ "GatewayARN":{"shape":"GatewayARN"}, "BandwidthType":{ "shape":"BandwidthType", - "documentation":"

One of the BandwidthType values that indicates the gateway bandwidth rate limit to delete.

Valid Values: Upload, Download, All.

" + "documentation":"

One of the BandwidthType values that indicates the gateway bandwidth rate limit to delete.

Valid Values: Upload | Download | All

" } }, "documentation":"

A JSON object containing the following fields:

" @@ -2136,11 +2136,11 @@ "members":{ "FileShareARN":{ "shape":"FileShareARN", - "documentation":"

The Amazon Resource Name (ARN) of the file share to be deleted.

" + "documentation":"

The Amazon Resource Name (ARN) of the file share to be deleted.

" }, "ForceDelete":{ "shape":"boolean", - "documentation":"

If this value is set to true, the operation deletes a file share immediately and aborts all data uploads to AWS. Otherwise, the file share is not deleted until all data is uploaded to AWS. This process aborts the data upload process, and the file share enters the FORCE_DELETING status.

" + "documentation":"

If this value is set to true, the operation deletes a file share immediately and aborts all data uploads to AWS. Otherwise, the file share is not deleted until all data is uploaded to AWS. This process aborts the data upload process, and the file share enters the FORCE_DELETING status.

Valid Values: true | false

" } }, "documentation":"

DeleteFileShareInput

" @@ -2150,7 +2150,7 @@ "members":{ "FileShareARN":{ "shape":"FileShareARN", - "documentation":"

The Amazon Resource Name (ARN) of the deleted file share.

" + "documentation":"

The Amazon Resource Name (ARN) of the deleted file share.

" } }, "documentation":"

DeleteFileShareOutput

" @@ -2257,7 +2257,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the storage volume that was deleted. It is the same ARN you provided in the request.

" } }, - "documentation":"

A JSON object containing the Amazon Resource Name (ARN) of the storage volume that was deleted

" + "documentation":"

A JSON object containing the Amazon Resource Name (ARN) of the storage volume that was deleted.

" }, "DescribeAvailabilityMonitorTestInput":{ "type":"structure", @@ -2379,7 +2379,7 @@ "documentation":"

An array of ChapInfo objects that represent CHAP credentials. Each object in the array contains CHAP credential information for one target-initiator pair. If no CHAP credentials are set, an empty array is returned. CHAP credential information is provided in a JSON object with the following fields:

  • InitiatorName: The iSCSI initiator that connects to the target.

  • SecretToAuthenticateInitiator: The secret key that the initiator (for example, the Windows client) must provide to participate in mutual CHAP with the target.

  • SecretToAuthenticateTarget: The secret key that the target must provide to participate in mutual CHAP with the initiator (e.g. Windows client).

  • TargetARN: The Amazon Resource Name (ARN) of the storage volume.

" } }, - "documentation":"

A JSON object containing a .

" + "documentation":"

A JSON object containing the following fields:

" }, "DescribeGatewayInformationInput":{ "type":"structure", @@ -2439,7 +2439,7 @@ }, "VPCEndpoint":{ "shape":"string", - "documentation":"

The configuration settings for the virtual private cloud (VPC) endpoint for your gateway.

" + "documentation":"

The configuration settings for the virtual private cloud (VPC) endpoint for your gateway.

" }, "CloudWatchLogGroupARN":{ "shape":"CloudWatchLogGroupARN", @@ -2448,6 +2448,10 @@ "HostEnvironment":{ "shape":"HostEnvironment", "documentation":"

The type of hypervisor environment used by the host.

" + }, + "EndpointType":{ + "shape":"EndpointType", + "documentation":"

The type of endpoint for your gateway.

Valid Values: STANDARD | FIPS

" } }, "documentation":"

A JSON object containing the following fields:

" @@ -2478,7 +2482,7 @@ }, "DayOfMonth":{ "shape":"DayOfMonth", - "documentation":"

The day of the month component of the maintenance start time represented as an ordinal number from 1 to 28, where 1 represents the first day of the month and 28 represents the last day of the month.

This value is only available for tape and volume gateways.

" + "documentation":"

The day of the month component of the maintenance start time represented as an ordinal number from 1 to 28, where 1 represents the first day of the month and 28 represents the last day of the month.

" }, "Timezone":{ "shape":"GatewayTimezone", @@ -2493,7 +2497,7 @@ "members":{ "FileShareARNList":{ "shape":"FileShareARNList", - "documentation":"

An array containing the Amazon Resource Name (ARN) of each file share to be described.

" + "documentation":"

An array containing the Amazon Resource Name (ARN) of each file share to be described.

" } }, "documentation":"

DescribeNFSFileSharesInput

" @@ -2503,7 +2507,7 @@ "members":{ "NFSFileShareInfoList":{ "shape":"NFSFileShareInfoList", - "documentation":"

An array containing a description for each requested file share.

" + "documentation":"

An array containing a description for each requested file share.

" } }, "documentation":"

DescribeNFSFileSharesOutput

" @@ -2514,7 +2518,7 @@ "members":{ "FileShareARNList":{ "shape":"FileShareARNList", - "documentation":"

An array containing the Amazon Resource Name (ARN) of each file share to be described.

" + "documentation":"

An array containing the Amazon Resource Name (ARN) of each file share to be described.

" } }, "documentation":"

DescribeSMBFileSharesInput

" @@ -2524,7 +2528,7 @@ "members":{ "SMBFileShareInfoList":{ "shape":"SMBFileShareInfoList", - "documentation":"

An array containing a description for each requested file share.

" + "documentation":"

An array containing a description for each requested file share.

" } }, "documentation":"

DescribeSMBFileSharesOutput

" @@ -2546,15 +2550,15 @@ }, "ActiveDirectoryStatus":{ "shape":"ActiveDirectoryStatus", - "documentation":"

Indicates the status of a gateway that is a member of the Active Directory domain.

  • ACCESS_DENIED: Indicates that the JoinDomain operation failed due to an authentication error.

  • DETACHED: Indicates that gateway is not joined to a domain.

  • JOINED: Indicates that the gateway has successfully joined a domain.

  • JOINING: Indicates that a JoinDomain operation is in progress.

  • NETWORK_ERROR: Indicates that JoinDomain operation failed due to a network or connectivity error.

  • TIMEOUT: Indicates that the JoinDomain operation failed because the operation didn't complete within the allotted time.

  • UNKNOWN_ERROR: Indicates that the JoinDomain operation failed due to another type of error.

" + "documentation":"

Indicates the status of a gateway that is a member of the Active Directory domain.

  • ACCESS_DENIED: Indicates that the JoinDomain operation failed due to an authentication error.

  • DETACHED: Indicates that gateway is not joined to a domain.

  • JOINED: Indicates that the gateway has successfully joined a domain.

  • JOINING: Indicates that a JoinDomain operation is in progress.

  • NETWORK_ERROR: Indicates that JoinDomain operation failed due to a network or connectivity error.

  • TIMEOUT: Indicates that the JoinDomain operation failed because the operation didn't complete within the allotted time.

  • UNKNOWN_ERROR: Indicates that the JoinDomain operation failed due to another type of error.

" }, "SMBGuestPasswordSet":{ "shape":"Boolean", - "documentation":"

This value is true if a password for the guest user “smbguest” is set, and otherwise false.

" + "documentation":"

This value is true if a password for the guest user smbguest is set, otherwise false.

Valid Values: true | false

" }, "SMBSecurityStrategy":{ "shape":"SMBSecurityStrategy", - "documentation":"

The type of security strategy that was specified for file gateway.

ClientSpecified: if you use this option, requests are established based on what is negotiated by the client. This option is recommended when you want to maximize compatibility across different clients in your environment.

MandatorySigning: if you use this option, file gateway only allows connections from SMBv2 or SMBv3 clients that have signing enabled. This option works with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer.

MandatoryEncryption: if you use this option, file gateway only allows connections from SMBv3 clients that have encryption enabled. This option is highly recommended for environments that handle sensitive data. This option works with SMB clients on Microsoft Windows 8, Windows Server 2012 or newer.

" + "documentation":"

The type of security strategy that was specified for file gateway.

  • ClientSpecified: If you use this option, requests are established based on what is negotiated by the client. This option is recommended when you want to maximize compatibility across different clients in your environment.

  • MandatorySigning: If you use this option, file gateway only allows connections from SMBv2 or SMBv3 clients that have signing enabled. This option works with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer.

  • MandatoryEncryption: If you use this option, file gateway only allows connections from SMBv3 clients that have encryption enabled. This option is highly recommended for environments that handle sensitive data. This option works with SMB clients on Microsoft Windows 8, Windows Server 2012 or newer.

" } } }, @@ -2614,7 +2618,7 @@ "members":{ "StorediSCSIVolumes":{ "shape":"StorediSCSIVolumes", - "documentation":"

Describes a single unit of output from DescribeStorediSCSIVolumes. The following fields are returned:

  • ChapEnabled: Indicates whether mutual CHAP is enabled for the iSCSI target.

  • LunNumber: The logical disk number.

  • NetworkInterfaceId: The network interface ID of the stored volume that initiator use to map the stored volume as an iSCSI target.

  • NetworkInterfacePort: The port used to communicate with iSCSI targets.

  • PreservedExistingData: Indicates if when the stored volume was created, existing data on the underlying local disk was preserved.

  • SourceSnapshotId: If the stored volume was created from a snapshot, this field contains the snapshot ID used, e.g. snap-1122aabb. Otherwise, this field is not included.

  • StorediSCSIVolumes: An array of StorediSCSIVolume objects where each object contains metadata about one stored volume.

  • TargetARN: The Amazon Resource Name (ARN) of the volume target.

  • VolumeARN: The Amazon Resource Name (ARN) of the stored volume.

  • VolumeDiskId: The disk ID of the local disk that was specified in the CreateStorediSCSIVolume operation.

  • VolumeId: The unique identifier of the storage volume, e.g. vol-1122AABB.

  • VolumeiSCSIAttributes: An VolumeiSCSIAttributes object that represents a collection of iSCSI attributes for one stored volume.

  • VolumeProgress: Represents the percentage complete if the volume is restoring or bootstrapping that represents the percent of data transferred. This field does not appear in the response if the stored volume is not restoring or bootstrapping.

  • VolumeSizeInBytes: The size of the volume in bytes.

  • VolumeStatus: One of the VolumeStatus values that indicates the state of the volume.

  • VolumeType: One of the enumeration values describing the type of the volume. Currently, on STORED volumes are supported.

" + "documentation":"

Describes a single unit of output from DescribeStorediSCSIVolumes. The following fields are returned:

  • ChapEnabled: Indicates whether mutual CHAP is enabled for the iSCSI target.

  • LunNumber: The logical disk number.

  • NetworkInterfaceId: The network interface ID of the stored volume that initiators use to map the stored volume as an iSCSI target.

  • NetworkInterfacePort: The port used to communicate with iSCSI targets.

  • PreservedExistingData: Indicates whether, when the stored volume was created, existing data on the underlying local disk was preserved.

  • SourceSnapshotId: If the stored volume was created from a snapshot, this field contains the snapshot ID used, e.g. snap-1122aabb. Otherwise, this field is not included.

  • StorediSCSIVolumes: An array of StorediSCSIVolume objects where each object contains metadata about one stored volume.

  • TargetARN: The Amazon Resource Name (ARN) of the volume target.

  • VolumeARN: The Amazon Resource Name (ARN) of the stored volume.

  • VolumeDiskId: The disk ID of the local disk that was specified in the CreateStorediSCSIVolume operation.

  • VolumeId: The unique identifier of the storage volume, e.g. vol-1122AABB.

  • VolumeiSCSIAttributes: A VolumeiSCSIAttributes object that represents a collection of iSCSI attributes for one stored volume.

  • VolumeProgress: Represents the percentage complete if the volume is restoring or bootstrapping that represents the percent of data transferred. This field does not appear in the response if the stored volume is not restoring or bootstrapping.

  • VolumeSizeInBytes: The size of the volume in bytes.

  • VolumeStatus: One of the VolumeStatus values that indicates the state of the volume.

  • VolumeType: One of the enumeration values describing the type of the volume. Currently, only STORED volumes are supported.

" } } }, @@ -2641,7 +2645,7 @@ "members":{ "TapeArchives":{ "shape":"TapeArchives", - "documentation":"

An array of virtual tape objects in the virtual tape shelf (VTS). The description includes of the Amazon Resource Name (ARN) of the virtual tapes. The information returned includes the Amazon Resource Names (ARNs) of the tapes, size of the tapes, status of the tapes, progress of the description and tape barcode.

" + "documentation":"

An array of virtual tape objects in the virtual tape shelf (VTS). The description includes the Amazon Resource Name (ARN) of the virtual tapes. The information returned includes the Amazon Resource Names (ARNs) of the tapes, size of the tapes, status of the tapes, progress of the description, and tape barcode.

" }, "Marker":{ "shape":"Marker", @@ -2692,7 +2696,7 @@ }, "Marker":{ "shape":"Marker", - "documentation":"

A marker value, obtained in a previous call to DescribeTapes. This marker indicates which page of results to retrieve.

If not specified, the first page of results is retrieved.

" + "documentation":"

A marker value, obtained in a previous call to DescribeTapes. This marker indicates which page of results to retrieve.

If not specified, the first page of results is retrieved.

" }, "Limit":{ "shape":"PositiveIntObject", @@ -2817,7 +2821,7 @@ }, "ForceDetach":{ "shape":"Boolean", - "documentation":"

Set to true to forcibly remove the iSCSI connection of the target volume and detach the volume. The default is false. If this value is set to false, you must manually disconnect the iSCSI connection from the target volume.

" + "documentation":"

Set to true to forcibly remove the iSCSI connection of the target volume and detach the volume. The default is false. If this value is set to false, you must manually disconnect the iSCSI connection from the target volume.

Valid Values: true | false

" } }, "documentation":"

AttachVolumeInput

" @@ -2903,7 +2907,7 @@ "DiskAllocationType":{"shape":"DiskAllocationType"}, "DiskAllocationResource":{ "shape":"string", - "documentation":"

The iSCSI qualified name (IQN) that is defined for a disk. This field is not included in the response if the local disk is not defined as an iSCSI target. The format of this field is targetIqn::LUNNumber::region-volumeId.

" + "documentation":"

The iSCSI qualified name (IQN) that is defined for a disk. This field is not included in the response if the local disk is not defined as an iSCSI target. The format of this field is targetIqn::LUNNumber::region-volumeId.

" }, "DiskAttributeList":{"shape":"DiskAttributeList"} }, @@ -2911,7 +2915,7 @@ }, "DiskAllocationType":{ "type":"string", - "documentation":"

One of the DiskAllocationType enumeration values that identifies how a local disk is used. Valid values: UPLOAD_BUFFER, CACHE_STORAGE

", + "documentation":"

One of the DiskAllocationType enumeration values that identifies how a local disk is used.

Valid Values: UPLOAD_BUFFER | CACHE_STORAGE

", "max":100, "min":3 }, @@ -2962,6 +2966,11 @@ "DoubleObject":{"type":"double"}, "Ec2InstanceId":{"type":"string"}, "Ec2InstanceRegion":{"type":"string"}, + "EndpointType":{ + "type":"string", + "max":8, + "min":4 + }, "ErrorCode":{ "type":"string", "enum":[ @@ -3031,7 +3040,7 @@ }, "FileShareARN":{ "type":"string", - "documentation":"

The Amazon Resource Name (ARN) of the file share.

", + "documentation":"

The Amazon Resource Name (ARN) of the file share.

", "max":500, "min":50 }, @@ -3044,13 +3053,13 @@ "FileShareClientList":{ "type":"list", "member":{"shape":"IPV4AddressCIDR"}, - "documentation":"

The list of clients that are allowed to access the file gateway. The list must contain either valid IP addresses or valid CIDR blocks.

", + "documentation":"

The list of clients that are allowed to access the file gateway. The list must contain either valid IP addresses or valid CIDR blocks.

", "max":100, "min":1 }, "FileShareId":{ "type":"string", - "documentation":"

The ID of the file share.

", + "documentation":"

The ID of the file share.

", "max":30, "min":12 }, @@ -3071,7 +3080,7 @@ }, "FileShareStatus":{ "type":"string", - "documentation":"

The status of the file share. Possible values are CREATING, UPDATING, AVAILABLE and DELETING.

", + "documentation":"

The status of the file share.

Valid Values: CREATING | UPDATING | AVAILABLE | DELETING

", "max":50, "min":3 }, @@ -3133,7 +3142,7 @@ }, "GatewayOperationalState":{ "shape":"GatewayOperationalState", - "documentation":"

The state of the gateway.

Valid Values: DISABLED or ACTIVE

" + "documentation":"

The state of the gateway.

Valid Values: DISABLED | ACTIVE

" }, "GatewayName":{ "shape":"string", @@ -3306,14 +3315,14 @@ }, "ActiveDirectoryStatus":{ "shape":"ActiveDirectoryStatus", - "documentation":"

Indicates the status of the gateway as a member of the Active Directory domain.

  • ACCESS_DENIED: Indicates that the JoinDomain operation failed due to an authentication error.

  • DETACHED: Indicates that gateway is not joined to a domain.

  • JOINED: Indicates that the gateway has successfully joined a domain.

  • JOINING: Indicates that a JoinDomain operation is in progress.

  • NETWORK_ERROR: Indicates that JoinDomain operation failed due to a network or connectivity error.

  • TIMEOUT: Indicates that the JoinDomain operation failed because the operation didn't complete within the allotted time.

  • UNKNOWN_ERROR: Indicates that the JoinDomain operation failed due to another type of error.

" + "documentation":"

Indicates the status of the gateway as a member of the Active Directory domain.

  • ACCESS_DENIED: Indicates that the JoinDomain operation failed due to an authentication error.

  • DETACHED: Indicates that gateway is not joined to a domain.

  • JOINED: Indicates that the gateway has successfully joined a domain.

  • JOINING: Indicates that a JoinDomain operation is in progress.

  • NETWORK_ERROR: Indicates that JoinDomain operation failed due to a network or connectivity error.

  • TIMEOUT: Indicates that the JoinDomain operation failed because the operation didn't complete within the allotted time.

  • UNKNOWN_ERROR: Indicates that the JoinDomain operation failed due to another type of error.

" } }, "documentation":"

JoinDomainOutput

" }, "KMSKey":{ "type":"string", - "documentation":"

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side encryption. This value can only be set when KMSEncrypted is true. Optional.

", + "documentation":"

The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric CMKs. This value can only be set when KMSEncrypted is true. Optional.

", "max":2048, "min":7 }, @@ -3360,15 +3369,15 @@ "members":{ "Marker":{ "shape":"Marker", - "documentation":"

If the request includes Marker, the response returns that value in this field.

" + "documentation":"

If the request includes Marker, the response returns that value in this field.

" }, "NextMarker":{ "shape":"Marker", - "documentation":"

If a value is present, there are more file shares to return. In a subsequent request, use NextMarker as the value for Marker to retrieve the next set of file shares.

" + "documentation":"

If a value is present, there are more file shares to return. In a subsequent request, use NextMarker as the value for Marker to retrieve the next set of file shares.

" }, "FileShareInfoList":{ "shape":"FileShareInfoList", - "documentation":"

An array of information about the file gateway's file shares.

" + "documentation":"

An array of information about the file gateway's file shares.

" } }, "documentation":"

ListFileShareOutput

" @@ -3558,7 +3567,7 @@ }, "LocationARN":{ "type":"string", - "documentation":"

The ARN of the backend storage used for storing file data.

", + "documentation":"

The ARN of the backend storage used for storing file data.

", "max":310, "min":16 }, @@ -3587,19 +3596,19 @@ "members":{ "FileMode":{ "shape":"PermissionMode", - "documentation":"

The Unix file mode in the form \"nnnn\". For example, \"0666\" represents the default file mode inside the file share. The default value is 0666.

" + "documentation":"

The Unix file mode in the form \"nnnn\". For example, 0666 represents the default file mode inside the file share. The default value is 0666.

" }, "DirectoryMode":{ "shape":"PermissionMode", - "documentation":"

The Unix directory mode in the form \"nnnn\". For example, \"0666\" represents the default access mode for all directories inside the file share. The default value is 0777.

" + "documentation":"

The Unix directory mode in the form \"nnnn\". For example, 0666 represents the default access mode for all directories inside the file share. The default value is 0777.

" }, "GroupId":{ "shape":"PermissionId", - "documentation":"

The default group ID for the file share (unless the files have another group ID specified). The default value is nfsnobody.

" + "documentation":"

The default group ID for the file share (unless the files have another group ID specified). The default value is nfsnobody.

" }, "OwnerId":{ "shape":"PermissionId", - "documentation":"

The default owner ID for files in the file share (unless the files have another owner ID specified). The default value is nfsnobody.

" + "documentation":"

The default owner ID for files in the file share (unless the files have another owner ID specified). The default value is nfsnobody.

" } }, "documentation":"

Describes Network File System (NFS) file share default values. Files and folders stored as Amazon S3 objects in S3 buckets don't, by default, have Unix file permissions assigned to them. Upon discovery in an S3 bucket by Storage Gateway, the S3 objects that represent files and folders are assigned these default Unix permissions. This operation is only supported for file gateways.

" @@ -3614,7 +3623,7 @@ "GatewayARN":{"shape":"GatewayARN"}, "KMSEncrypted":{ "shape":"boolean", - "documentation":"

True to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" + "documentation":"

Set to true to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

Valid Values: true | false

" }, "KMSKey":{"shape":"KMSKey"}, "Path":{"shape":"Path"}, @@ -3622,22 +3631,22 @@ "LocationARN":{"shape":"LocationARN"}, "DefaultStorageClass":{ "shape":"StorageClass", - "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA, or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" + "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. The default value is S3_INTELLIGENT_TIERING. Optional.

Valid Values: S3_STANDARD | S3_INTELLIGENT_TIERING | S3_STANDARD_IA | S3_ONEZONE_IA

" }, "ObjectACL":{"shape":"ObjectACL"}, "ClientList":{"shape":"FileShareClientList"}, "Squash":{"shape":"Squash"}, "ReadOnly":{ "shape":"Boolean", - "documentation":"

A value that sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" + "documentation":"

A value that sets the write status of a file share. Set this value to true to set the write status to read-only, otherwise set to false.

Valid Values: true | false

" }, "GuessMIMETypeEnabled":{ "shape":"Boolean", - "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" + "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, otherwise set to false. The default value is true.

Valid Values: true | false

" }, "RequesterPays":{ "shape":"Boolean", - "documentation":"

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs. Otherwise the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

" + "documentation":"

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs; otherwise, the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

Valid Values: true | false

" }, "Tags":{ "shape":"Tags", @@ -3704,7 +3713,7 @@ }, "ObjectACL":{ "type":"string", - "documentation":"

A value that sets the access control list permission for objects in the S3 bucket that a file gateway puts objects into. The default value is \"private\".

", + "documentation":"

A value that sets the access control list (ACL) permission for objects in the S3 bucket that a file gateway puts objects into. The default value is private.

", "enum":[ "private", "public-read", @@ -3722,7 +3731,7 @@ }, "Path":{ "type":"string", - "documentation":"

The file share path used by the NFS client to identify the mount point.

" + "documentation":"

The file share path used by the NFS client to identify the mount point.

" }, "PermissionId":{ "type":"long", @@ -3759,11 +3768,11 @@ }, "FolderList":{ "shape":"FolderList", - "documentation":"

A comma-separated list of the paths of folders to refresh in the cache. The default is [\"/\"]. The default refreshes objects and folders at the root of the Amazon S3 bucket. If Recursive is set to \"true\", the entire S3 bucket that the file share has access to is refreshed.

" + "documentation":"

A comma-separated list of the paths of folders to refresh in the cache. The default is [\"/\"]. The default refreshes objects and folders at the root of the Amazon S3 bucket. If Recursive is set to true, the entire S3 bucket that the file share has access to is refreshed.

" }, "Recursive":{ "shape":"Boolean", - "documentation":"

A value that specifies whether to recursively refresh folders in the cache. The refresh includes folders that were in the cache the last time the gateway listed the folder's contents. If this value set to \"true\", each folder that is listed in FolderList is recursively updated. Otherwise, subfolders listed in FolderList are not refreshed. Only objects that are in folders listed directly under FolderList are found and used for the update. The default is \"true\".

" + "documentation":"

A value that specifies whether to recursively refresh folders in the cache. The refresh includes folders that were in the cache the last time the gateway listed the folder's contents. If this value set to true, each folder that is listed in FolderList is recursively updated. Otherwise, subfolders listed in FolderList are not refreshed. Only objects that are in folders listed directly under FolderList are found and used for the update. The default is true.

Valid Values: true | false

" } }, "documentation":"

RefreshCacheInput

" @@ -3882,7 +3891,7 @@ }, "Role":{ "type":"string", - "documentation":"

The ARN of the IAM role that file gateway assumes when it accesses the underlying storage.

", + "documentation":"

The ARN of the IAM role that file gateway assumes when it accesses the underlying storage.

", "max":2048, "min":20 }, @@ -3895,7 +3904,7 @@ "GatewayARN":{"shape":"GatewayARN"}, "KMSEncrypted":{ "shape":"boolean", - "documentation":"

True to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" + "documentation":"

Set to true to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

Valid Values: true | false

" }, "KMSKey":{"shape":"KMSKey"}, "Path":{ @@ -3906,24 +3915,24 @@ "LocationARN":{"shape":"LocationARN"}, "DefaultStorageClass":{ "shape":"StorageClass", - "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA, or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" + "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. The default value is S3_INTELLIGENT_TIERING. Optional.

Valid Values: S3_STANDARD | S3_INTELLIGENT_TIERING | S3_STANDARD_IA | S3_ONEZONE_IA

" }, "ObjectACL":{"shape":"ObjectACL"}, "ReadOnly":{ "shape":"Boolean", - "documentation":"

A value that sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" + "documentation":"

A value that sets the write status of a file share. Set this value to true to set the write status to read-only, otherwise set to false.

Valid Values: true | false

" }, "GuessMIMETypeEnabled":{ "shape":"Boolean", - "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" + "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, otherwise set to false. The default value is true.

Valid Values: true | false

" }, "RequesterPays":{ "shape":"Boolean", - "documentation":"

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs. Otherwise the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

" + "documentation":"

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs; otherwise, the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

Valid Values: true | false

" }, "SMBACLEnabled":{ "shape":"Boolean", - "documentation":"

If this value is set to \"true\", indicates that ACL (access control list) is enabled on the SMB file share. If it is set to \"false\", it indicates that file and directory permissions are mapped to the POSIX permission.

For more information, see https://docs.aws.amazon.com/storagegateway/latest/userguide/smb-acl.html in the Storage Gateway User Guide.

" + "documentation":"

If this value is set to true, it indicates that the access control list (ACL) is enabled on the SMB file share. If it is set to false, it indicates that file and directory permissions are mapped to the POSIX permission.

For more information, see Using Microsoft Windows ACLs to control access to an SMB file share in the AWS Storage Gateway User Guide.

" }, "AdminUserList":{ "shape":"FileShareUserList", @@ -3931,7 +3940,7 @@ }, "ValidUserList":{ "shape":"FileShareUserList", - "documentation":"

A list of users or groups in the Active Directory that are allowed to access the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.

" + "documentation":"

A list of users or groups in the Active Directory that are allowed to access the file share. A group must be prefixed with the @ character. For example, @group1. Can only be set if Authentication is set to ActiveDirectory.

" }, "InvalidUserList":{ "shape":"FileShareUserList", @@ -4017,7 +4026,7 @@ }, "Password":{ "shape":"SMBGuestPassword", - "documentation":"

The password that you want to set for your SMB Server.

" + "documentation":"

The password that you want to set for your SMB server.

" } }, "documentation":"

SetSMBGuestPasswordInput

" @@ -4054,7 +4063,7 @@ }, "Squash":{ "type":"string", - "documentation":"

The user mapped to anonymous user. Valid options are the following:

  • RootSquash - Only root is mapped to anonymous user.

  • NoSquash - No one is mapped to anonymous user

  • AllSquash - Everyone is mapped to anonymous user.

", + "documentation":"

The user mapped to anonymous user. Valid options are the following:

  • RootSquash: Only root is mapped to anonymous user.

  • NoSquash: No one is mapped to anonymous user.

  • AllSquash: Everyone is mapped to anonymous user.

", "max":15, "min":5 }, @@ -4127,7 +4136,7 @@ }, "VolumeAttachmentStatus":{ "shape":"VolumeAttachmentStatus", - "documentation":"

A value that indicates whether a storage volume is attached to, detached from, or is in the process of detaching from a gateway. For more information, see Moving Your Volumes to a Different Gateway.

" + "documentation":"

A value that indicates whether a storage volume is attached to, detached from, or is in the process of detaching from a gateway. For more information, see Moving your volumes to a different gateway.

" }, "VolumeSizeInBytes":{ "shape":"long", @@ -4147,7 +4156,7 @@ }, "PreservedExistingData":{ "shape":"boolean", - "documentation":"

Indicates if when the stored volume was created, existing data on the underlying local disk was preserved.

Valid Values: true, false

" + "documentation":"

Indicates whether, when the stored volume was created, existing data on the underlying local disk was preserved.

Valid Values: true | false

" }, "VolumeiSCSIAttributes":{ "shape":"VolumeiSCSIAttributes", @@ -4182,14 +4191,14 @@ "members":{ "Key":{ "shape":"TagKey", - "documentation":"

Tag key (String). The key can't start with aws:.

" + "documentation":"

Tag key. The key can't start with aws:.

" }, "Value":{ "shape":"TagValue", "documentation":"

Value of the tag key.

" } }, - "documentation":"

A key-value pair that helps you manage, filter, and search for your resource. Allowed characters: letters, white space, and numbers, representable in UTF-8, and the following characters: + - = . _ : /

" + "documentation":"

A key-value pair that helps you manage, filter, and search for your resource. Allowed characters: letters, white space, and numbers, representable in UTF-8, and the following characters: + - = . _ : /.

" }, "TagKey":{ "type":"string", @@ -4247,7 +4256,7 @@ "KMSKey":{"shape":"KMSKey"}, "PoolId":{ "shape":"PoolId", - "documentation":"

The ID of the pool that contains tapes that will be archived. The tapes in this pool are archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (S3 Glacier or S# Glacier Deep Archive) that corresponds to the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

" + "documentation":"

The ID of the pool that contains tapes that will be archived. The tapes in this pool are archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds to the pool.

Valid Values: GLACIER | DEEP_ARCHIVE

" } }, "documentation":"

Describes a virtual tape object.

" @@ -4301,7 +4310,7 @@ "KMSKey":{"shape":"KMSKey"}, "PoolId":{ "shape":"PoolId", - "documentation":"

The ID of the pool that was used to archive the tape. The tapes in this pool are archived in the S3 storage class that is associated with the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

" + "documentation":"

The ID of the pool that was used to archive the tape. The tapes in this pool are archived in the S3 storage class that is associated with the pool.

Valid Values: GLACIER | DEEP_ARCHIVE

" } }, "documentation":"

Represents a virtual tape that is archived in the virtual tape shelf (VTS).

" @@ -4353,7 +4362,7 @@ }, "PoolId":{ "shape":"PoolId", - "documentation":"

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds to the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

" + "documentation":"

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds to the pool.

Valid Values: GLACIER | DEEP_ARCHIVE

" } }, "documentation":"

Describes a virtual tape.

" @@ -4423,7 +4432,7 @@ "members":{ "AutomaticTapeCreationRules":{ "shape":"AutomaticTapeCreationRules", - "documentation":"

An automatic tape creation policy consists of a list of automatic tape creation rules. The rules determine when and how to automatically create new tapes.

" + "documentation":"

An automatic tape creation policy consists of a list of automatic tape creation rules. The rules determine when and how to automatically create new tapes.

" }, "GatewayARN":{"shape":"GatewayARN"} } @@ -4510,7 +4519,7 @@ }, "CloudWatchLogGroupARN":{ "shape":"CloudWatchLogGroupARN", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that you want to use to monitor and log events in the gateway.

For more information, see What Is Amazon CloudWatch Logs?.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that you want to use to monitor and log events in the gateway.

For more information, see What Is Amazon CloudWatch Logs?.

" } } }, @@ -4523,7 +4532,7 @@ "documentation":"

The name you configured for your gateway.

" } }, - "documentation":"

A JSON object containing the ARN of the gateway that was updated.

" + "documentation":"

A JSON object containing the Amazon Resource Name (ARN) of the gateway that was updated.

" }, "UpdateGatewaySoftwareNowInput":{ "type":"structure", @@ -4563,7 +4572,7 @@ }, "DayOfMonth":{ "shape":"DayOfMonth", - "documentation":"

The day of the month component of the maintenance start time represented as an ordinal number from 1 to 28, where 1 represents the first day of the month and 28 represents the last day of the month.

This value is only available for tape and volume gateways.

" + "documentation":"

The day of the month component of the maintenance start time represented as an ordinal number from 1 to 28, where 1 represents the first day of the month and 28 represents the last day of the month.

" } }, "documentation":"

A JSON object containing the following fields:

" @@ -4581,15 +4590,15 @@ "members":{ "FileShareARN":{ "shape":"FileShareARN", - "documentation":"

The Amazon Resource Name (ARN) of the file share to be updated.

" + "documentation":"

The Amazon Resource Name (ARN) of the file share to be updated.

" }, "KMSEncrypted":{ "shape":"Boolean", - "documentation":"

True to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" + "documentation":"

Set to true to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

Valid Values: true | false

" }, "KMSKey":{ "shape":"KMSKey", - "documentation":"

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + "documentation":"

The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric CMKs. This value can only be set when KMSEncrypted is true. Optional.

" }, "NFSFileShareDefaults":{ "shape":"NFSFileShareDefaults", @@ -4597,11 +4606,11 @@ }, "DefaultStorageClass":{ "shape":"StorageClass", - "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA, or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" + "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. The default value is S3_INTELLIGENT_TIERING. Optional.

Valid Values: S3_STANDARD | S3_INTELLIGENT_TIERING | S3_STANDARD_IA | S3_ONEZONE_IA

" }, "ObjectACL":{ "shape":"ObjectACL", - "documentation":"

A value that sets the access control list permission for objects in the S3 bucket that a file gateway puts objects into. The default value is \"private\".

" + "documentation":"

A value that sets the access control list (ACL) permission for objects in the S3 bucket that a file gateway puts objects into. The default value is private.

" }, "ClientList":{ "shape":"FileShareClientList", @@ -4609,19 +4618,19 @@ }, "Squash":{ "shape":"Squash", - "documentation":"

The user mapped to anonymous user. Valid options are the following:

  • RootSquash - Only root is mapped to anonymous user.

  • NoSquash - No one is mapped to anonymous user

  • AllSquash - Everyone is mapped to anonymous user.

" + "documentation":"

The user mapped to anonymous user.

Valid values are the following:

  • RootSquash: Only root is mapped to anonymous user.

  • NoSquash: No one is mapped to anonymous user.

  • AllSquash: Everyone is mapped to anonymous user.

" }, "ReadOnly":{ "shape":"Boolean", - "documentation":"

A value that sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" + "documentation":"

A value that sets the write status of a file share. Set this value to true to set the write status to read-only, otherwise set to false.

Valid Values: true | false

" }, "GuessMIMETypeEnabled":{ "shape":"Boolean", - "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" + "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, otherwise set to false. The default value is true.

Valid Values: true | false

" }, "RequesterPays":{ "shape":"Boolean", - "documentation":"

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs. Otherwise the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

" + "documentation":"

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs; otherwise, the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

Valid Values: true | false

" } }, "documentation":"

UpdateNFSFileShareInput

" @@ -4631,7 +4640,7 @@ "members":{ "FileShareARN":{ "shape":"FileShareARN", - "documentation":"

The Amazon Resource Name (ARN) of the updated file share.

" + "documentation":"

The Amazon Resource Name (ARN) of the updated file share.

" } }, "documentation":"

UpdateNFSFileShareOutput

" @@ -4646,43 +4655,43 @@ }, "KMSEncrypted":{ "shape":"Boolean", - "documentation":"

True to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" + "documentation":"

Set to true to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

Valid Values: true | false

" }, "KMSKey":{ "shape":"KMSKey", - "documentation":"

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server-side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + "documentation":"

The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric CMKs. This value can only be set when KMSEncrypted is true. Optional.

" }, "DefaultStorageClass":{ "shape":"StorageClass", - "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA, or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" + "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. The default value is S3_INTELLIGENT_TIERING. Optional.

Valid Values: S3_STANDARD | S3_INTELLIGENT_TIERING | S3_STANDARD_IA | S3_ONEZONE_IA

" }, "ObjectACL":{ "shape":"ObjectACL", - "documentation":"

A value that sets the access control list permission for objects in the S3 bucket that a file gateway puts objects into. The default value is \"private\".

" + "documentation":"

A value that sets the access control list (ACL) permission for objects in the S3 bucket that a file gateway puts objects into. The default value is private.

" }, "ReadOnly":{ "shape":"Boolean", - "documentation":"

A value that sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" + "documentation":"

A value that sets the write status of a file share. Set this value to true to set write status to read-only, otherwise set to false.

Valid Values: true | false

" }, "GuessMIMETypeEnabled":{ "shape":"Boolean", - "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" + "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, otherwise set to false. The default value is true.

Valid Values: true | false

" }, "RequesterPays":{ "shape":"Boolean", - "documentation":"

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs. Otherwise the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

" + "documentation":"

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs; otherwise, the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

Valid Values: true | false

" }, "SMBACLEnabled":{ "shape":"Boolean", - "documentation":"

Set this value to \"true to enable ACL (access control list) on the SMB file share. Set it to \"false\" to map file and directory permissions to the POSIX permissions.

For more information, see https://docs.aws.amazon.com/storagegateway/latest/userguide/smb-acl.htmlin the Storage Gateway User Guide.

" + "documentation":"

Set this value to true to enable access control list (ACL) on the SMB file share. Set it to false to map file and directory permissions to the POSIX permissions.

For more information, see Using Microsoft Windows ACLs to control access to an SMB file share in the AWS Storage Gateway User Guide.

Valid Values: true | false

" }, "AdminUserList":{ "shape":"FileShareUserList", - "documentation":"

A list of users in the Active Directory that have administrator rights to the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.

" + "documentation":"

A list of users in the Active Directory that have administrator rights to the file share. A group must be prefixed with the @ character. For example, @group1. Can only be set if Authentication is set to ActiveDirectory.

" }, "ValidUserList":{ "shape":"FileShareUserList", - "documentation":"

A list of users or groups in the Active Directory that are allowed to access the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.

" + "documentation":"

A list of users or groups in the Active Directory that are allowed to access the file share. A group must be prefixed with the @ character. For example, @group1. Can only be set if Authentication is set to ActiveDirectory.

" }, "InvalidUserList":{ "shape":"FileShareUserList", @@ -4700,7 +4709,7 @@ "members":{ "FileShareARN":{ "shape":"FileShareARN", - "documentation":"

The Amazon Resource Name (ARN) of the updated SMB file share.

" + "documentation":"

The Amazon Resource Name (ARN) of the updated SMB file share.

" } }, "documentation":"

UpdateSMBFileShareOutput

" @@ -4715,7 +4724,7 @@ "GatewayARN":{"shape":"GatewayARN"}, "SMBSecurityStrategy":{ "shape":"SMBSecurityStrategy", - "documentation":"

Specifies the type of security strategy.

ClientSpecified: if you use this option, requests are established based on what is negotiated by the client. This option is recommended when you want to maximize compatibility across different clients in your environment.

MandatorySigning: if you use this option, file gateway only allows connections from SMBv2 or SMBv3 clients that have signing enabled. This option works with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer.

MandatoryEncryption: if you use this option, file gateway only allows connections from SMBv3 clients that have encryption enabled. This option is highly recommended for environments that handle sensitive data. This option works with SMB clients on Microsoft Windows 8, Windows Server 2012 or newer.

" + "documentation":"

Specifies the type of security strategy.

ClientSpecified: if you use this option, requests are established based on what is negotiated by the client. This option is recommended when you want to maximize compatibility across different clients in your environment.

MandatorySigning: if you use this option, file gateway only allows connections from SMBv2 or SMBv3 clients that have signing enabled. This option works with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer.

MandatoryEncryption: if you use this option, file gateway only allows connections from SMBv3 clients that have encryption enabled. This option is highly recommended for environments that handle sensitive data. This option works with SMB clients on Microsoft Windows 8, Windows Server 2012 or newer.

" } } }, @@ -4779,7 +4788,7 @@ }, "DeviceType":{ "shape":"DeviceType", - "documentation":"

The type of medium changer you want to select.

Valid Values: \"STK-L700\", \"AWS-Gateway-VTL\"

" + "documentation":"

The type of medium changer you want to select.

Valid Values: STK-L700 | AWS-Gateway-VTL

" } } }, @@ -4859,16 +4868,16 @@ "members":{ "VolumeARN":{ "shape":"VolumeARN", - "documentation":"

The Amazon Resource Name (ARN) for the storage volume. For example, the following is a valid ARN:

arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABB

Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).

" + "documentation":"

The Amazon Resource Name (ARN) for the storage volume. For example, the following is a valid ARN:

arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABB

Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).

" }, "VolumeId":{ "shape":"VolumeId", - "documentation":"

The unique identifier assigned to the volume. This ID becomes part of the volume Amazon Resource Name (ARN), which you use as input for other operations.

Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).

" + "documentation":"

The unique identifier assigned to the volume. This ID becomes part of the volume Amazon Resource Name (ARN), which you use as input for other operations.

Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).

" }, "GatewayARN":{"shape":"GatewayARN"}, "GatewayId":{ "shape":"GatewayId", - "documentation":"

The unique identifier assigned to your gateway during activation. This ID becomes part of the gateway Amazon Resource Name (ARN), which you use as input for other operations.

Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).

" + "documentation":"

The unique identifier assigned to your gateway during activation. This ID becomes part of the gateway Amazon Resource Name (ARN), which you use as input for other operations.

Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).

" }, "VolumeType":{ "shape":"VolumeType", @@ -4880,7 +4889,7 @@ }, "VolumeAttachmentStatus":{ "shape":"VolumeAttachmentStatus", - "documentation":"

One of the VolumeStatus values that indicates the state of the storage volume.

" + "documentation":"

One of the VolumeStatus values that indicates the state of the storage volume.

" } }, "documentation":"

Describes a storage volume object.

" @@ -4963,5 +4972,5 @@ "long":{"type":"long"}, "string":{"type":"string"} }, - "documentation":"AWS Storage Gateway Service

AWS Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and the AWS storage infrastructure. The service enables you to securely upload data to the AWS Cloud for cost effective backup and rapid disaster recovery.

Use the following links to get started using the AWS Storage Gateway Service API Reference:

AWS Storage Gateway resource IDs are in uppercase. When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected.

IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS Resource IDs.

For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following:

arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG.

A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee.

For more information, see Announcement: Heads-up – Longer AWS Storage Gateway volume and snapshot IDs coming in 2016.

" + "documentation":"AWS Storage Gateway Service

AWS Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and the AWS storage infrastructure. The service enables you to securely upload data to the AWS Cloud for cost-effective backup and rapid disaster recovery.

Use the following links to get started using the AWS Storage Gateway Service API Reference:

AWS Storage Gateway resource IDs are in uppercase. When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected.

IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS resource IDs.

For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following:

arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG.

A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee.

For more information, see Announcement: Heads-up – Longer AWS Storage Gateway volume and snapshot IDs coming in 2016.

" } diff --git a/botocore/data/transfer/2018-11-05/service-2.json b/botocore/data/transfer/2018-11-05/service-2.json index 42cf1211..90bd06f7 100644 --- a/botocore/data/transfer/2018-11-05/service-2.json +++ b/botocore/data/transfer/2018-11-05/service-2.json @@ -350,7 +350,7 @@ "members":{ "Certificate":{ "shape":"Certificate", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. Required when Protocols is set to FTPS.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. Required when Protocols is set to FTPS.

To request a new public certificate, see Request a public certificate in the AWS Certificate Manager User Guide.

To import an existing certificate into ACM, see Importing certificates into ACM in the AWS Certificate Manager User Guide.

To request a private certificate to use FTPS through private IP addresses, see Request a private certificate in the AWS Certificate Manager User Guide.

Certificates with the following cryptographic algorithms and key sizes are supported:

  • 2048-bit RSA (RSA_2048)

  • 4096-bit RSA (RSA_4096)

  • Elliptic Prime Curve 256 bit (EC_prime256v1)

  • Elliptic Prime Curve 384 bit (EC_secp384r1)

  • Elliptic Prime Curve 521 bit (EC_secp521r1)

The certificate must be a valid SSL/TLS X.509 version 3 certificate with FQDN or IP address specified and information about the issuer.

" }, "EndpointDetails":{ "shape":"EndpointDetails", @@ -358,11 +358,11 @@ }, "EndpointType":{ "shape":"EndpointType", - "documentation":"

The type of VPC endpoint that you want your file transfer protocol-enabled server to connect to. You can choose to connect to the public internet or a virtual private cloud (VPC) endpoint. With a VPC endpoint, you can restrict access to your server and resources only within your VPC.

" + "documentation":"

The type of VPC endpoint that you want your file transfer protocol-enabled server to connect to. You can choose to connect to the public internet or a VPC endpoint. With a VPC endpoint, you can restrict access to your server and resources only within your VPC.

It is recommended that you use VPC as the EndpointType. With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with EndpointType set to VPC_ENDPOINT.

" }, "HostKey":{ "shape":"HostKey", - "documentation":"

The RSA private key as generated by the ssh-keygen -N \"\" -f my-new-server-key command.

If you aren't planning to migrate existing users from an existing SFTP-enabled server to a new server, don't update the host key. Accidentally changing a server's host key can be disruptive.

For more information, see Changing the Host Key for Your AWS Transfer Family Server in the AWS Transfer Family User Guide.

" + "documentation":"

The RSA private key as generated by the ssh-keygen -N \"\" -m PEM -f my-new-server-key command.

If you aren't planning to migrate existing users from an existing SFTP-enabled server to a new server, don't update the host key. Accidentally changing a server's host key can be disruptive.

For more information, see Change the host key for your SFTP-enabled server in the AWS Transfer Family User Guide.

" }, "IdentityProviderDetails":{ "shape":"IdentityProviderDetails", @@ -378,7 +378,7 @@ }, "Protocols":{ "shape":"Protocols", - "documentation":"

Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. The available protocols are:

  • Secure Shell (SSH) File Transfer Protocol (SFTP): File transfer over SSH

  • File Transfer Protocol Secure (FTPS): File transfer with TLS encryption

  • File Transfer Protocol (FTP): Unencrypted file transfer

" + "documentation":"

Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. The available protocols are:

  • SFTP (Secure Shell (SSH) File Transfer Protocol): File transfer over SSH

  • FTPS (File Transfer Protocol Secure): File transfer with TLS encryption

  • FTP (File Transfer Protocol): Unencrypted file transfer

If you select FTPS, you must choose a certificate stored in AWS Certificate Manager (ACM) which will be used to identify your server when clients connect to it over FTPS.

If Protocol includes either FTP or FTPS, then the EndpointType must be VPC and the IdentityProviderType must be API_GATEWAY.

If Protocol includes FTP, then AddressAllocationIds cannot be associated.

If Protocol is set only to SFTP, the EndpointType can be set to PUBLIC and the IdentityProviderType can be set to SERVICE_MANAGED.

" }, "Tags":{ "shape":"Tags", @@ -406,7 +406,7 @@ "members":{ "HomeDirectory":{ "shape":"HomeDirectory", - "documentation":"

The landing directory (folder) for a user when they log in to the file transfer protocol-enabled server using the client.

An example is your-Amazon-S3-bucket-name>/home/username.

" + "documentation":"

The landing directory (folder) for a user when they log in to the file transfer protocol-enabled server using the client.

An example is your-Amazon-S3-bucket-name>/home/username.

" }, "HomeDirectoryType":{ "shape":"HomeDirectoryType", @@ -414,11 +414,11 @@ }, "HomeDirectoryMappings":{ "shape":"HomeDirectoryMappings", - "documentation":"

Logical directory mappings that specify what Amazon S3 paths and keys should be visible to your user and how you want to make them visible. You will need to specify the \"Entry\" and \"Target\" pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 path. If you only specify a target, it will be displayed as is. You will need to also make sure that your AWS IAM Role provides access to paths in Target. The following is an example.

'[ \"/bucket2/documentation\", { \"Entry\": \"your-personal-report.pdf\", \"Target\": \"/bucket3/customized-reports/${transfer:UserName}.pdf\" } ]'

In most cases, you can use this value instead of the scope-down policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

If the target of a logical directory entry does not exist in Amazon S3, the entry will be ignored. As a workaround, you can use the Amazon S3 api to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api call instead of s3 so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a '/' for it to be considered a folder.

" + "documentation":"

Logical directory mappings that specify what Amazon S3 paths and keys should be visible to your user and how you want to make them visible. You will need to specify the \"Entry\" and \"Target\" pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 path. If you only specify a target, it will be displayed as is. You will need to also make sure that your IAM role provides access to paths in Target. The following is an example.

'[ \"/bucket2/documentation\", { \"Entry\": \"your-personal-report.pdf\", \"Target\": \"/bucket3/customized-reports/${transfer:UserName}.pdf\" } ]'

In most cases, you can use this value instead of the scope-down policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

If the target of a logical directory entry does not exist in Amazon S3, the entry will be ignored. As a workaround, you can use the Amazon S3 api to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api call instead of s3 so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a '/' for it to be considered a folder.

" }, "Policy":{ "shape":"Policy", - "documentation":"

A scope-down policy for your user so you can use the same IAM role across multiple users. This policy scopes down user access to portions of their Amazon S3 bucket. Variables that you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.

For scope-down policies, AWS Transfer Family stores the policy as a JSON blob, instead of the Amazon Resource Name (ARN) of the policy. You save the policy as a JSON blob and pass it in the Policy argument.

For an example of a scope-down policy, see Creating a Scope-Down Policy.

For more information, see AssumeRole in the AWS Security Token Service API Reference.

" + "documentation":"

A scope-down policy for your user so you can use the same IAM role across multiple users. This policy scopes down user access to portions of their Amazon S3 bucket. Variables that you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.

For scope-down policies, AWS Transfer Family stores the policy as a JSON blob, instead of the Amazon Resource Name (ARN) of the policy. You save the policy as a JSON blob and pass it in the Policy argument.

For an example of a scope-down policy, see Creating a scope-down policy.

For more information, see AssumeRole in the AWS Security Token Service API Reference.

" }, "Role":{ "shape":"Role", @@ -573,19 +573,19 @@ }, "Certificate":{ "shape":"Certificate", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. Required when Protocols is set to FTPS.

" + "documentation":"

Specifies the ARN of the AWS Certificate Manager (ACM) certificate. Required when Protocols is set to FTPS.

" }, "EndpointDetails":{ "shape":"EndpointDetails", - "documentation":"

The virtual private cloud (VPC) endpoint settings that you configured for your file transfer protocol-enabled server.

" + "documentation":"

Specifies the virtual private cloud (VPC) endpoint settings that you configured for your file transfer protocol-enabled server.

" }, "EndpointType":{ "shape":"EndpointType", - "documentation":"

The type of endpoint that your file transfer protocol-enabled server is connected to. If your server is connected to a VPC endpoint, your server isn't accessible over the public internet.

" + "documentation":"

Defines the type of endpoint that your file transfer protocol-enabled server is connected to. If your server is connected to a VPC endpoint, your server isn't accessible over the public internet.

" }, "HostKeyFingerprint":{ "shape":"HostKeyFingerprint", - "documentation":"

Contains the message-digest algorithm (MD5) hash of a file transfer protocol-enabled server's host key. This value is equivalent to the output of the ssh-keygen -l -E md5 -f my-new-server-key command.

" + "documentation":"

Specifies the Base64-encoded SHA256 fingerprint of the server's host key. This value is equivalent to the output of the ssh-keygen -l -f my-new-server-key command.

" }, "IdentityProviderDetails":{ "shape":"IdentityProviderDetails", @@ -593,34 +593,34 @@ }, "IdentityProviderType":{ "shape":"IdentityProviderType", - "documentation":"

Defines the mode of authentication method enabled for this service. A value of SERVICE_MANAGED means that you are using this file transfer protocol-enabled server to store and access user credentials within the service. A value of API_GATEWAY indicates that you have integrated an API Gateway endpoint that will be invoked for authenticating your user into the service.

" + "documentation":"

Specifies the mode of authentication method enabled for this service. A value of SERVICE_MANAGED means that you are using this file transfer protocol-enabled server to store and access user credentials within the service. A value of API_GATEWAY indicates that you have integrated an API Gateway endpoint that will be invoked for authenticating your user into the service.

" }, "LoggingRole":{ "shape":"Role", - "documentation":"

An AWS Identity and Access Management (IAM) entity that allows a file transfer protocol-enabled server to turn on Amazon CloudWatch logging for Amazon S3 events. When set, user activity can be viewed in your CloudWatch logs.

" + "documentation":"

Specifies the AWS Identity and Access Management (IAM) role that allows a file transfer protocol-enabled server to turn on Amazon CloudWatch logging for Amazon S3 events. When set, user activity can be viewed in your CloudWatch logs.

" }, "Protocols":{ "shape":"Protocols", - "documentation":"

Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. The available protocols are:

  • Secure Shell (SSH) File Transfer Protocol (SFTP): File transfer over SSH

  • File Transfer Protocol Secure (FTPS): File transfer with TLS encryption

  • File Transfer Protocol (FTP): Unencrypted file transfer

" + "documentation":"

Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. The available protocols are:

  • SFTP (Secure Shell (SSH) File Transfer Protocol): File transfer over SSH

  • FTPS (File Transfer Protocol Secure): File transfer with TLS encryption

  • FTP (File Transfer Protocol): Unencrypted file transfer

" }, "ServerId":{ "shape":"ServerId", - "documentation":"

Unique system-assigned identifier for a file transfer protocol-enabled server that you instantiate.

" + "documentation":"

Specifies the unique system-assigned identifier for a file transfer protocol-enabled server that you instantiate.

" }, "State":{ "shape":"State", - "documentation":"

The condition of a file transfer protocol-enabled server for the server that was described. A value of ONLINE indicates that the server can accept jobs and transfer files. A State value of OFFLINE means that the server cannot perform file transfer operations.

The states of STARTING and STOPPING indicate that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of START_FAILED or STOP_FAILED can indicate an error condition.

" + "documentation":"

Specifies the condition of a file transfer protocol-enabled server for the server that was described. A value of ONLINE indicates that the server can accept jobs and transfer files. A State value of OFFLINE means that the server cannot perform file transfer operations.

The states of STARTING and STOPPING indicate that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of START_FAILED or STOP_FAILED can indicate an error condition.

" }, "Tags":{ "shape":"Tags", - "documentation":"

Contains the key-value pairs that you can use to search for and group file transfer protocol-enabled servers that were assigned to the server that was described.

" + "documentation":"

Specifies the key-value pairs that you can use to search for and group file transfer protocol-enabled servers that were assigned to the server that was described.

" }, "UserCount":{ "shape":"UserCount", - "documentation":"

The number of users that are assigned to a file transfer protocol-enabled server you specified with the ServerId.

" + "documentation":"

Specifies the number of users that are assigned to a file transfer protocol-enabled server you specified with the ServerId.

" } }, - "documentation":"

Describes the properties of a file transfer protocol-enabled server that was specified. Information returned includes the following: the server Amazon Resource Name (ARN), the authentication configuration and type, the logging role, the server ID and state, and assigned tags or metadata.

" + "documentation":"

Describes the properties of a file transfer protocol-enabled server that was specified. Information returned includes the following: the server Amazon Resource Name (ARN), the certificate ARN (if the FTPS protocol was selected), the endpoint type and details, the authentication configuration and type, the logging role, the file transfer protocol or protocols, the server ID and state, and assigned tags or metadata.

" }, "DescribedUser":{ "type":"structure", @@ -628,19 +628,19 @@ "members":{ "Arn":{ "shape":"Arn", - "documentation":"

Contains the unique Amazon Resource Name (ARN) for the user that was requested to be described.

" + "documentation":"

Specifies the unique Amazon Resource Name (ARN) for the user that was requested to be described.

" }, "HomeDirectory":{ "shape":"HomeDirectory", - "documentation":"

Specifies the landing directory (or folder), which is the location that files are written to or read from in an Amazon S3 bucket for the described user. An example is /your s3 bucket name/home/username .

" + "documentation":"

Specifies the landing directory (or folder), which is the location that files are written to or read from in an Amazon S3 bucket, for the described user. An example is your-Amazon-S3-bucket-name/home/username.

" }, "HomeDirectoryMappings":{ "shape":"HomeDirectoryMappings", - "documentation":"

Logical directory mappings that you specified for what Amazon S3 paths and keys should be visible to your user and how you want to make them visible. You will need to specify the \"Entry\" and \"Target\" pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 path. If you only specify a target, it will be displayed as is. You will need to also make sure that your AWS IAM Role provides access to paths in Target.

In most cases, you can use this value instead of the scope-down policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

" + "documentation":"

Specifies the logical directory mappings that specify what Amazon S3 paths and keys should be visible to your user and how you want to make them visible. You will need to specify the \"Entry\" and \"Target\" pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 path. If you only specify a target, it will be displayed as is. You will need to also make sure that your AWS Identity and Access Management (IAM) role provides access to paths in Target.

In most cases, you can use this value instead of the scope-down policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

" }, "HomeDirectoryType":{ "shape":"HomeDirectoryType", - "documentation":"

The type of landing directory (folder) you mapped for your users to see when they log into the file transfer protocol-enabled server. If you set it to PATH, the user will see the absolute Amazon S3 bucket paths as is in their file transfer protocol clients. If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings for how you want to make Amazon S3 paths visible to your users.

" + "documentation":"

Specifies the type of landing directory (folder) you mapped for your users to see when they log into the file transfer protocol-enabled server. If you set it to PATH, the user will see the absolute Amazon S3 bucket paths as is in their file transfer protocol clients. If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings for how you want to make Amazon S3 paths visible to your users.

" }, "Policy":{ "shape":"Policy", @@ -652,15 +652,15 @@ }, "SshPublicKeys":{ "shape":"SshPublicKeys", - "documentation":"

Contains the public key portion of the Secure Shell (SSH) keys stored for the described user.

" + "documentation":"

Specifies the public key portion of the Secure Shell (SSH) keys stored for the described user.

" }, "Tags":{ "shape":"Tags", - "documentation":"

Contains the key-value pairs for the user requested. Tag can be used to search for and group users for a variety of purposes.

" + "documentation":"

Specifies the key-value pairs for the user requested. Tag can be used to search for and group users for a variety of purposes.

" }, "UserName":{ "shape":"UserName", - "documentation":"

The name of the user that was requested to be described. User names are used for authentication purposes. This is the string that will be used by your user when they log in to your file transfer protocol-enabled server.

" + "documentation":"

Specifies the name of the user that was requested to be described. User names are used for authentication purposes. This is the string that will be used by your user when they log in to your file transfer protocol-enabled server.

" } }, "documentation":"

Returns properties of the user that you want to describe.

" @@ -674,15 +674,15 @@ }, "SubnetIds":{ "shape":"SubnetIds", - "documentation":"

A list of subnet IDs that are required to host your file transfer protocol-enabled server endpoint in your VPC.

" + "documentation":"

A list of subnet IDs that are required to host your file transfer protocol-enabled server endpoint in your VPC.

This property can only be used when EndpointType is set to VPC.

" }, "VpcEndpointId":{ "shape":"VpcEndpointId", - "documentation":"

The ID of the VPC endpoint.

" + "documentation":"

The ID of the VPC endpoint.

This property can only be used when EndpointType is set to VPC_ENDPOINT.

" }, "VpcId":{ "shape":"VpcId", - "documentation":"

The VPC ID of the VPC in which a file transfer protocol-enabled server's endpoint will be hosted.

" + "documentation":"

The VPC ID of the VPC in which a file transfer protocol-enabled server's endpoint will be hosted.

This property can only be used when EndpointType is set to VPC.

" } }, "documentation":"

The virtual private cloud (VPC) endpoint settings that are configured for your file transfer protocol-enabled server. With a VPC endpoint, you can restrict access to your server and resources only within your VPC. To control incoming internet traffic, invoke the UpdateServer API and attach an Elastic IP to your server's endpoint.

" @@ -742,7 +742,7 @@ "members":{ "Url":{ "shape":"Url", - "documentation":"

Contains the location of the service endpoint used to authenticate users.

" + "documentation":"

Provides the location of the service endpoint used to authenticate users.

" }, "InvocationRole":{ "shape":"Role", @@ -939,31 +939,31 @@ "members":{ "Arn":{ "shape":"Arn", - "documentation":"

The unique Amazon Resource Name (ARN) for a file transfer protocol-enabled server to be listed.

" + "documentation":"

Specifies the unique Amazon Resource Name (ARN) for a file transfer protocol-enabled server to be listed.

" }, "IdentityProviderType":{ "shape":"IdentityProviderType", - "documentation":"

The authentication method used to validate a user for a file transfer protocol-enabled server that was specified. This can include Secure Shell (SSH), user name and password combinations, or your own custom authentication method. Valid values include SERVICE_MANAGED or API_GATEWAY.

" + "documentation":"

Specifies the authentication method used to validate a user for a file transfer protocol-enabled server that was specified. This can include Secure Shell (SSH), user name and password combinations, or your own custom authentication method. Valid values include SERVICE_MANAGED or API_GATEWAY.

" }, "EndpointType":{ "shape":"EndpointType", - "documentation":"

The type of VPC endpoint that your file transfer protocol-enabled server is connected to. If your server is connected to a VPC endpoint, your server isn't accessible over the public internet.

" + "documentation":"

Specifies the type of VPC endpoint that your file transfer protocol-enabled server is connected to. If your server is connected to a VPC endpoint, your server isn't accessible over the public internet.

" }, "LoggingRole":{ "shape":"Role", - "documentation":"

The AWS Identity and Access Management (IAM) entity that allows a file transfer protocol-enabled server to turn on Amazon CloudWatch logging.

" + "documentation":"

Specifies the AWS Identity and Access Management (IAM) role that allows a file transfer protocol-enabled server to turn on Amazon CloudWatch logging.

" }, "ServerId":{ "shape":"ServerId", - "documentation":"

The unique system assigned identifier for a file transfer protocol-enabled servers that were listed.

" + "documentation":"

Specifies the unique system-assigned identifier for a file transfer protocol-enabled server that was listed.

" }, "State":{ "shape":"State", - "documentation":"

Describes the condition of a file transfer protocol-enabled server for the server that was described. A value of ONLINE indicates that the server can accept jobs and transfer files. A State value of OFFLINE means that the server cannot perform file transfer operations.

The states of STARTING and STOPPING indicate that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of START_FAILED or STOP_FAILED can indicate an error condition.

" + "documentation":"

Specifies the condition of a file transfer protocol-enabled server for the server that was described. A value of ONLINE indicates that the server can accept jobs and transfer files. A State value of OFFLINE means that the server cannot perform file transfer operations.

The states of STARTING and STOPPING indicate that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of START_FAILED or STOP_FAILED can indicate an error condition.

" }, "UserCount":{ "shape":"UserCount", - "documentation":"

A numeric value that indicates the number of users that are assigned to a file transfer protocol-enabled server you specified with the ServerId.

" + "documentation":"

Specifies the number of users that are assigned to a file transfer protocol-enabled server you specified with the ServerId.

" } }, "documentation":"

Returns properties of a file transfer protocol-enabled server that was specified.

" @@ -978,7 +978,7 @@ "members":{ "Arn":{ "shape":"Arn", - "documentation":"

The unique Amazon Resource Name (ARN) for the user that you want to learn about.

" + "documentation":"

Provides the unique Amazon Resource Name (ARN) for the user that you want to learn about.

" }, "HomeDirectory":{ "shape":"HomeDirectory", @@ -986,19 +986,19 @@ }, "HomeDirectoryType":{ "shape":"HomeDirectoryType", - "documentation":"

The type of landing directory (folder) you mapped for your users' home directory. If you set it to PATH, the user will see the absolute Amazon S3 bucket paths as is in their file transfer protocol clients. If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings for how you want to make Amazon S3 paths visible to your users.

" + "documentation":"

Specifies the type of landing directory (folder) you mapped for your users' home directory. If you set it to PATH, the user will see the absolute Amazon S3 bucket paths as is in their file transfer protocol clients. If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings for how you want to make Amazon S3 paths visible to your users.

" }, "Role":{ "shape":"Role", - "documentation":"

The role in use by this user. A role is an AWS Identity and Access Management (IAM) entity that, in this case, allows a file transfer protocol-enabled server to act on a user's behalf. It allows the server to inherit the trust relationship that enables that user to perform file operations to their Amazon S3 bucket.

" + "documentation":"

Specifies the role that is in use by this user. A role is an AWS Identity and Access Management (IAM) entity that, in this case, allows a file transfer protocol-enabled server to act on a user's behalf. It allows the server to inherit the trust relationship that enables that user to perform file operations to their Amazon S3 bucket.

" }, "SshPublicKeyCount":{ "shape":"SshPublicKeyCount", - "documentation":"

The number of SSH public keys stored for the user you specified.

" + "documentation":"

Specifies the number of SSH public keys stored for the user you specified.

" }, "UserName":{ "shape":"UserName", - "documentation":"

The name of the user whose ARN was specified. User names are used for authentication purposes.

" + "documentation":"

Specifies the name of the user whose ARN was specified. User names are used for authentication purposes.

" } }, "documentation":"

Returns properties of the user that you specify.

" @@ -1108,6 +1108,11 @@ "fault":true, "synthetic":true }, + "SourceIp":{ + "type":"string", + "max":32, + "pattern":"^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$" + }, "SshPublicKey":{ "type":"structure", "required":[ @@ -1118,15 +1123,15 @@ "members":{ "DateImported":{ "shape":"DateImported", - "documentation":"

The date that the public key was added to the user account.

" + "documentation":"

Specifies the date that the public key was added to the user account.

" }, "SshPublicKeyBody":{ "shape":"SshPublicKeyBody", - "documentation":"

The content of the SSH public key as specified by the PublicKeyId.

" + "documentation":"

Specifies the content of the SSH public key as specified by the PublicKeyId.

" }, "SshPublicKeyId":{ "shape":"SshPublicKeyId", - "documentation":"

The SshPublicKeyId parameter contains the identifier of the public key.

" + "documentation":"

Specifies the SshPublicKeyId parameter that contains the identifier of the public key.

" } }, "documentation":"

Provides information about the public Secure Shell (SSH) key that is associated with a user account for the specific file transfer protocol-enabled server (as identified by ServerId). The information returned includes the date the key was imported, the public key contents, and the public key ID. A user can store more than one SSH public key associated with their user name on a specific server.

" @@ -1252,6 +1257,14 @@ "shape":"ServerId", "documentation":"

A system-assigned identifier for a specific file transfer protocol-enabled server. That server's user authentication method is tested with a user name and password.

" }, + "ServerProtocol":{ + "shape":"Protocol", + "documentation":"

The type of file transfer protocol to be tested.

The available protocols are:

  • Secure Shell (SSH) File Transfer Protocol (SFTP)

  • File Transfer Protocol Secure (FTPS)

  • File Transfer Protocol (FTP)

" + }, + "SourceIp":{ + "shape":"SourceIp", + "documentation":"

The source IP address of the user account to be tested.

" + }, "UserName":{ "shape":"UserName", "documentation":"

The name of the user account to be tested.

" @@ -1259,10 +1272,6 @@ "UserPassword":{ "shape":"UserPassword", "documentation":"

The password of the user account to be tested.

" - }, - "ServerProtocol":{ - "shape":"Protocol", - "documentation":"

The type of file transfer protocol to be tested.

The available protocols are:

  • Secure Shell (SSH) File Transfer Protocol (SFTP)

  • File Transfer Protocol Secure (FTPS)

  • File Transfer Protocol (FTP)

" } } }, @@ -1322,7 +1331,7 @@ "members":{ "Certificate":{ "shape":"Certificate", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. Required when Protocols is set to FTPS.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. Required when Protocols is set to FTPS.

To request a new public certificate, see Request a public certificate in the AWS Certificate Manager User Guide.

To import an existing certificate into ACM, see Importing certificates into ACM in the AWS Certificate Manager User Guide.

To request a private certificate to use FTPS through private IP addresses, see Request a private certificate in the AWS Certificate Manager User Guide.

Certificates with the following cryptographic algorithms and key sizes are supported:

  • 2048-bit RSA (RSA_2048)

  • 4096-bit RSA (RSA_4096)

  • Elliptic Prime Curve 256 bit (EC_prime256v1)

  • Elliptic Prime Curve 384 bit (EC_secp384r1)

  • Elliptic Prime Curve 521 bit (EC_secp521r1)

The certificate must be a valid SSL/TLS X.509 version 3 certificate with FQDN or IP address specified and information about the issuer.

" }, "EndpointDetails":{ "shape":"EndpointDetails", @@ -1330,11 +1339,11 @@ }, "EndpointType":{ "shape":"EndpointType", - "documentation":"

The type of endpoint that you want your file transfer protocol-enabled server to connect to. You can choose to connect to the public internet or a VPC endpoint. With a VPC endpoint, your server isn't accessible over the public internet.

" + "documentation":"

The type of endpoint that you want your file transfer protocol-enabled server to connect to. You can choose to connect to the public internet or a VPC endpoint. With a VPC endpoint, you can restrict access to your server and resources only within your VPC.

It is recommended that you use VPC as the EndpointType. With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with EndpointType set to VPC_ENDPOINT.

" }, "HostKey":{ "shape":"HostKey", - "documentation":"

The RSA private key as generated by ssh-keygen -N \"\" -f my-new-server-key.

If you aren't planning to migrate existing users from an existing file transfer protocol-enabled server to a new server, don't update the host key. Accidentally changing a server's host key can be disruptive.

For more information, see Changing the Host Key for Your AWS Transfer Family Server in the AWS Transfer Family User Guide.

" + "documentation":"

The RSA private key as generated by ssh-keygen -N \"\" -m PEM -f my-new-server-key.

If you aren't planning to migrate existing users from an existing file transfer protocol-enabled server to a new server, don't update the host key. Accidentally changing a server's host key can be disruptive.

For more information, see Change the host key for your SFTP-enabled server in the AWS Transfer Family User Guide.

" }, "IdentityProviderDetails":{ "shape":"IdentityProviderDetails", @@ -1346,7 +1355,7 @@ }, "Protocols":{ "shape":"Protocols", - "documentation":"

Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. The available protocols are:

  • Secure Shell (SSH) File Transfer Protocol (SFTP): File transfer over SSH

  • File Transfer Protocol Secure (FTPS): File transfer with TLS encryption

  • File Transfer Protocol (FTP): Unencrypted file transfer

" + "documentation":"

Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. The available protocols are:

  • Secure Shell (SSH) File Transfer Protocol (SFTP): File transfer over SSH

  • File Transfer Protocol Secure (FTPS): File transfer with TLS encryption

  • File Transfer Protocol (FTP): Unencrypted file transfer

If you select FTPS, you must choose a certificate stored in AWS Certificate Manager (ACM) which will be used to identify your server when clients connect to it over FTPS.

If Protocol includes either FTP or FTPS, then the EndpointType must be VPC and the IdentityProviderType must be API_GATEWAY.

If Protocol includes FTP, then AddressAllocationIds cannot be associated.

If Protocol is set only to SFTP, the EndpointType can be set to PUBLIC and the IdentityProviderType can be set to SERVICE_MANAGED.

" }, "ServerId":{ "shape":"ServerId", @@ -1381,11 +1390,11 @@ }, "HomeDirectoryMappings":{ "shape":"HomeDirectoryMappings", - "documentation":"

Logical directory mappings that specify what Amazon S3 paths and keys should be visible to your user and how you want to make them visible. You will need to specify the \"Entry\" and \"Target\" pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 path. If you only specify a target, it will be displayed as is. You will need to also make sure that your AWS IAM Role provides access to paths in Target. The following is an example.

'[ \"/bucket2/documentation\", { \"Entry\": \"your-personal-report.pdf\", \"Target\": \"/bucket3/customized-reports/${transfer:UserName}.pdf\" } ]'

In most cases, you can use this value instead of the scope-down policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

If the target of a logical directory entry does not exist in Amazon S3, the entry will be ignored. As a workaround, you can use the Amazon S3 api to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api call instead of s3 so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.

" + "documentation":"

Logical directory mappings that specify what Amazon S3 paths and keys should be visible to your user and how you want to make them visible. You will need to specify the \"Entry\" and \"Target\" pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 path. If you only specify a target, it will be displayed as is. You will need to also make sure that your IAM role provides access to paths in Target. The following is an example.

'[ \"/bucket2/documentation\", { \"Entry\": \"your-personal-report.pdf\", \"Target\": \"/bucket3/customized-reports/${transfer:UserName}.pdf\" } ]'

In most cases, you can use this value instead of the scope-down policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

If the target of a logical directory entry does not exist in Amazon S3, the entry will be ignored. As a workaround, you can use the Amazon S3 API to create 0-byte objects as placeholders for your directory. If using the CLI, use the s3api call instead of s3 so you can use the put-object operation. For example, use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the key name ends in a / for it to be considered a folder.

" }, "Policy":{ "shape":"Policy", - "documentation":"

Allows you to supply a scope-down policy for your user so you can use the same AWS Identity and Access Management (IAM) role across multiple users. The policy scopes down user access to portions of your Amazon S3 bucket. Variables you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.

For scope-down policies, AWS Transfer Family stores the policy as a JSON blob, instead of the Amazon Resource Name (ARN) of the policy. You save the policy as a JSON blob and pass it in the Policy argument.

For an example of a scope-down policy, see Creating a Scope-Down Policy.

For more information, see AssumeRole in the AWS Security Token Service API Reference.

" + "documentation":"

Allows you to supply a scope-down policy for your user so you can use the same IAM role across multiple users. The policy scopes down user access to portions of your Amazon S3 bucket. Variables you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.

For scope-down policies, AWS Transfer Family stores the policy as a JSON blob, instead of the Amazon Resource Name (ARN) of the policy. You save the policy as a JSON blob and pass it in the Policy argument.

For an example of a scope-down policy, see Creating a scope-down policy.

For more information, see AssumeRole in the AWS Security Token Service API Reference.

" }, "Role":{ "shape":"Role", diff --git a/botocore/data/worklink/2018-09-25/service-2.json b/botocore/data/worklink/2018-09-25/service-2.json index 6383c105..b60eccaa 100644 --- a/botocore/data/worklink/2018-09-25/service-2.json +++ b/botocore/data/worklink/2018-09-25/service-2.json @@ -319,6 +319,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServerErrorException"}, {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"TooManyRequestsException"} ], "documentation":"

Retrieves a list of domains associated to a specified fleet.

" @@ -339,6 +340,19 @@ ], "documentation":"

Retrieves a list of fleets for the current account and Region.

" }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{ResourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InvalidRequestException"} + ], + "documentation":"

Retrieves a list of tags for the specified resource.

" + }, "ListWebsiteAuthorizationProviders":{ "name":"ListWebsiteAuthorizationProviders", "http":{ @@ -423,6 +437,32 @@ ], "documentation":"

Signs the user out from all of their devices. The user can sign in again if they have valid credentials.

" }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{ResourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InvalidRequestException"} + ], + "documentation":"

Adds or overwrites one or more tags for the specified resource, such as a fleet. Each tag consists of a key and an optional value. If a resource already has a tag with the same key, this operation updates its value.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{ResourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InvalidRequestException"} + ], + "documentation":"

Removes one or more tags from the specified resource.

" + }, "UpdateAuditStreamConfiguration":{ "name":"UpdateAuditStreamConfiguration", "http":{ @@ -622,7 +662,10 @@ } } }, - "AuditStreamArn":{"type":"string"}, + "AuditStreamArn":{ + "type":"string", + "pattern":"^arn:aws:kinesis:.+:[0-9]{12}:stream/AmazonWorkLink-.*$" + }, "AuthorizationProviderType":{ "type":"string", "enum":["SAML"] @@ -660,6 +703,10 @@ "OptimizeForEndUserLocation":{ "shape":"Boolean", "documentation":"

The option to optimize for better performance by routing traffic through the closest AWS Region to users, which may be outside of your home Region.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags to add to the resource. A tag is a key-value pair.

" } } }, @@ -668,7 +715,7 @@ "members":{ "FleetArn":{ "shape":"FleetArn", - "documentation":"

The ARN of the fleet.

" + "documentation":"

The Amazon Resource Name (ARN) of the fleet.

" } } }, @@ -859,7 +906,7 @@ "members":{ "FleetArn":{ "shape":"FleetArn", - "documentation":"

The ARN of the fleet.

" + "documentation":"

The Amazon Resource Name (ARN) of the fleet.

" } } }, @@ -893,6 +940,10 @@ "FleetStatus":{ "shape":"FleetStatus", "documentation":"

The current state of the fleet.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags attached to the resource. A tag is a key-value pair.

" } } }, @@ -1155,7 +1206,7 @@ "members":{ "FleetArn":{ "shape":"FleetArn", - "documentation":"

The ARN of the fleet.

" + "documentation":"

The Amazon Resource Name (ARN) of the fleet.

" }, "CreatedTime":{ "shape":"DateTime", @@ -1171,7 +1222,7 @@ }, "DisplayName":{ "shape":"DisplayName", - "documentation":"

The name to display.

" + "documentation":"

The name of the fleet to display.

" }, "CompanyCode":{ "shape":"CompanyCode", @@ -1180,6 +1231,10 @@ "FleetStatus":{ "shape":"FleetStatus", "documentation":"

The status of the fleet.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags attached to the resource. A tag is a key-value pair.

" } }, "documentation":"

The summary of the fleet.

" @@ -1303,6 +1358,27 @@ } } }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"FleetArn", + "documentation":"

The Amazon Resource Name (ARN) of the fleet.

", + "location":"uri", + "locationName":"ResourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags attached to the resource. A tag is a key-value pair.

" + } + } + }, "ListWebsiteAuthorizationProvidersRequest":{ "type":"structure", "required":["FleetArn"], @@ -1481,6 +1557,53 @@ "type":"list", "member":{"shape":"SubnetId"} }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^(?!aws:)[a-zA-Z+-=._:/]+$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":1 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"FleetArn", + "documentation":"

The Amazon Resource Name (ARN) of the fleet.

", + "location":"uri", + "locationName":"ResourceArn" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags to add to the resource. A tag is a key-value pair.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256 + }, "TooManyRequestsException":{ "type":"structure", "members":{ @@ -1499,6 +1622,32 @@ "error":{"httpStatusCode":403}, "exception":true }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"FleetArn", + "documentation":"

The Amazon Resource Name (ARN) of the fleet.

", + "location":"uri", + "locationName":"ResourceArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

The list of tag keys to remove from the resource.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateAuditStreamConfigurationRequest":{ "type":"structure", "required":["FleetArn"], @@ -1703,5 +1852,5 @@ "member":{"shape":"WebsiteCaSummary"} } }, - "documentation":"

Amazon WorkLink is a cloud-based service that provides secure access to internal websites and web apps from iOS phones. In a single step, your users, such as employees, can access internal websites as efficiently as they access any other public website. They enter a URL in their web browser, or choose a link to an internal website in an email. Amazon WorkLink authenticates the user's access and securely renders authorized internal web content in a secure rendering service in the AWS cloud. Amazon WorkLink doesn't download or store any internal web content on mobile devices.

" + "documentation":"

Amazon WorkLink is a cloud-based service that provides secure access to internal websites and web apps from iOS and Android phones. In a single step, your users, such as employees, can access internal websites as efficiently as they access any other public website. They enter a URL in their web browser, or choose a link to an internal website in an email. Amazon WorkLink authenticates the user's access and securely renders authorized internal web content in a secure rendering service in the AWS cloud. Amazon WorkLink doesn't download or store any internal web content on mobile devices.

" } diff --git a/botocore/exceptions.py b/botocore/exceptions.py index f37b269a..fd5d4c94 100644 --- a/botocore/exceptions.py +++ b/botocore/exceptions.py @@ -559,6 +559,22 @@ class MissingServiceIdError(UndefinedModelAttributeError): self.kwargs = kwargs +class SSOError(BotoCoreError): + fmt = "An unspecified error happened when resolving SSO credentials" + + +class SSOTokenLoadError(SSOError): + fmt = "Error loading SSO Token: {error_msg}" + + +class UnauthorizedSSOTokenError(SSOError): + fmt = ( + "The SSO session associated with this profile has expired or is " + "otherwise invalid. To refresh this SSO session run aws sso login " + "with the corresponding profile." + ) + + class CapacityNotAvailableError(BotoCoreError): fmt = ( 'Insufficient request capacity available.' diff --git a/botocore/handlers.py b/botocore/handlers.py index 188fe00b..bf8cf9f2 100644 --- a/botocore/handlers.py +++ b/botocore/handlers.py @@ -41,6 +41,7 @@ from botocore.exceptions import MissingServiceIdError from botocore.utils import percent_encode, SAFE_CHARS from botocore.utils import switch_host_with_param from botocore.utils import hyphenize_service_id +from botocore.utils import conditionally_calculate_md5 from botocore import retryhandler from botocore import utils @@ -192,38 +193,6 @@ def json_decode_template_body(parsed, **kwargs): logger.debug('error loading JSON', exc_info=True) -def calculate_md5(params, **kwargs): - request_dict = params - if request_dict['body'] and 'Content-MD5' not in params['headers']: - body = request_dict['body'] - if isinstance(body, (bytes, bytearray)): - binary_md5 = _calculate_md5_from_bytes(body) - else: - binary_md5 = _calculate_md5_from_file(body) - base64_md5 = base64.b64encode(binary_md5).decode('ascii') - params['headers']['Content-MD5'] = base64_md5 - - -def _calculate_md5_from_bytes(body_bytes): - md5 = get_md5(body_bytes) - return md5.digest() - - -def _calculate_md5_from_file(fileobj): - start_position = fileobj.tell() - md5 = get_md5() - 
for chunk in iter(lambda: fileobj.read(1024 * 1024), b''): - md5.update(chunk) - fileobj.seek(start_position) - return md5.digest() - - -def conditionally_calculate_md5(params, context, request_signer, **kwargs): - """Only add a Content-MD5 if the system supports it.""" - if MD5_AVAILABLE: - calculate_md5(params, **kwargs) - - def validate_bucket_name(params, **kwargs): if 'Bucket' not in params: return @@ -949,26 +918,6 @@ BUILTIN_HANDLERS = [ set_list_objects_encoding_type_url), ('before-parameter-build.s3.ListObjectVersions', set_list_objects_encoding_type_url), - ('before-call.s3.PutBucketTagging', calculate_md5), - ('before-call.s3.PutBucketLifecycle', calculate_md5), - ('before-call.s3.PutBucketLifecycleConfiguration', calculate_md5), - ('before-call.s3.PutBucketCors', calculate_md5), - ('before-call.s3.DeleteObjects', calculate_md5), - ('before-call.s3.PutBucketReplication', calculate_md5), - ('before-call.s3.PutObject', conditionally_calculate_md5), - ('before-call.s3.UploadPart', conditionally_calculate_md5), - ('before-call.s3.PutBucketAcl', conditionally_calculate_md5), - ('before-call.s3.PutBucketLogging', conditionally_calculate_md5), - ('before-call.s3.PutBucketNotification', conditionally_calculate_md5), - ('before-call.s3.PutBucketPolicy', conditionally_calculate_md5), - ('before-call.s3.PutBucketRequestPayment', conditionally_calculate_md5), - ('before-call.s3.PutBucketVersioning', conditionally_calculate_md5), - ('before-call.s3.PutBucketWebsite', conditionally_calculate_md5), - ('before-call.s3.PutObjectAcl', conditionally_calculate_md5), - ('before-call.s3.PutObjectLegalHold', calculate_md5), - ('before-call.s3.PutObjectRetention', calculate_md5), - ('before-call.s3.PutObjectLockConfiguration', calculate_md5), - ('before-parameter-build.s3.CopyObject', handle_copy_source_param), ('before-parameter-build.s3.UploadPartCopy', @@ -983,6 +932,8 @@ BUILTIN_HANDLERS = [ ('before-call.s3', add_expect_header), ('before-call.glacier', 
add_glacier_version), ('before-call.apigateway', add_accept_header), + ('before-call.s3.PutObject', conditionally_calculate_md5), + ('before-call.s3.UploadPart', conditionally_calculate_md5), ('before-call.glacier.UploadArchive', add_glacier_checksums), ('before-call.glacier.UploadMultipartPart', add_glacier_checksums), ('before-call.ec2.CopySnapshot', inject_presigned_url_ec2), diff --git a/botocore/model.py b/botocore/model.py index 245f76eb..bb197b19 100644 --- a/botocore/model.py +++ b/botocore/model.py @@ -519,6 +519,10 @@ class OperationModel(object): def endpoint(self): return self._operation_model.get('endpoint') + @CachedProperty + def http_checksum_required(self): + return self._operation_model.get('httpChecksumRequired', False) + @CachedProperty def has_event_stream_input(self): return self.get_event_stream_input() is not None diff --git a/botocore/serialize.py b/botocore/serialize.py index cc609a9e..089e0fda 100644 --- a/botocore/serialize.py +++ b/botocore/serialize.py @@ -49,6 +49,7 @@ from botocore.compat import json, formatdate from botocore.utils import parse_to_aware_datetime from botocore.utils import percent_encode from botocore.utils import is_json_value_header +from botocore.utils import conditionally_calculate_md5 from botocore import validate @@ -184,6 +185,12 @@ class Serializer(object): return host_prefix_expression.format(**format_kwargs) + def _prepare_additional_traits(self, request, operation_model): + """Determine if additional traits are required for given model""" + if operation_model.http_checksum_required: + conditionally_calculate_md5(request) + return request + class QuerySerializer(Serializer): @@ -210,6 +217,8 @@ class QuerySerializer(Serializer): if host_prefix is not None: serialized['host_prefix'] = host_prefix + serialized = self._prepare_additional_traits(serialized, + operation_model) return serialized def _serialize(self, serialized, value, shape, prefix=''): @@ -343,6 +352,8 @@ class JSONSerializer(Serializer): if 
host_prefix is not None: serialized['host_prefix'] = host_prefix + serialized = self._prepare_additional_traits(serialized, + operation_model) return serialized def _serialize(self, serialized, value, shape, key=None): @@ -460,6 +471,8 @@ class BaseRestSerializer(Serializer): if host_prefix is not None: serialized['host_prefix'] = host_prefix + serialized = self._prepare_additional_traits(serialized, + operation_model) return serialized def _render_uri_template(self, uri_template, params): diff --git a/botocore/session.py b/botocore/session.py index 3382658b..e8e1f478 100644 --- a/botocore/session.py +++ b/botocore/session.py @@ -438,7 +438,7 @@ class Session(object): Where: - agent_name is the value of the `user_agent_name` attribute - of the session object (`Boto` by default). + of the session object (`Botocore` by default). - agent_version is the value of the `user_agent_version` attribute of the session object (the botocore version by default). by default. diff --git a/botocore/signers.py b/botocore/signers.py index 93ce8dd9..26fd9baa 100644 --- a/botocore/signers.py +++ b/botocore/signers.py @@ -674,6 +674,8 @@ def generate_presigned_post(self, Bucket, Key, Fields=None, Conditions=None, if fields is None: fields = {} + else: + fields = fields.copy() if conditions is None: conditions = [] diff --git a/botocore/utils.py b/botocore/utils.py index e38c4b38..5bfaee93 100644 --- a/botocore/utils.py +++ b/botocore/utils.py @@ -10,6 +10,7 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
+import base64 import re import time import logging @@ -29,14 +30,16 @@ from dateutil.tz import tzutc import botocore import botocore.awsrequest import botocore.httpsession -from botocore.compat import json, quote, zip_longest, urlsplit, urlunsplit -from botocore.compat import OrderedDict, six, urlparse, get_tzinfo_options +from botocore.compat import ( + json, quote, zip_longest, urlsplit, urlunsplit, OrderedDict, + six, urlparse, get_tzinfo_options, get_md5, MD5_AVAILABLE +) from botocore.vendored.six.moves.urllib.request import getproxies, proxy_bypass from botocore.exceptions import ( InvalidExpressionError, ConfigNotFound, InvalidDNSNameError, ClientError, MetadataRetrievalError, EndpointConnectionError, ReadTimeoutError, ConnectionClosedError, ConnectTimeoutError, UnsupportedS3ArnError, - UnsupportedS3AccesspointConfigurationError + UnsupportedS3AccesspointConfigurationError, SSOTokenLoadError, ) logger = logging.getLogger(__name__) @@ -1725,6 +1728,37 @@ def get_encoding_from_headers(headers, default='ISO-8859-1'): return default +def calculate_md5(body, **kwargs): + if isinstance(body, (bytes, bytearray)): + binary_md5 = _calculate_md5_from_bytes(body) + else: + binary_md5 = _calculate_md5_from_file(body) + return base64.b64encode(binary_md5).decode('ascii') + + +def _calculate_md5_from_bytes(body_bytes): + md5 = get_md5(body_bytes) + return md5.digest() + + +def _calculate_md5_from_file(fileobj): + start_position = fileobj.tell() + md5 = get_md5() + for chunk in iter(lambda: fileobj.read(1024 * 1024), b''): + md5.update(chunk) + fileobj.seek(start_position) + return md5.digest() + + +def conditionally_calculate_md5(params, **kwargs): + """Only add a Content-MD5 if the system supports it.""" + headers = params['headers'] + body = params['body'] + if MD5_AVAILABLE and body and 'Content-MD5' not in headers: + md5_digest = calculate_md5(body, **kwargs) + params['headers']['Content-MD5'] = md5_digest + + class FileWebIdentityTokenLoader(object): def 
__init__(self, web_identity_token_path, _open=open): self._web_identity_token_path = web_identity_token_path @@ -1733,3 +1767,26 @@ class FileWebIdentityTokenLoader(object): def __call__(self): with self._open(self._web_identity_token_path) as token_file: return token_file.read() + + +class SSOTokenLoader(object): + def __init__(self, cache=None): + if cache is None: + cache = {} + self._cache = cache + + def _generate_cache_key(self, start_url): + return hashlib.sha1(start_url.encode('utf-8')).hexdigest() + + def __call__(self, start_url): + cache_key = self._generate_cache_key(start_url) + try: + token = self._cache[cache_key] + return token['accessToken'] + except KeyError: + logger.debug('Failed to load SSO token:', exc_info=True) + error_msg = ( + 'The SSO access token has either expired or is otherwise ' + 'invalid.' + ) + raise SSOTokenLoadError(error_msg=error_msg) diff --git a/docs/source/conf.py b/docs/source/conf.py index 8ecb8374..027ff686 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -52,9 +52,9 @@ copyright = u'2013, Mitch Garnaat' # built documents. # # The short X.Y version. -version = '1.16.' +version = '1.17' # The full version, including alpha/beta/rc tags. -release = '1.16.19' +release = '1.17.5' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/tests/__init__.py b/tests/__init__.py index c08780c0..bf862556 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -514,9 +514,9 @@ class StubbedSession(botocore.session.Session): self._client_stubs[service_name] = stubber return client - def stub(self, service_name): + def stub(self, service_name, *args, **kwargs): if service_name not in self._client_stubs: - self.create_client(service_name) + self.create_client(service_name, *args, **kwargs) return self._client_stubs[service_name] def activate_stubs(self): diff --git a/tests/functional/retries/test_bucket.py b/tests/functional/retries/test_bucket.py index 1e9f33b4..54e01fc6 100644 --- a/tests/functional/retries/test_bucket.py +++ b/tests/functional/retries/test_bucket.py @@ -105,5 +105,5 @@ class TestTokenBucketThreading(unittest.TestCase): # thread acquisition(), e.g. must be with a 2 stddev range, but we # can sanity check that our implementation isn't drastically # starving a thread. So we'll arbitrarily say that a thread - # can't have less than 30% of the mean allocations per thread. - self.assertTrue(not any(x < (0.3 * mean) for x in distribution)) + # can't have less than 20% of the mean allocations per thread. 
+ self.assertTrue(not any(x < (0.2 * mean) for x in distribution)) diff --git a/tests/functional/test_credentials.py b/tests/functional/test_credentials.py index 1f317d61..18db4154 100644 --- a/tests/functional/test_credentials.py +++ b/tests/functional/test_credentials.py @@ -26,6 +26,7 @@ from botocore.exceptions import CredentialRetrievalError from tests import unittest, IntegerRefresher, BaseEnvVar, random_chars from tests import temporary_file, StubbedSession, SessionHTTPStubber +from botocore import UNSIGNED from botocore.credentials import EnvProvider, ContainerProvider from botocore.credentials import InstanceMetadataProvider from botocore.credentials import Credentials, ReadOnlyCredentials @@ -33,10 +34,13 @@ from botocore.credentials import AssumeRoleProvider, ProfileProviderBuilder from botocore.credentials import CanonicalNameCredentialSourcer from botocore.credentials import DeferredRefreshableCredentials from botocore.credentials import create_credential_resolver +from botocore.credentials import JSONFileCache +from botocore.credentials import SSOProvider +from botocore.config import Config from botocore.session import Session from botocore.exceptions import InvalidConfigError, InfiniteLoopConfigError from botocore.stub import Stubber -from botocore.awsrequest import AWSResponse +from botocore.utils import datetime2timestamp class TestCredentialRefreshRaces(unittest.TestCase): @@ -245,7 +249,10 @@ class TestAssumeRole(BaseAssumeRoleTest): self.env_provider, self.container_provider, self.metadata_provider ]), - profile_provider_builder=ProfileProviderBuilder(session), + profile_provider_builder=ProfileProviderBuilder( + session, + sso_token_cache=JSONFileCache(self.tempdir), + ), ) stubber = session.stub('sts') stubber.activate() @@ -540,6 +547,65 @@ class TestAssumeRole(BaseAssumeRoleTest): with self.assertRaises(InvalidConfigError): session.get_credentials() + def test_sso_source_profile(self): + token_cache_key = 
'f395038c92f1828cbb3991d2d6152d326b895606' + cached_token = { + 'accessToken': 'a.token', + 'expiresAt': self.some_future_time(), + } + temp_cache = JSONFileCache(self.tempdir) + temp_cache[token_cache_key] = cached_token + + config = ( + '[profile A]\n' + 'role_arn = arn:aws:iam::123456789:role/RoleA\n' + 'source_profile = B\n' + '[profile B]\n' + 'sso_region = us-east-1\n' + 'sso_start_url = https://test.url/start\n' + 'sso_role_name = SSORole\n' + 'sso_account_id = 1234567890\n' + ) + self.write_config(config) + + session, sts_stubber = self.create_session(profile='A') + client_config = Config( + region_name='us-east-1', + signature_version=UNSIGNED, + ) + sso_stubber = session.stub('sso', config=client_config) + sso_stubber.activate() + # The expiration needs to be in milliseconds + expiration = datetime2timestamp(self.some_future_time()) * 1000 + sso_role_creds = self.create_random_credentials() + sso_role_response = { + 'roleCredentials': { + 'accessKeyId': sso_role_creds.access_key, + 'secretAccessKey': sso_role_creds.secret_key, + 'sessionToken': sso_role_creds.token, + 'expiration': int(expiration), + } + } + sso_stubber.add_response('get_role_credentials', sso_role_response) + + expected_creds = self.create_random_credentials() + assume_role_response = self.create_assume_role_response(expected_creds) + sts_stubber.add_response('assume_role', assume_role_response) + + actual_creds = session.get_credentials() + self.assert_creds_equal(actual_creds, expected_creds) + sts_stubber.assert_no_pending_responses() + # Assert that the client was created with the credentials from the + # SSO get role credentials response + self.assertEqual(self.mock_client_creator.call_count, 1) + _, kwargs = self.mock_client_creator.call_args_list[0] + expected_kwargs = { + 'aws_access_key_id': sso_role_creds.access_key, + 'aws_secret_access_key': sso_role_creds.secret_key, + 'aws_session_token': sso_role_creds.token, + } + self.assertEqual(kwargs, expected_kwargs) + def 
test_web_identity_credential_source_ignores_env_vars(self): token_path = os.path.join(self.tempdir, 'token') with open(token_path, 'w') as token_file: diff --git a/tests/functional/test_s3.py b/tests/functional/test_s3.py index c069c1ed..009342bb 100644 --- a/tests/functional/test_s3.py +++ b/tests/functional/test_s3.py @@ -771,7 +771,7 @@ class TestS3SigV4(BaseS3OperationTest): def test_content_sha256_set_if_md5_is_unavailable(self): with mock.patch('botocore.auth.MD5_AVAILABLE', False): - with mock.patch('botocore.handlers.MD5_AVAILABLE', False): + with mock.patch('botocore.utils.MD5_AVAILABLE', False): with self.http_stubber: self.client.put_object(Bucket='foo', Key='bar', Body='baz') sent_headers = self.get_sent_headers() @@ -1106,6 +1106,65 @@ class TestGeneratePresigned(BaseS3OperationTest): 'get_object', {'Bucket': 'mybucket', 'Key': 'mykey'}) self.assert_is_v2_presigned_url(url) +def test_checksums_included_in_expected_operations(): + """Validate expected calls include Content-MD5 header""" + + t = S3ChecksumCases(_verify_checksum_in_headers) + yield t.case('put_bucket_tagging', + {"Bucket": "foo", "Tagging":{"TagSet":[]}}) + yield t.case('put_bucket_lifecycle', + {"Bucket": "foo", "LifecycleConfiguration":{"Rules":[]}}) + yield t.case('put_bucket_lifecycle_configuration', + {"Bucket": "foo", "LifecycleConfiguration":{"Rules":[]}}) + yield t.case('put_bucket_cors', + {"Bucket": "foo", "CORSConfiguration":{"CORSRules": []}}) + yield t.case('delete_objects', + {"Bucket": "foo", "Delete": {"Objects": [{"Key": "bar"}]}}) + yield t.case('put_bucket_replication', + {"Bucket": "foo", + "ReplicationConfiguration": {"Role":"", "Rules": []}}) + yield t.case('put_bucket_acl', + {"Bucket": "foo", "AccessControlPolicy":{}}) + yield t.case('put_bucket_logging', + {"Bucket": "foo", + "BucketLoggingStatus":{}}) + yield t.case('put_bucket_notification', + {"Bucket": "foo", "NotificationConfiguration":{}}) + yield t.case('put_bucket_policy', + {"Bucket": "foo", "Policy": 
""}) + yield t.case('put_bucket_request_payment', + {"Bucket": "foo", "RequestPaymentConfiguration":{"Payer": ""}}) + yield t.case('put_bucket_versioning', + {"Bucket": "foo", "VersioningConfiguration":{}}) + yield t.case('put_bucket_website', + {"Bucket": "foo", + "WebsiteConfiguration":{}}) + yield t.case('put_object_acl', + {"Bucket": "foo", "Key": "bar", "AccessControlPolicy":{}}) + yield t.case('put_object_legal_hold', + {"Bucket": "foo", "Key": "bar", "LegalHold":{"Status": "ON"}}) + yield t.case('put_object_retention', + {"Bucket": "foo", "Key": "bar", + "Retention":{"RetainUntilDate":"2020-11-05"}}) + yield t.case('put_object_lock_configuration', + {"Bucket": "foo", "ObjectLockConfiguration":{}}) + + +def _verify_checksum_in_headers(operation, operation_kwargs): + environ = {} + with mock.patch('os.environ', environ): + environ['AWS_ACCESS_KEY_ID'] = 'access_key' + environ['AWS_SECRET_ACCESS_KEY'] = 'secret_key' + environ['AWS_CONFIG_FILE'] = 'no-exist-foo' + session = create_session() + session.config_filename = 'no-exist-foo' + client = session.create_client('s3') + with ClientHTTPStubber(client) as stub: + stub.add_response() + call = getattr(client, operation) + call(**operation_kwargs) + assert 'Content-MD5' in stub.requests[-1].headers + def test_correct_url_used_for_s3(): # Test that given various sets of config options and bucket names, @@ -1759,10 +1818,15 @@ def test_correct_url_used_for_s3(): 'https://bucket.s3.unknown.amazonaws.com/key')) -class S3AddressingCases(object): +class BaseTestCase: def __init__(self, verify_function): self._verify = verify_function + def case(self, **kwargs): + return self._verify, kwargs + + +class S3AddressingCases(BaseTestCase): def case(self, region=None, bucket='bucket', key='key', s3_config=None, is_secure=True, customer_provided_endpoint=None, expected_url=None, signature_version=None): @@ -1772,6 +1836,11 @@ class S3AddressingCases(object): ) +class S3ChecksumCases(BaseTestCase): + def case(self, operation, 
operation_args): + return self._verify, operation, operation_args + + def _verify_expected_endpoint_url(region, bucket, key, s3_config, is_secure=True, customer_provided_endpoint=None, diff --git a/tests/unit/test_credentials.py b/tests/unit/test_credentials.py index 2ece4241..77d6269c 100644 --- a/tests/unit/test_credentials.py +++ b/tests/unit/test_credentials.py @@ -26,13 +26,16 @@ from botocore import credentials from botocore.utils import ContainerMetadataFetcher from botocore.compat import json, six from botocore.session import Session -from botocore.utils import FileWebIdentityTokenLoader +from botocore.stub import Stubber +from botocore.utils import datetime2timestamp +from botocore.utils import FileWebIdentityTokenLoader, SSOTokenLoader from botocore.credentials import EnvProvider, create_assume_role_refresher from botocore.credentials import CredentialProvider, AssumeRoleProvider from botocore.credentials import ConfigProvider, SharedCredentialProvider from botocore.credentials import ProcessProvider from botocore.credentials import AssumeRoleWithWebIdentityProvider from botocore.credentials import Credentials, ProfileProviderBuilder +from botocore.credentials import SSOCredentialFetcher, SSOProvider from botocore.configprovider import ConfigValueStore import botocore.exceptions import botocore.session @@ -3206,6 +3209,7 @@ class TestProfileProviderBuilder(unittest.TestCase): providers = self.builder.providers('some-profile') expected_providers = [ AssumeRoleWithWebIdentityProvider, + SSOProvider, SharedCredentialProvider, ProcessProvider, ConfigProvider, @@ -3214,3 +3218,196 @@ class TestProfileProviderBuilder(unittest.TestCase): zipped_providers = six.moves.zip(providers, expected_providers) for provider, expected_type in zipped_providers: self.assertTrue(isinstance(provider, expected_type)) + + +class TestSSOCredentialFetcher(unittest.TestCase): + def setUp(self): + self.sso = Session().create_client('sso', region_name='us-east-1') + self.stubber = 
Stubber(self.sso) + self.mock_session = mock.Mock(spec=Session) + self.mock_session.create_client.return_value = self.sso + + self.cache = {} + self.sso_region = 'us-east-1' + self.start_url = 'https://d-92671207e4.awsapps.com/start' + self.role_name = 'test-role' + self.account_id = '1234567890' + self.access_token = 'some.sso.token' + # This is just an arbitrary point in time we can pin to + self.now = datetime(2008, 9, 23, 12, 26, 40, tzinfo=tzutc()) + # The SSO endpoint uses ms whereas the OIDC endpoint uses seconds + self.now_timestamp = 1222172800000 + + self.loader = mock.Mock(spec=SSOTokenLoader) + self.loader.return_value = self.access_token + self.fetcher = SSOCredentialFetcher( + self.start_url, self.sso_region, self.role_name, self.account_id, + self.mock_session.create_client, token_loader=self.loader, + cache=self.cache, + ) + + def test_can_fetch_credentials(self): + expected_params = { + 'roleName': self.role_name, + 'accountId': self.account_id, + 'accessToken': self.access_token, + } + expected_response = { + 'roleCredentials': { + 'accessKeyId': 'foo', + 'secretAccessKey': 'bar', + 'sessionToken': 'baz', + 'expiration': self.now_timestamp + 1000000, + } + } + self.stubber.add_response( + 'get_role_credentials', + expected_response, + expected_params=expected_params, + ) + with self.stubber: + credentials = self.fetcher.fetch_credentials() + self.assertEqual(credentials['access_key'], 'foo') + self.assertEqual(credentials['secret_key'], 'bar') + self.assertEqual(credentials['token'], 'baz') + self.assertEqual(credentials['expiry_time'], '2008-09-23T12:43:20UTC') + cache_key = '048db75bbe50955c16af7aba6ff9c41a3131bb7e' + expected_cached_credentials = { + 'ProviderType': 'sso', + 'Credentials': { + 'AccessKeyId': 'foo', + 'SecretAccessKey': 'bar', + 'SessionToken': 'baz', + 'Expiration': '2008-09-23T12:43:20UTC', + } + } + self.assertEqual(self.cache[cache_key], expected_cached_credentials) + + def 
test_raises_helpful_message_on_unauthorized_exception(self): + expected_params = { + 'roleName': self.role_name, + 'accountId': self.account_id, + 'accessToken': self.access_token, + } + self.stubber.add_client_error( + 'get_role_credentials', + service_error_code='UnauthorizedException', + expected_params=expected_params, + ) + with self.assertRaises(botocore.exceptions.UnauthorizedSSOTokenError): + with self.stubber: + credentials = self.fetcher.fetch_credentials() + + +class TestSSOProvider(unittest.TestCase): + def setUp(self): + self.sso = Session().create_client('sso', region_name='us-east-1') + self.stubber = Stubber(self.sso) + self.mock_session = mock.Mock(spec=Session) + self.mock_session.create_client.return_value = self.sso + + self.sso_region = 'us-east-1' + self.start_url = 'https://d-92671207e4.awsapps.com/start' + self.role_name = 'test-role' + self.account_id = '1234567890' + self.access_token = 'some.sso.token' + + self.profile_name = 'sso-profile' + self.config = { + 'sso_region': self.sso_region, + 'sso_start_url': self.start_url, + 'sso_role_name': self.role_name, + 'sso_account_id': self.account_id, + } + self.expires_at = datetime.now(tzlocal()) + timedelta(hours=24) + self.cached_creds_key = '048db75bbe50955c16af7aba6ff9c41a3131bb7e' + self.cached_token_key = '13f9d35043871d073ab260e020f0ffde092cb14b' + self.cache = { + self.cached_token_key: { + 'accessToken': self.access_token, + 'expiresAt': self.expires_at.strftime('%Y-%m-%dT%H:%M:%S%Z'), + } + } + self.provider = SSOProvider( + load_config=self._mock_load_config, + client_creator=self.mock_session.create_client, + profile_name=self.profile_name, + cache=self.cache, + token_cache=self.cache, + ) + + self.expected_get_role_credentials_params = { + 'roleName': self.role_name, + 'accountId': self.account_id, + 'accessToken': self.access_token, + } + expiration = datetime2timestamp(self.expires_at) + self.expected_get_role_credentials_response = { + 'roleCredentials': { + 'accessKeyId': 
'foo', + 'secretAccessKey': 'bar', + 'sessionToken': 'baz', + 'expiration': int(expiration * 1000), + } + } + + def _mock_load_config(self): + return { + 'profiles': { + self.profile_name: self.config, + } + } + + def _add_get_role_credentials_response(self): + self.stubber.add_response( + 'get_role_credentials', + self.expected_get_role_credentials_response, + self.expected_get_role_credentials_params, + ) + + def test_load_sso_credentials_without_cache(self): + self._add_get_role_credentials_response() + with self.stubber: + credentials = self.provider.load() + self.assertEqual(credentials.access_key, 'foo') + self.assertEqual(credentials.secret_key, 'bar') + self.assertEqual(credentials.token, 'baz') + + def test_load_sso_credentials_with_cache(self): + cached_creds = { + 'Credentials': { + 'AccessKeyId': 'cached-akid', + 'SecretAccessKey': 'cached-sak', + 'SessionToken': 'cached-st', + 'Expiration': self.expires_at.strftime('%Y-%m-%dT%H:%M:%S%Z'), + } + } + self.cache[self.cached_creds_key] = cached_creds + credentials = self.provider.load() + self.assertEqual(credentials.access_key, 'cached-akid') + self.assertEqual(credentials.secret_key, 'cached-sak') + self.assertEqual(credentials.token, 'cached-st') + + def test_load_sso_credentials_with_cache_expired(self): + cached_creds = { + 'Credentials': { + 'AccessKeyId': 'expired-akid', + 'SecretAccessKey': 'expired-sak', + 'SessionToken': 'expired-st', + 'Expiration': '2002-10-22T20:52:11UTC', + } + } + self.cache[self.cached_creds_key] = cached_creds + + self._add_get_role_credentials_response() + with self.stubber: + credentials = self.provider.load() + self.assertEqual(credentials.access_key, 'foo') + self.assertEqual(credentials.secret_key, 'bar') + self.assertEqual(credentials.token, 'baz') + + def test_required_config_not_set(self): + del self.config['sso_start_url'] + # If any required configuration is missing we should get an error + with self.assertRaises(botocore.exceptions.InvalidConfigError): + 
self.provider.load() diff --git a/tests/unit/test_handlers.py b/tests/unit/test_handlers.py index a41bacb2..4c2baf87 100644 --- a/tests/unit/test_handlers.py +++ b/tests/unit/test_handlers.py @@ -38,6 +38,7 @@ from botocore.model import DenormalizedStructureBuilder from botocore.session import Session from botocore.signers import RequestSigner from botocore.credentials import Credentials +from botocore.utils import conditionally_calculate_md5 from botocore import handlers @@ -1124,7 +1125,7 @@ class TestAddMD5(BaseMD5Test): 'method': 'PUT', 'headers': {}} context = self.get_context() - handlers.conditionally_calculate_md5( + conditionally_calculate_md5( request_dict, request_signer=request_signer, context=context) self.assertTrue('Content-MD5' in request_dict['headers']) @@ -1138,7 +1139,7 @@ class TestAddMD5(BaseMD5Test): 'method': 'PUT', 'headers': {}} context = self.get_context({'payload_signing_enabled': False}) - handlers.conditionally_calculate_md5( + conditionally_calculate_md5( request_dict, request_signer=request_signer, context=context) self.assertTrue('Content-MD5' in request_dict['headers']) @@ -1153,8 +1154,8 @@ class TestAddMD5(BaseMD5Test): context = self.get_context() self.set_md5_available(False) - with mock.patch('botocore.handlers.MD5_AVAILABLE', False): - handlers.conditionally_calculate_md5( + with mock.patch('botocore.utils.MD5_AVAILABLE', False): + conditionally_calculate_md5( request_dict, request_signer=request_signer, context=context) self.assertFalse('Content-MD5' in request_dict['headers']) @@ -1169,7 +1170,7 @@ class TestAddMD5(BaseMD5Test): self.set_md5_available(False) with self.assertRaises(MD5UnavailableError): - handlers.calculate_md5( + conditionally_calculate_md5( request_dict, request_signer=request_signer) def test_adds_md5_when_s3v2(self): @@ -1181,7 +1182,7 @@ class TestAddMD5(BaseMD5Test): 'method': 'PUT', 'headers': {}} context = self.get_context() - handlers.conditionally_calculate_md5( + conditionally_calculate_md5( 
request_dict, request_signer=request_signer, context=context) self.assertTrue('Content-MD5' in request_dict['headers']) @@ -1191,7 +1192,7 @@ class TestAddMD5(BaseMD5Test): 'headers': {} } self.md5_digest.return_value = b'8X\xf6"0\xac<\x91_0\x0cfC\x12\xc6?' - handlers.calculate_md5(request_dict) + conditionally_calculate_md5(request_dict) self.assertEqual(request_dict['headers']['Content-MD5'], 'OFj2IjCsPJFfMAxmQxLGPw==') @@ -1201,7 +1202,7 @@ class TestAddMD5(BaseMD5Test): 'headers': {} } self.md5_digest.return_value = b'8X\xf6"0\xac<\x91_0\x0cfC\x12\xc6?' - handlers.calculate_md5(request_dict) + conditionally_calculate_md5(request_dict) self.assertEqual( request_dict['headers']['Content-MD5'], 'OFj2IjCsPJFfMAxmQxLGPw==') @@ -1212,7 +1213,7 @@ class TestAddMD5(BaseMD5Test): 'headers': {} } self.md5_digest.return_value = b'8X\xf6"0\xac<\x91_0\x0cfC\x12\xc6?' - handlers.calculate_md5(request_dict) + conditionally_calculate_md5(request_dict) self.assertEqual( request_dict['headers']['Content-MD5'], 'OFj2IjCsPJFfMAxmQxLGPw==') diff --git a/tests/unit/test_signers.py b/tests/unit/test_signers.py index 97f64f08..3c9c1d66 100644 --- a/tests/unit/test_signers.py +++ b/tests/unit/test_signers.py @@ -923,6 +923,8 @@ class TestGeneratePresignedPost(unittest.TestCase): self.client.generate_presigned_post( self.bucket, self.key, Fields=fields, Conditions=conditions) + self.assertEqual(fields, {'acl': 'public-read'}) + _, post_kwargs = self.presign_post_mock.call_args request_dict = post_kwargs['request_dict'] fields = post_kwargs['fields'] diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index 69e04a24..7c78c9b2 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -66,6 +66,8 @@ from botocore.utils import S3ArnParamHandler from botocore.utils import S3EndpointSetter from botocore.utils import ContainerMetadataFetcher from botocore.utils import InstanceMetadataFetcher +from botocore.utils import SSOTokenLoader +from botocore.exceptions import 
SSOTokenLoadError from botocore.utils import IMDSFetcher from botocore.utils import BadIMDSRequestError from botocore.model import DenormalizedStructureBuilder @@ -2428,3 +2430,31 @@ class TestInstanceMetadataFetcher(unittest.TestCase): result = InstanceMetadataFetcher( user_agent=user_agent).retrieve_iam_role_credentials() self.assertEqual(result, {}) + + +class TestSSOTokenLoader(unittest.TestCase): + def setUp(self): + super(TestSSOTokenLoader, self).setUp() + self.start_url = 'https://d-abc123.awsapps.com/start' + self.cache_key = '40a89917e3175433e361b710a9d43528d7f1890a' + self.access_token = 'totally.a.token' + self.cached_token = { + 'accessToken': self.access_token, + 'expiresAt': '2002-10-18T03:52:38UTC' + } + self.cache = {} + self.loader = SSOTokenLoader(cache=self.cache) + + def test_can_load_token_exists(self): + self.cache[self.cache_key] = self.cached_token + access_token = self.loader(self.start_url) + self.assertEqual(self.access_token, access_token) + + def test_can_handle_does_not_exist(self): + with self.assertRaises(SSOTokenLoadError): + access_token = self.loader(self.start_url) + + def test_can_handle_invalid_cache(self): + self.cache[self.cache_key] = {} + with self.assertRaises(SSOTokenLoadError): + access_token = self.loader(self.start_url)