From 6dd15efbc479a1fed49e67171621c75467819ecb Mon Sep 17 00:00:00 2001
From: TANIGUCHI Takaki
Date: Thu, 24 Aug 2017 11:33:12 +0900
Subject: [PATCH] New upstream version 1.6.6

---
 PKG-INFO | 2 +-
 botocore.egg-info/PKG-INFO | 2 +-
 botocore.egg-info/SOURCES.txt | 9 +
 botocore/__init__.py | 2 +-
 botocore/args.py | 1 +
 botocore/client.py | 20 +-
 botocore/config.py | 30 +-
 .../data/appstream/2016-12-01/service-2.json | 315 +-
 .../data/athena/2017-05-18/paginators-1.json | 3 +-
 botocore/data/batch/2016-08-10/service-2.json | 6 +-
 .../data/budgets/2016-10-20/service-2.json | 19 +-
 .../clouddirectory/2016-05-10/service-2.json | 687 ++-
 .../cloudformation/2010-05-15/service-2.json | 1208 +++-
 .../cloudhsmv2/2017-04-28/paginators-1.json | 3 +
 .../data/cloudhsmv2/2017-04-28/service-2.json | 838 +++
 .../data/cloudwatch/2010-08-01/service-2.json | 21 +-
 .../data/codebuild/2016-10-06/service-2.json | 4 +-
 .../data/codedeploy/2014-10-06/service-2.json | 129 +-
 .../cognito-idp/2016-04-18/service-2.json | 655 +-
 .../data/config/2014-11-12/service-2.json | 77 +-
 .../data/dynamodb/2012-08-10/service-2.json | 20 +-
 botocore/data/ec2/2016-11-15/service-2.json | 280 +-
 botocore/data/efs/2015-02-01/service-2.json | 24 +-
 .../2010-12-01/service-2.json | 37 +-
 botocore/data/emr/2009-03-31/service-2.json | 64 +-
 botocore/data/endpoints.json | 69 +-
 .../firehose/2015-08-04/paginators-1.json | 3 +
 .../data/firehose/2015-08-04/service-2.json | 205 +-
 .../data/gamelift/2015-10-01/service-2.json | 996 ++-
 .../data/glue/2017-03-31/paginators-1.json | 3 +
 botocore/data/glue/2017-03-31/service-2.json | 5463 +++++++++++++++++
 .../data/inspector/2016-02-16/service-2.json | 16 +-
 .../data/kinesis/2013-12-02/paginators-1.json | 3 +-
 .../2015-08-14/service-2.json | 46 +-
 .../data/lambda/2015-03-31/examples-1.json | 2 +-
 .../data/mgh/2017-05-31/paginators-1.json | 3 +
 botocore/data/mgh/2017-05-31/service-2.json | 1123 ++++
 .../data/pinpoint/2016-12-01/service-2.json | 320 +-
 botocore/data/rds/2014-10-31/service-2.json | 120 +-
 .../data/route53/2013-04-01/service-2.json | 8 +-
 botocore/data/ses/2010-12-01/service-2.json | 66 +-
 botocore/data/ssm/2014-11-06/service-2.json | 1510 ++++-
 .../storagegateway/2013-06-30/service-2.json | 24 +-
 botocore/docs/bcdoc/style.py | 7 +-
 botocore/docs/sharedexample.py | 4 +-
 botocore/docs/utils.py | 18 +
 botocore/exceptions.py | 20 +-
 botocore/handlers.py | 8 +
 botocore/paginate.py | 13 +-
 botocore/translate.py | 33 +-
 docs/source/conf.py | 4 +-
 tests/functional/test_retry.py | 103 +
 tests/functional/test_route53.py | 51 +
 tests/unit/docs/bcdoc/test_document.py | 8 +-
 tests/unit/docs/bcdoc/test_style.py | 15 +-
 tests/unit/docs/test_sharedexample.py | 19 +
 tests/unit/docs/test_utils.py | 8 +
 tests/unit/test_args.py | 21 +
 tests/unit/test_client.py | 36 +
 tests/unit/test_exceptions.py | 16 +
 tests/unit/test_paginate.py | 143 +-
 tests/unit/test_translate.py | 40 +
 62 files changed, 14550 insertions(+), 453 deletions(-)
 mode change 100755 => 100644 botocore/data/budgets/2016-10-20/service-2.json
 create mode 100644 botocore/data/cloudhsmv2/2017-04-28/paginators-1.json
 create mode 100644 botocore/data/cloudhsmv2/2017-04-28/service-2.json
 mode change 100755 => 100644 botocore/data/dynamodb/2012-08-10/service-2.json
 mode change 100755 => 100644 botocore/data/elasticbeanstalk/2010-12-01/service-2.json
 create mode 100644 botocore/data/firehose/2015-08-04/paginators-1.json
 create mode 100644 botocore/data/glue/2017-03-31/paginators-1.json
 create mode 100644 botocore/data/glue/2017-03-31/service-2.json
 create mode 100644 botocore/data/mgh/2017-05-31/paginators-1.json
 create mode 100644 botocore/data/mgh/2017-05-31/service-2.json
 create mode 100644 tests/functional/test_retry.py
 create mode 100644 tests/functional/test_route53.py

diff --git a/PKG-INFO b/PKG-INFO
index 94256cb6..0b05117e 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: botocore
-Version: 1.5.84
+Version: 1.6.6
 Summary: Low-level, data-driven core of boto 3.
 Home-page: https://github.com/boto/botocore
 Author: Amazon Web Services
diff --git a/botocore.egg-info/PKG-INFO b/botocore.egg-info/PKG-INFO
index 94256cb6..0b05117e 100644
--- a/botocore.egg-info/PKG-INFO
+++ b/botocore.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: botocore
-Version: 1.5.84
+Version: 1.6.6
 Summary: Low-level, data-driven core of boto 3.
 Home-page: https://github.com/boto/botocore
 Author: Amazon Web Services
diff --git a/botocore.egg-info/SOURCES.txt b/botocore.egg-info/SOURCES.txt
index b6d8d523..d25fa12d 100644
--- a/botocore.egg-info/SOURCES.txt
+++ b/botocore.egg-info/SOURCES.txt
@@ -115,6 +115,8 @@ botocore/data/cloudfront/2017-03-25/service-2.json
 botocore/data/cloudfront/2017-03-25/waiters-2.json
 botocore/data/cloudhsm/2014-05-30/examples-1.json
 botocore/data/cloudhsm/2014-05-30/service-2.json
+botocore/data/cloudhsmv2/2017-04-28/paginators-1.json
+botocore/data/cloudhsmv2/2017-04-28/service-2.json
 botocore/data/cloudsearch/2011-02-01/service-2.json
 botocore/data/cloudsearch/2013-01-01/paginators-1.json
 botocore/data/cloudsearch/2013-01-01/service-2.json
@@ -248,6 +250,7 @@ botocore/data/events/2015-10-07/examples-1.json
 botocore/data/events/2015-10-07/paginators-1.json
 botocore/data/events/2015-10-07/service-2.json
 botocore/data/firehose/2015-08-04/examples-1.json
+botocore/data/firehose/2015-08-04/paginators-1.json
 botocore/data/firehose/2015-08-04/service-2.json
 botocore/data/gamelift/2015-10-01/examples-1.json
 botocore/data/gamelift/2015-10-01/paginators-1.json
@@ -256,6 +259,8 @@ botocore/data/glacier/2012-06-01/examples-1.json
 botocore/data/glacier/2012-06-01/paginators-1.json
 botocore/data/glacier/2012-06-01/service-2.json
 botocore/data/glacier/2012-06-01/waiters-2.json
+botocore/data/glue/2017-03-31/paginators-1.json
+botocore/data/glue/2017-03-31/service-2.json
 botocore/data/greengrass/2017-06-07/service-2.json
 botocore/data/health/2016-08-04/examples-1.json
 botocore/data/health/2016-08-04/paginators-1.json
@@ -309,6 +314,8 @@ botocore/data/marketplacecommerceanalytics/2015-07-01/paginators-1.json
 botocore/data/marketplacecommerceanalytics/2015-07-01/service-2.json
 botocore/data/meteringmarketplace/2016-01-14/examples-1.json
 botocore/data/meteringmarketplace/2016-01-14/service-2.json
+botocore/data/mgh/2017-05-31/paginators-1.json
+botocore/data/mgh/2017-05-31/service-2.json
 botocore/data/mturk/2017-01-17/paginators-1.json
 botocore/data/mturk/2017-01-17/service-2.json
 botocore/data/opsworks/2013-02-18/examples-1.json
@@ -602,6 +609,8 @@ tests/functional/test_paginator_config.py
 tests/functional/test_public_apis.py
 tests/functional/test_rds.py
 tests/functional/test_regions.py
+tests/functional/test_retry.py
+tests/functional/test_route53.py
 tests/functional/test_s3.py
 tests/functional/test_session.py
 tests/functional/test_six_imports.py
diff --git a/botocore/__init__.py b/botocore/__init__.py
index 29db9911..5ee337f0 100644
--- a/botocore/__init__.py
+++ b/botocore/__init__.py
@@ -16,7 +16,7 @@ import os
 import re
 import logging
 
-__version__ = '1.5.84'
+__version__ = '1.6.6'
 
 
 class NullHandler(logging.Handler):
diff --git a/botocore/args.py b/botocore/args.py
index 49af1c7b..fb6b5bc2 100644
--- a/botocore/args.py
+++ b/botocore/args.py
@@ -131,6 +131,7 @@ class ClientArgsCreator(object):
             read_timeout=client_config.read_timeout,
             max_pool_connections=client_config.max_pool_connections,
             proxies=client_config.proxies,
+            retries=client_config.retries
         )
 
         s3_config = self.compute_s3_config(scoped_config, client_config)
diff --git a/botocore/client.py b/botocore/client.py
index e7a81c5b..3843368a 100644
--- a/botocore/client.py
+++ b/botocore/client.py
@@ -70,6 +70,7 @@ class ClientCreator(object):
             service_model, region_name, is_secure, endpoint_url, verify,
             credentials, scoped_config, client_config, endpoint_bridge)
         service_client = cls(**client_args)
+        self._register_retries(service_client)
         self._register_s3_events(
             service_client, endpoint_bridge, endpoint_url, client_config,
             scoped_config)
@@ -95,11 +96,10 @@ class ClientCreator(object):
         json_model = self._loader.load_service_model(service_name, 'service-2',
                                                      api_version=api_version)
         service_model = ServiceModel(json_model, service_name=service_name)
-        self._register_retries(service_model)
         return service_model
 
-    def _register_retries(self, service_model):
-        endpoint_prefix = service_model.endpoint_prefix
+    def _register_retries(self, client):
+        endpoint_prefix = client.meta.service_model.endpoint_prefix
 
         # First, we load the entire retry config for all services,
         # then pull out just the information we need.
@@ -109,15 +109,17 @@
         retry_config = self._retry_config_translator.build_retry_config(
             endpoint_prefix, original_config.get('retry', {}),
-            original_config.get('definitions', {}))
+            original_config.get('definitions', {}),
+            client.meta.config.retries
+        )
         logger.debug("Registering retry handlers for service: %s",
-                     service_model.service_name)
+                     client.meta.service_model.service_name)
         handler = self._retry_handler_factory.create_retry_handler(
             retry_config, endpoint_prefix)
         unique_id = 'retry-config-%s' % endpoint_prefix
-        self._event_emitter.register('needs-retry.%s' % endpoint_prefix,
-                                     handler, unique_id=unique_id)
+        client.meta.events.register('needs-retry.%s' % endpoint_prefix,
+                                    handler, unique_id=unique_id)
 
     def _register_s3_events(self, client, endpoint_bridge, endpoint_url,
                             client_config, scoped_config):
@@ -681,9 +683,11 @@
             documented_paginator_cls = type(
                 paginator_class_name, (Paginator,), {'paginate': paginate})
 
+            operation_model = self._service_model.operation_model(actual_operation_name)
             paginator = documented_paginator_cls(
                 getattr(self, operation_name),
-                paginator_config)
+                paginator_config,
+                operation_model)
             return paginator
 
     def can_paginate(self, operation_name):
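The relocation of `_register_retries` above moves retry registration from the shared session emitter onto each client's own event emitter, so a per-client `retries` setting only affects that client. A minimal sketch of observing the same event surface from user code (the service name, handler, and `unique_id` below are illustrative, not part of the patch):

```python
import botocore.session

session = botocore.session.get_session()
client = session.create_client('s3', region_name='us-east-1')

# The patch registers botocore's retry handler on client.meta.events under
# 'needs-retry.<endpoint_prefix>'; user code can hook the same per-client
# event. Handlers receive 'attempts' among other keyword arguments.
def log_retry(attempts, **kwargs):
    print('needs-retry fired, attempt %s' % attempts)

client.meta.events.register('needs-retry.s3', log_retry,
                            unique_id='example-retry-logger')
```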
diff --git a/botocore/config.py b/botocore/config.py
index b318941e..4b2e4c50 100644
--- a/botocore/config.py
+++ b/botocore/config.py
@@ -15,6 +15,8 @@
 from botocore.compat import OrderedDict
 
 from botocore.endpoint import DEFAULT_TIMEOUT, MAX_POOL_CONNECTIONS
 from botocore.exceptions import InvalidS3AddressingStyleError
+from botocore.exceptions import InvalidRetryConfigurationError
+from botocore.exceptions import InvalidMaxRetryAttemptsError
 
 
 class Config(object):
@@ -88,6 +90,18 @@ class Config(object):
         * path -- Addressing style is always by path. Endpoints will be
           addressed as such: s3.amazonaws.com/mybucket
+
+    :type retries: dict
+    :param retries: A dictionary for retry specific configurations.
+        Valid keys are:
+
+        * 'max_attempts' -- An integer representing the maximum number of
+          retry attempts that will be made on a single request. For
+          example, setting this value to 2 will result in the request
+          being retried at most two times after the initial request. Setting
+          this value to 0 will result in no retries ever being attempted on
+          the initial request. If not provided, the number of retries will
+          default to whatever is modeled, which is typically four retries.
     """
     OPTION_DEFAULTS = OrderedDict([
         ('region_name', None),
@@ -99,7 +113,8 @@ class Config(object):
         ('parameter_validation', True),
         ('max_pool_connections', MAX_POOL_CONNECTIONS),
         ('proxies', None),
-        ('s3', None)
+        ('s3', None),
+        ('retries', None)
     ])
 
     def __init__(self, *args, **kwargs):
@@ -117,6 +132,8 @@ class Config(object):
         # Validate the s3 options
         self._validate_s3_configuration(self.s3)
 
+        self._validate_retry_configuration(self.retries)
+
     def _record_user_provided_options(self, args, kwargs):
         option_order = list(self.OPTION_DEFAULTS)
         user_provided_options = {}
@@ -157,6 +174,17 @@ class Config(object):
             raise InvalidS3AddressingStyleError(
                 s3_addressing_style=addressing_style)
 
+    def _validate_retry_configuration(self, retries):
+        if retries is not None:
+            for key in retries:
+                if key not in ['max_attempts']:
+                    raise InvalidRetryConfigurationError(
+                        retry_config_option=key)
+                if key == 'max_attempts' and retries[key] < 0:
+                    raise InvalidMaxRetryAttemptsError(
+                        provided_max_attempts=retries[key]
+                    )
+
     def merge(self, other_config):
         """Merges the config object with another config object
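Taken together with the docstring above, the new option is exercised as follows; a minimal sketch (the service and region are arbitrary, and `InvalidMaxRetryAttemptsError` is the exception added to `botocore/exceptions.py` elsewhere in this patch):

```python
import botocore.session
from botocore.config import Config
from botocore.exceptions import InvalidMaxRetryAttemptsError

# At most 2 retry attempts after the initial request.
config = Config(retries={'max_attempts': 2})

session = botocore.session.get_session()
client = session.create_client('dynamodb', region_name='us-east-1',
                               config=config)

# The validation added in _validate_retry_configuration rejects
# negative values at construction time.
try:
    Config(retries={'max_attempts': -1})
except InvalidMaxRetryAttemptsError as exc:
    print(exc)
```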

diff --git a/botocore/data/appstream/2016-12-01/service-2.json b/botocore/data/appstream/2016-12-01/service-2.json
index 1f0ce616..57a25035 100644
--- a/botocore/data/appstream/2016-12-01/service-2.json
+++ b/botocore/data/appstream/2016-12-01/service-2.json
@@ -24,10 +24,25 @@
         {"shape":"LimitExceededException"},
         {"shape":"ResourceNotFoundException"},
         {"shape":"ConcurrentModificationException"},
-        {"shape":"IncompatibleImageException"}
+        {"shape":"IncompatibleImageException"},
+        {"shape":"OperationNotPermittedException"}
       ],
       "documentation":"Associate a fleet to a stack."
     },
+    "CreateDirectoryConfig":{
+      "name":"CreateDirectoryConfig",
+      "http":{
+        "method":"POST",
+        "requestUri":"/"
+      },
+      "input":{"shape":"CreateDirectoryConfigRequest"},
+      "output":{"shape":"CreateDirectoryConfigResult"},
+      "errors":[
+        {"shape":"ResourceAlreadyExistsException"},
+        {"shape":"LimitExceededException"}
+      ],
+      "documentation":"

Creates a directory configuration with the given parameters.

" + }, "CreateFleet":{ "name":"CreateFleet", "http":{ @@ -42,7 +57,9 @@ {"shape":"ResourceNotFoundException"}, {"shape":"LimitExceededException"}, {"shape":"InvalidRoleException"}, - {"shape":"ConcurrentModificationException"} + {"shape":"ConcurrentModificationException"}, + {"shape":"InvalidParameterCombinationException"}, + {"shape":"IncompatibleImageException"} ], "documentation":"

Creates a new fleet.

" }, @@ -80,6 +97,20 @@ ], "documentation":"

Creates a URL to start an AppStream 2.0 streaming session for a user. By default, the URL is valid only for 1 minute from the time that it is generated.

" }, + "DeleteDirectoryConfig":{ + "name":"DeleteDirectoryConfig", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDirectoryConfigRequest"}, + "output":{"shape":"DeleteDirectoryConfigResult"}, + "errors":[ + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Deletes the directory configuration with the given parameters.

" + }, "DeleteFleet":{ "name":"DeleteFleet", "http":{ @@ -110,6 +141,19 @@ ], "documentation":"

Deletes the stack. After this operation completes, the environment can no longer be activated, and any reservations made for the stack are released.

" }, + "DescribeDirectoryConfigs":{ + "name":"DescribeDirectoryConfigs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDirectoryConfigsRequest"}, + "output":{"shape":"DescribeDirectoryConfigsResult"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns a list describing the specified directory configurations.

" + }, "DescribeFleets":{ "name":"DescribeFleets", "http":{ @@ -147,7 +191,7 @@ "errors":[ {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Describes the streaming sessions for a stack and a fleet. If a user ID is provided, this operation returns streaming sessions for only that user. Pass this value for the nextToken parameter in a subsequent call to this operation to retrieve the next set of items. If an authentication type is not provided, the operation defaults to users authenticated using a streaming URL.

" + "documentation":"

Describes the streaming sessions for a stack and a fleet. If a user ID is provided, this operation returns streaming sessions for only that user. To retrieve the next set of items, pass this value for the nextToken parameter in a subsequent call to this operation. If an authentication type is not provided, the operation defaults to users authenticated using a streaming URL.

" }, "DescribeStacks":{ "name":"DescribeStacks", @@ -160,7 +204,7 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"

If stack names are not provided, this operation describes the specified stacks; otherwise, all stacks in the account are described. Pass the nextToken value in a subsequent call to this operation to retrieve the next set of items.

" + "documentation":"

If stack names are not provided, this operation describes the specified stacks; otherwise, all stacks in the account are described. To retrieve the next set of items, pass the nextToken value in a subsequent call to this operation.

" }, "DisassociateFleet":{ "name":"DisassociateFleet", @@ -237,6 +281,21 @@ ], "documentation":"

Stops a fleet.

" }, + "UpdateDirectoryConfig":{ + "name":"UpdateDirectoryConfig", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDirectoryConfigRequest"}, + "output":{"shape":"UpdateDirectoryConfigResult"}, + "errors":[ + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

Updates the directory configuration with the given parameters.

" + }, "UpdateFleet":{ "name":"UpdateFleet", "http":{ @@ -253,7 +312,8 @@ {"shape":"ResourceNotAvailableException"}, {"shape":"InvalidParameterCombinationException"}, {"shape":"ConcurrentModificationException"}, - {"shape":"IncompatibleImageException"} + {"shape":"IncompatibleImageException"}, + {"shape":"OperationNotPermittedException"} ], "documentation":"

Updates an existing fleet. All the attributes except the fleet name can be updated in the STOPPED state. When a fleet is in the RUNNING state, only DisplayName and ComputeCapacity can be updated. A fleet cannot be updated in a status of STARTING or STOPPING.

" }, @@ -277,6 +337,17 @@ } }, "shapes":{ + "AccountName":{ + "type":"string", + "min":1, + "sensitive":true + }, + "AccountPassword":{ + "type":"string", + "max":127, + "min":1, + "sensitive":true + }, "Application":{ "type":"structure", "members":{ @@ -302,7 +373,7 @@ }, "Enabled":{ "shape":"Boolean", - "documentation":"

An application can be disabled after image creation if there is a problem.

" + "documentation":"

If there is a problem, an application can be disabled after image creation.

" }, "Metadata":{ "shape":"Metadata", @@ -393,6 +464,37 @@ "documentation":"

An API error occurred. Wait a few minutes and try again.

", "exception":true }, + "CreateDirectoryConfigRequest":{ + "type":"structure", + "required":[ + "DirectoryName", + "OrganizationalUnitDistinguishedNames", + "ServiceAccountCredentials" + ], + "members":{ + "DirectoryName":{ + "shape":"DirectoryName", + "documentation":"

The fully qualified name of the directory, such as corp.example.com

" + }, + "OrganizationalUnitDistinguishedNames":{ + "shape":"OrganizationalUnitDistinguishedNamesList", + "documentation":"

The list of the distinguished names of organizational units to place computer accounts in.

" + }, + "ServiceAccountCredentials":{ + "shape":"ServiceAccountCredentials", + "documentation":"

The AccountName and AccountPassword values for the service account, which are used by the streaming instance to connect to the directory.

" + } + } + }, + "CreateDirectoryConfigResult":{ + "type":"structure", + "members":{ + "DirectoryConfig":{ + "shape":"DirectoryConfig", + "documentation":"

Directory configuration details.

" + } + } + }, "CreateFleetRequest":{ "type":"structure", "required":[ @@ -412,7 +514,7 @@ }, "InstanceType":{ "shape":"String", - "documentation":"

The instance type of compute resources for the fleet. Fleet instances are launched from this instance type.

" + "documentation":"

The instance type of compute resources for the fleet. Fleet instances are launched from this instance type. Available instance types are:

" }, "ComputeCapacity":{ "shape":"ComputeCapacity", @@ -440,7 +542,11 @@ }, "EnableDefaultInternetAccess":{ "shape":"BooleanObject", - "documentation":"

Enables or disables default Internet access for the fleet.

" + "documentation":"

Enables or disables default internet access for the fleet.

" + }, + "DomainJoinInfo":{ + "shape":"DomainJoinInfo", + "documentation":"

The DirectoryName and OrganizationalUnitDistinguishedName values, which are used to join domains for the AppStream 2.0 streaming instances.

" } }, "documentation":"

Contains the parameters for the new fleet to create.

" @@ -502,7 +608,7 @@ "documentation":"

The fleet for which the URL is generated.

" }, "UserId":{ - "shape":"UserId", + "shape":"StreamingUrlUserId", "documentation":"

A unique user ID for whom the URL is generated.

" }, "ApplicationId":{ @@ -528,10 +634,25 @@ }, "Expires":{ "shape":"Timestamp", - "documentation":"

Elapsed seconds after the Unix epoch, at which time this URL expires.

" + "documentation":"

Elapsed seconds after the Unix epoch, when this URL expires.

" } } }, + "DeleteDirectoryConfigRequest":{ + "type":"structure", + "required":["DirectoryName"], + "members":{ + "DirectoryName":{ + "shape":"DirectoryName", + "documentation":"

The name of the directory configuration to be deleted.

" + } + } + }, + "DeleteDirectoryConfigResult":{ + "type":"structure", + "members":{ + } + }, "DeleteFleetRequest":{ "type":"structure", "required":["Name"], @@ -562,6 +683,36 @@ "members":{ } }, + "DescribeDirectoryConfigsRequest":{ + "type":"structure", + "members":{ + "DirectoryNames":{ + "shape":"DirectoryNameList", + "documentation":"

A specific list of directory names.

" + }, + "MaxResults":{ + "shape":"Integer", + "documentation":"

The size of each page of results.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The DescribeDirectoryConfigsResult.NextToken from a previous call to DescribeDirectoryConfigs. If this is the first call, pass null.

" + } + } + }, + "DescribeDirectoryConfigsResult":{ + "type":"structure", + "members":{ + "DirectoryConfigs":{ + "shape":"DirectoryConfigList", + "documentation":"

The list of directory configurations.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

If not null, more results are available. To retrieve the next set of items, pass this value for the NextToken parameter in a subsequent call to DescribeDirectoryConfigs.

" + } + } + }, "DescribeFleetsRequest":{ "type":"structure", "members":{ @@ -682,6 +833,38 @@ "type":"string", "max":256 }, + "DirectoryConfig":{ + "type":"structure", + "required":["DirectoryName"], + "members":{ + "DirectoryName":{ + "shape":"DirectoryName", + "documentation":"

The fully qualified name of the directory, such as corp.example.com

" + }, + "OrganizationalUnitDistinguishedNames":{ + "shape":"OrganizationalUnitDistinguishedNamesList", + "documentation":"

The list of the distinguished names of organizational units in which to place computer accounts.

" + }, + "ServiceAccountCredentials":{ + "shape":"ServiceAccountCredentials", + "documentation":"

The AccountName and AccountPassword of the service account, to be used by the streaming instance to connect to the directory.

" + }, + "CreatedTime":{ + "shape":"Timestamp", + "documentation":"

The time stamp when the directory configuration was created within AppStream 2.0.

" + } + }, + "documentation":"

Full directory configuration details, which are used to join domains for the AppStream 2.0 streaming instances.

" + }, + "DirectoryConfigList":{ + "type":"list", + "member":{"shape":"DirectoryConfig"} + }, + "DirectoryName":{"type":"string"}, + "DirectoryNameList":{ + "type":"list", + "member":{"shape":"DirectoryName"} + }, "DisassociateFleetRequest":{ "type":"structure", "required":[ @@ -708,6 +891,20 @@ "type":"string", "max":100 }, + "DomainJoinInfo":{ + "type":"structure", + "members":{ + "DirectoryName":{ + "shape":"DirectoryName", + "documentation":"

The fully qualified name of the directory, such as corp.example.com

" + }, + "OrganizationalUnitDistinguishedName":{ + "shape":"OrganizationalUnitDistinguishedName", + "documentation":"

The distinguished name of the organizational unit to place the computer account in.

" + } + }, + "documentation":"

The DirectoryName and OrganizationalUnitDistinguishedName values, which are used to join domains for the AppStream 2.0 streaming instances.

" + }, "ErrorMessage":{ "type":"string", "documentation":"

The error message in the exception.

" @@ -792,7 +989,11 @@ }, "EnableDefaultInternetAccess":{ "shape":"BooleanObject", - "documentation":"

Whether default Internet access is enabled for the fleet.

" + "documentation":"

Whether default internet access is enabled for the fleet.

" + }, + "DomainJoinInfo":{ + "shape":"DomainJoinInfo", + "documentation":"

The DirectoryName and OrganizationalUnitDistinguishedName values, which are used to join domains for the AppStream 2.0 streaming instances.

" } }, "documentation":"

Contains the parameters for a fleet.

" @@ -802,7 +1003,8 @@ "documentation":"

Fleet attribute.

", "enum":[ "VPC_CONFIGURATION", - "VPC_CONFIGURATION_SECURITY_GROUP_IDS" + "VPC_CONFIGURATION_SECURITY_GROUP_IDS", + "DOMAIN_JOIN_INFO" ] }, "FleetAttributes":{ @@ -837,7 +1039,21 @@ "IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION", "SUBNET_NOT_FOUND", "IMAGE_NOT_FOUND", - "INVALID_SUBNET_CONFIGURATION" + "INVALID_SUBNET_CONFIGURATION", + "SECURITY_GROUPS_NOT_FOUND", + "IAM_SERVICE_ROLE_MISSING_DESCRIBE_SECURITY_GROUPS_ACTION", + "DOMAIN_JOIN_ERROR_FILE_NOT_FOUND", + "DOMAIN_JOIN_ERROR_ACCESS_DENIED", + "DOMAIN_JOIN_ERROR_LOGON_FAILURE", + "DOMAIN_JOIN_ERROR_INVALID_PARAMETER", + "DOMAIN_JOIN_ERROR_MORE_DATA", + "DOMAIN_JOIN_ERROR_NO_SUCH_DOMAIN", + "DOMAIN_JOIN_ERROR_NOT_SUPPORTED", + "DOMAIN_JOIN_NERR_INVALID_WORKGROUP_NAME", + "DOMAIN_JOIN_NERR_WORKSTATION_NOT_STARTED", + "DOMAIN_JOIN_ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED", + "DOMAIN_JOIN_NERR_PASSWORD_EXPIRED", + "DOMAIN_JOIN_INTERNAL_SERVICE_ERROR" ] }, "FleetErrors":{ @@ -880,7 +1096,7 @@ }, "State":{ "shape":"ImageState", - "documentation":"

The image starts in the PENDING state, and then moves to AVAILABLE if image creation succeeds and FAILED if image creation has failed.

" + "documentation":"

The image starts in the PENDING state. If image creation succeeds, it moves to AVAILABLE. If image creation fails, it moves to FAILED.

" }, "Visibility":{ "shape":"VisibilityType", @@ -908,7 +1124,7 @@ }, "CreatedTime":{ "shape":"Timestamp", - "documentation":"

The timestamp when the image was created.

" + "documentation":"

The time stamp when the image was created.

" }, "PublicBaseImageReleasedDate":{ "shape":"Timestamp", @@ -1058,6 +1274,14 @@ "documentation":"

The attempted operation is not permitted.

", "exception":true }, + "OrganizationalUnitDistinguishedName":{ + "type":"string", + "max":2000 + }, + "OrganizationalUnitDistinguishedNamesList":{ + "type":"list", + "member":{"shape":"OrganizationalUnitDistinguishedName"} + }, "PlatformType":{ "type":"string", "enum":["WINDOWS"] @@ -1105,6 +1329,24 @@ "documentation":"

A list of security groups.

", "max":5 }, + "ServiceAccountCredentials":{ + "type":"structure", + "required":[ + "AccountName", + "AccountPassword" + ], + "members":{ + "AccountName":{ + "shape":"AccountName", + "documentation":"

The user name of an account in the directory that is used by AppStream 2.0 streaming instances to connect to the directory. This account must have the following privileges: create computer objects, join computers to the domain, change/reset the password on descendant computer objects for the organizational units specified.

" + }, + "AccountPassword":{ + "shape":"AccountPassword", + "documentation":"

The password for the user account for directory actions.

" + } + }, + "documentation":"

The AccountName and AccountPassword of the service account, to be used by the streaming instance to connect to the directory.

" + }, "Session":{ "type":"structure", "required":[ @@ -1178,7 +1420,7 @@ }, "CreatedTime":{ "shape":"Timestamp", - "documentation":"

The timestamp when the stack was created.

" + "documentation":"

The time stamp when the stack was created.

" }, "StorageConnectors":{ "shape":"StorageConnectorList", @@ -1277,6 +1519,12 @@ "documentation":"

The type of storage connector. The possible values include: HOMEFOLDERS.

", "enum":["HOMEFOLDERS"] }, + "StreamingUrlUserId":{ + "type":"string", + "max":32, + "min":2, + "pattern":"[\\w+=,.@-]*" + }, "String":{ "type":"string", "min":1 @@ -1291,6 +1539,33 @@ "documentation":"

A list of subnet IDs.

" }, "Timestamp":{"type":"timestamp"}, + "UpdateDirectoryConfigRequest":{ + "type":"structure", + "required":["DirectoryName"], + "members":{ + "DirectoryName":{ + "shape":"DirectoryName", + "documentation":"

The name of the existing directory configuration to be updated.

" + }, + "OrganizationalUnitDistinguishedNames":{ + "shape":"OrganizationalUnitDistinguishedNamesList", + "documentation":"

The list of the distinguished names of organizational units to place computer accounts in.

" + }, + "ServiceAccountCredentials":{ + "shape":"ServiceAccountCredentials", + "documentation":"

The AccountName and AccountPassword values for the service account, which are used by the streaming instance to connect to the directory

" + } + } + }, + "UpdateDirectoryConfigResult":{ + "type":"structure", + "members":{ + "DirectoryConfig":{ + "shape":"DirectoryConfig", + "documentation":"

The updated directory configuration details.

" + } + } + }, "UpdateFleetRequest":{ "type":"structure", "required":["Name"], @@ -1305,7 +1580,7 @@ }, "InstanceType":{ "shape":"String", - "documentation":"

The instance type of compute resources for the fleet. Fleet instances are launched from this instance type.

" + "documentation":"

The instance type of compute resources for the fleet. Fleet instances are launched from this instance type. Available instance types are:

" }, "ComputeCapacity":{ "shape":"ComputeCapacity", @@ -1338,7 +1613,11 @@ }, "EnableDefaultInternetAccess":{ "shape":"BooleanObject", - "documentation":"

Enables or disables default Internet access for the fleet.

" + "documentation":"

Enables or disables default internet access for the fleet.

" + }, + "DomainJoinInfo":{ + "shape":"DomainJoinInfo", + "documentation":"

The DirectoryName and OrganizationalUnitDistinguishedName values, which are used to join domains for the AppStream 2.0 streaming instances.

" }, "AttributesToDelete":{ "shape":"FleetAttributes", diff --git a/botocore/data/athena/2017-05-18/paginators-1.json b/botocore/data/athena/2017-05-18/paginators-1.json index ce96fb1b..8cefb4a0 100644 --- a/botocore/data/athena/2017-05-18/paginators-1.json +++ b/botocore/data/athena/2017-05-18/paginators-1.json @@ -16,7 +16,8 @@ "input_token": "NextToken", "output_token": "NextToken", "limit_key": "MaxResults", - "result_key": "ResultSet" + "result_key": "ResultSet.Rows", + "non_aggregate_keys": ["ResultSet.ResultSetMetadata"] } } } diff --git a/botocore/data/batch/2016-08-10/service-2.json b/botocore/data/batch/2016-08-10/service-2.json index 7d5423b6..48ac970b 100644 --- a/botocore/data/batch/2016-08-10/service-2.json +++ b/botocore/data/batch/2016-08-10/service-2.json @@ -255,7 +255,8 @@ "reason":{ "shape":"String", "documentation":"

A short (255 max characters) human-readable string to provide additional details about a running or stopped container.

" - } + }, + "logStreamName":{"shape":"String"} }, "documentation":"

An object representing the details of a container that is part of a job attempt.

" }, @@ -573,7 +574,8 @@ "taskArn":{ "shape":"String", "documentation":"

The Amazon Resource Name (ARN) of the Amazon ECS task that is associated with the container job.

" - } + }, + "logStreamName":{"shape":"String"} }, "documentation":"

An object representing the details of a container that is part of a job.

" }, diff --git a/botocore/data/budgets/2016-10-20/service-2.json b/botocore/data/budgets/2016-10-20/service-2.json old mode 100755 new mode 100644 index 9dfcd5eb..194d8049 --- a/botocore/data/budgets/2016-10-20/service-2.json +++ b/botocore/data/budgets/2016-10-20/service-2.json @@ -250,16 +250,17 @@ }, "BudgetName":{ "type":"string", - "documentation":"A string represents the budget name. No \":\" character is allowed.", + "documentation":"A string represents the budget name. No \":\" and \"\\\" character is allowed.", "max":100, - "pattern":"[^:]+" + "pattern":"[^:\\\\]+" }, "BudgetType":{ "type":"string", - "documentation":"The type of a budget. Can be COST or USAGE.", + "documentation":"The type of a budget. It should be COST, USAGE, or RI_UTILIZATION.", "enum":[ "USAGE", - "COST" + "COST", + "RI_UTILIZATION" ] }, "Budgets":{ @@ -654,7 +655,7 @@ ], "members":{ "Amount":{"shape":"NumericValue"}, - "Unit":{"shape":"GenericString"} + "Unit":{"shape":"UnitValue"} }, "documentation":"A structure represent either a cost spend or usage spend. Contains an amount and a unit." }, @@ -699,13 +700,19 @@ }, "TimeUnit":{ "type":"string", - "documentation":"The time unit of the budget. e.g. weekly, monthly, etc.", + "documentation":"The time unit of the budget. e.g. MONTHLY, QUARTERLY, etc.", "enum":[ + "DAILY", "MONTHLY", "QUARTERLY", "ANNUALLY" ] }, + "UnitValue":{ + "type":"string", + "documentation":"A string to represent budget spend unit. It should be not null and not empty.", + "min":1 + }, "UpdateBudgetRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/clouddirectory/2016-05-10/service-2.json b/botocore/data/clouddirectory/2016-05-10/service-2.json index 0b1c1a2c..35864698 100644 --- a/botocore/data/clouddirectory/2016-05-10/service-2.json +++ b/botocore/data/clouddirectory/2016-05-10/service-2.json @@ -141,6 +141,7 @@ {"shape":"ValidationException"}, {"shape":"LimitExceededException"}, {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidAttachmentException"}, {"shape":"ValidationException"}, @@ -517,6 +518,7 @@ {"shape":"ValidationException"}, {"shape":"LimitExceededException"}, {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, {"shape":"ResourceNotFoundException"}, {"shape":"FacetValidationException"} ], @@ -810,6 +812,7 @@ {"shape":"ValidationException"}, {"shape":"LimitExceededException"}, {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidNextTokenException"}, {"shape":"FacetValidationException"} @@ -970,6 +973,7 @@ {"shape":"ValidationException"}, {"shape":"LimitExceededException"}, {"shape":"AccessDeniedException"}, + {"shape":"DirectoryNotEnabledException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidNextTokenException"}, {"shape":"FacetValidationException"} @@ -1657,7 +1661,7 @@ "documentation":"

The name of the link.

" } }, - "documentation":"

Represents the output of an AttachObject operation.

" + "documentation":"

Represents the output of an AttachObject operation.

" }, "BatchAttachObjectResponse":{ "type":"structure", @@ -1667,7 +1671,137 @@ "documentation":"

The ObjectIdentifier of the object that has been attached.

" } }, - "documentation":"

Represents the output batch AttachObject response operation.

" + "documentation":"

Represents the output batch AttachObject response operation.

" + }, + "BatchAttachPolicy":{ + "type":"structure", + "required":[ + "PolicyReference", + "ObjectReference" + ], + "members":{ + "PolicyReference":{ + "shape":"ObjectReference", + "documentation":"

The reference that is associated with the policy object.

" + }, + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

The reference that identifies the object to which the policy will be attached.

" + } + }, + "documentation":"

Attaches a policy object to a regular object inside a BatchRead operation. For more information, see AttachPolicy and BatchReadRequest$Operations.

" + }, + "BatchAttachPolicyResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

Represents the output of an AttachPolicy response operation.

" + }, + "BatchAttachToIndex":{ + "type":"structure", + "required":[ + "IndexReference", + "TargetReference" + ], + "members":{ + "IndexReference":{ + "shape":"ObjectReference", + "documentation":"

A reference to the index that you are attaching the object to.

" + }, + "TargetReference":{ + "shape":"ObjectReference", + "documentation":"

A reference to the object that you are attaching to the index.

" + } + }, + "documentation":"

Attaches the specified object to the specified index inside a BatchRead operation. For more information, see AttachToIndex and BatchReadRequest$Operations.

" + }, + "BatchAttachToIndexResponse":{ + "type":"structure", + "members":{ + "AttachedObjectIdentifier":{ + "shape":"ObjectIdentifier", + "documentation":"

The ObjectIdentifier of the object that was attached to the index.

" + } + }, + "documentation":"

Represents the output of a AttachToIndex response operation.

" + }, + "BatchAttachTypedLink":{ + "type":"structure", + "required":[ + "SourceObjectReference", + "TargetObjectReference", + "TypedLinkFacet", + "Attributes" + ], + "members":{ + "SourceObjectReference":{ + "shape":"ObjectReference", + "documentation":"

Identifies the source object that the typed link will attach to.

" + }, + "TargetObjectReference":{ + "shape":"ObjectReference", + "documentation":"

Identifies the target object that the typed link will attach to.

" + }, + "TypedLinkFacet":{ + "shape":"TypedLinkSchemaAndFacetName", + "documentation":"

Identifies the typed link facet that is associated with the typed link.

" + }, + "Attributes":{ + "shape":"AttributeNameAndValueList", + "documentation":"

A set of attributes that are associated with the typed link.

" + } + }, + "documentation":"

Attaches a typed link to a specified source and target object inside a BatchRead operation. For more information, see AttachTypedLink and BatchReadRequest$Operations.

" + }, + "BatchAttachTypedLinkResponse":{ + "type":"structure", + "members":{ + "TypedLinkSpecifier":{ + "shape":"TypedLinkSpecifier", + "documentation":"

Returns a typed link specifier as output.

" + } + }, + "documentation":"

Represents the output of a AttachTypedLink response operation.

" + }, + "BatchCreateIndex":{ + "type":"structure", + "required":[ + "OrderedIndexedAttributeList", + "IsUnique" + ], + "members":{ + "OrderedIndexedAttributeList":{ + "shape":"AttributeKeyList", + "documentation":"

Specifies the attributes that should be indexed on. Currently only a single attribute is supported.

" + }, + "IsUnique":{ + "shape":"Bool", + "documentation":"

Indicates whether the attribute that is being indexed has unique values or not.

" + }, + "ParentReference":{ + "shape":"ObjectReference", + "documentation":"

A reference to the parent object that contains the index object.

" + }, + "LinkName":{ + "shape":"LinkName", + "documentation":"

The name of the link between the parent object and the index object.

" + }, + "BatchReferenceName":{ + "shape":"BatchReferenceName", + "documentation":"

The batch reference name. See Batches for more information.

" + } + }, + "documentation":"

Creates an index object inside of a BatchRead operation. For more information, see CreateIndex and BatchReadRequest$Operations.

" + }, + "BatchCreateIndexResponse":{ + "type":"structure", + "members":{ + "ObjectIdentifier":{ + "shape":"ObjectIdentifier", + "documentation":"

The ObjectIdentifier of the index created by this operation.

" + } + }, + "documentation":"

Represents the output of a CreateIndex response operation.

" }, "BatchCreateObject":{ "type":"structure", @@ -1700,7 +1834,7 @@ "documentation":"

The batch reference name. See Batches for more information.

" } }, - "documentation":"

Represents the output of a CreateObject operation.

" + "documentation":"

Represents the output of a CreateObject operation.

" }, "BatchCreateObjectResponse":{ "type":"structure", @@ -1710,7 +1844,7 @@ "documentation":"

The ID that is associated with the object.

" } }, - "documentation":"

Represents the output of a CreateObject response operation.

" + "documentation":"

Represents the output of a CreateObject response operation.

" }, "BatchDeleteObject":{ "type":"structure", @@ -1721,13 +1855,41 @@ "documentation":"

The reference that identifies the object.

" } }, - "documentation":"

Represents the output of a DeleteObject operation.

" + "documentation":"

Represents the output of a DeleteObject operation.

" }, "BatchDeleteObjectResponse":{ "type":"structure", "members":{ }, - "documentation":"

Represents the output of a DeleteObject response operation.

" + "documentation":"

Represents the output of a DeleteObject response operation.

" + }, + "BatchDetachFromIndex":{ + "type":"structure", + "required":[ + "IndexReference", + "TargetReference" + ], + "members":{ + "IndexReference":{ + "shape":"ObjectReference", + "documentation":"

A reference to the index object.

" + }, + "TargetReference":{ + "shape":"ObjectReference", + "documentation":"

A reference to the object being detached from the index.

" + } + }, + "documentation":"

Detaches the specified object from the specified index inside a BatchRead operation. For more information, see DetachFromIndex and BatchReadRequest$Operations.

" + }, + "BatchDetachFromIndexResponse":{ + "type":"structure", + "members":{ + "DetachedObjectIdentifier":{ + "shape":"ObjectIdentifier", + "documentation":"

The ObjectIdentifier of the object that was detached from the index.

" + } + }, + "documentation":"

Represents the output of a DetachFromIndex response operation.

" }, "BatchDetachObject":{ "type":"structure", @@ -1750,7 +1912,7 @@ "documentation":"

The batch reference name. See Batches for more information.

" } }, - "documentation":"

Represents the output of a DetachObject operation.

" + "documentation":"

Represents the output of a DetachObject operation.

" }, "BatchDetachObjectResponse":{ "type":"structure", @@ -1760,7 +1922,184 @@ "documentation":"

The ObjectIdentifier of the detached object.

" } }, - "documentation":"

Represents the output of a DetachObject response operation.

" + "documentation":"

Represents the output of a DetachObject response operation.

" + }, + "BatchDetachPolicy":{ + "type":"structure", + "required":[ + "PolicyReference", + "ObjectReference" + ], + "members":{ + "PolicyReference":{ + "shape":"ObjectReference", + "documentation":"

Reference that identifies the policy object.

" + }, + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

Reference that identifies the object whose policy object will be detached.

" + } + }, + "documentation":"

Detaches the specified policy from the specified directory inside a BatchRead operation. For more information, see DetachPolicy and BatchReadRequest$Operations.

" + }, + "BatchDetachPolicyResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

Represents the output of a DetachPolicy response operation.

" + }, + "BatchDetachTypedLink":{ + "type":"structure", + "required":["TypedLinkSpecifier"], + "members":{ + "TypedLinkSpecifier":{ + "shape":"TypedLinkSpecifier", + "documentation":"

Used to accept a typed link specifier as input.

" + } + }, + "documentation":"

Detaches a typed link from a specified source and target object inside a BatchRead operation. For more information, see DetachTypedLink and BatchReadRequest$Operations.

" + }, + "BatchDetachTypedLinkResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

Represents the output of a DetachTypedLink response operation.

" + }, + "BatchGetObjectInformation":{ + "type":"structure", + "required":["ObjectReference"], + "members":{ + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

A reference to the object.

" + } + }, + "documentation":"

Retrieves metadata about an object inside a BatchRead operation. For more information, see GetObjectInformation and BatchReadRequest$Operations.

" + }, + "BatchGetObjectInformationResponse":{ + "type":"structure", + "members":{ + "SchemaFacets":{ + "shape":"SchemaFacetList", + "documentation":"

The facets attached to the specified object.

" + }, + "ObjectIdentifier":{ + "shape":"ObjectIdentifier", + "documentation":"

The ObjectIdentifier of the specified object.

" + } + }, + "documentation":"

Represents the output of a GetObjectInformation response operation.

" + }, + "BatchListAttachedIndices":{ + "type":"structure", + "required":["TargetReference"], + "members":{ + "TargetReference":{ + "shape":"ObjectReference", + "documentation":"

A reference to the object that has indices attached.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + } + }, + "documentation":"

Lists indices attached to an object inside a BatchRead operation. For more information, see ListAttachedIndices and BatchReadRequest$Operations.

" + }, + "BatchListAttachedIndicesResponse":{ + "type":"structure", + "members":{ + "IndexAttachments":{ + "shape":"IndexAttachmentList", + "documentation":"

The indices attached to the specified object.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + }, + "documentation":"

Represents the output of a ListAttachedIndices response operation.

" + }, + "BatchListIncomingTypedLinks":{ + "type":"structure", + "required":["ObjectReference"], + "members":{ + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

The reference that identifies the object whose attributes will be listed.

" + }, + "FilterAttributeRanges":{ + "shape":"TypedLinkAttributeRangeList", + "documentation":"

Provides range filters for multiple attributes. When providing ranges to typed link selection, any inexact ranges must be specified at the end. Any attributes that do not have a range specified are presumed to match the entire range.

" + }, + "FilterTypedLink":{ + "shape":"TypedLinkSchemaAndFacetName", + "documentation":"

Filters are interpreted in the order of the attributes on the typed link facet, not the order in which they are supplied to any API calls.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + } + }, + "documentation":"

Returns a paginated list of all the incoming TypedLinkSpecifier information for an object inside a BatchRead operation. For more information, see ListIncomingTypedLinks and BatchReadRequest$Operations.

" + }, + "BatchListIncomingTypedLinksResponse":{ + "type":"structure", + "members":{ + "LinkSpecifiers":{ + "shape":"TypedLinkSpecifierList", + "documentation":"

Returns one or more typed link specifiers as output.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + }, + "documentation":"

Represents the output of a ListIncomingTypedLinks response operation.

" + }, + "BatchListIndex":{ + "type":"structure", + "required":["IndexReference"], + "members":{ + "RangesOnIndexedValues":{ + "shape":"ObjectAttributeRangeList", + "documentation":"

Specifies the ranges of indexed values that you want to query.

" + }, + "IndexReference":{ + "shape":"ObjectReference", + "documentation":"

The reference to the index to list.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + }, + "documentation":"

Lists objects attached to the specified index inside a BatchRead operation. For more information, see ListIndex and BatchReadRequest$Operations.

" + }, + "BatchListIndexResponse":{ + "type":"structure", + "members":{ + "IndexAttachments":{ + "shape":"IndexAttachmentList", + "documentation":"

The objects and indexed values attached to the index.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + }, + "documentation":"

Represents the output of a ListIndex response operation.

" }, "BatchListObjectAttributes":{ "type":"structure", @@ -1783,7 +2122,7 @@ "documentation":"

Used to filter the list of object attributes that are associated with a certain facet.

" } }, - "documentation":"

Represents the output of a ListObjectAttributes operation.

" + "documentation":"

Represents the output of a ListObjectAttributes operation.

" }, "BatchListObjectAttributesResponse":{ "type":"structure", @@ -1797,7 +2136,7 @@ "documentation":"

The pagination token.

" } }, - "documentation":"

Represents the output of a ListObjectAttributes response operation.

" + "documentation":"

Represents the output of a ListObjectAttributes response operation.

" }, "BatchListObjectChildren":{ "type":"structure", @@ -1816,7 +2155,7 @@ "documentation":"

Maximum number of items to be retrieved in a single call. This is an approximate number.

" } }, - "documentation":"

Represents the output of a ListObjectChildren operation.

" + "documentation":"

Represents the output of a ListObjectChildren operation.

" }, "BatchListObjectChildrenResponse":{ "type":"structure", @@ -1830,7 +2169,180 @@ "documentation":"

The pagination token.

" } }, - "documentation":"

Represents the output of a ListObjectChildren response operation.

" + "documentation":"

Represents the output of a ListObjectChildren response operation.

" + }, + "BatchListObjectParentPaths":{ + "type":"structure", + "required":["ObjectReference"], + "members":{ + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

The reference that identifies the object whose attributes will be listed.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + } + }, + "documentation":"

Retrieves all available parent paths for any object type such as node, leaf node, policy node, and index node objects inside a BatchRead operation. For more information, see ListObjectParentPaths and BatchReadRequest$Operations.

" + }, + "BatchListObjectParentPathsResponse":{ + "type":"structure", + "members":{ + "PathToObjectIdentifiersList":{ + "shape":"PathToObjectIdentifiersList", + "documentation":"

Returns the path to the ObjectIdentifiers that are associated with the directory.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + }, + "documentation":"

Represents the output of a ListObjectParentPaths response operation.

" + }, + "BatchListObjectPolicies":{ + "type":"structure", + "required":["ObjectReference"], + "members":{ + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

The reference that identifies the object whose attributes will be listed.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + } + }, + "documentation":"

Returns policies attached to an object in pagination fashion inside a BatchRead operation. For more information, see ListObjectPolicies and BatchReadRequest$Operations.

" + }, + "BatchListObjectPoliciesResponse":{ + "type":"structure", + "members":{ + "AttachedPolicyIds":{ + "shape":"ObjectIdentifierList", + "documentation":"

A list of policy ObjectIdentifiers, that are attached to the object.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + }, + "documentation":"

Represents the output of a ListObjectPolicies response operation.

" + }, + "BatchListOutgoingTypedLinks":{ + "type":"structure", + "required":["ObjectReference"], + "members":{ + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

The reference that identifies the object whose attributes will be listed.

" + }, + "FilterAttributeRanges":{ + "shape":"TypedLinkAttributeRangeList", + "documentation":"

Provides range filters for multiple attributes. When providing ranges to typed link selection, any inexact ranges must be specified at the end. Any attributes that do not have a range specified are presumed to match the entire range.

" + }, + "FilterTypedLink":{ + "shape":"TypedLinkSchemaAndFacetName", + "documentation":"

Filters are interpreted in the order of the attributes defined on the typed link facet, not the order they are supplied to any API calls.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + } + }, + "documentation":"

Returns a paginated list of all the outgoing TypedLinkSpecifier information for an object inside a BatchRead operation. For more information, see ListOutgoingTypedLinks and BatchReadRequest$Operations.

" + }, + "BatchListOutgoingTypedLinksResponse":{ + "type":"structure", + "members":{ + "TypedLinkSpecifiers":{ + "shape":"TypedLinkSpecifierList", + "documentation":"

Returns a typed link specifier as output.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + }, + "documentation":"

Represents the output of a ListOutgoingTypedLinks response operation.

" + }, + "BatchListPolicyAttachments":{ + "type":"structure", + "required":["PolicyReference"], + "members":{ + "PolicyReference":{ + "shape":"ObjectReference", + "documentation":"

The reference that identifies the policy object.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + } + }, + "documentation":"

Returns all of the ObjectIdentifiers to which a given policy is attached inside a BatchRead operation. For more information, see ListPolicyAttachments and BatchReadRequest$Operations.

" + }, + "BatchListPolicyAttachmentsResponse":{ + "type":"structure", + "members":{ + "ObjectIdentifiers":{ + "shape":"ObjectIdentifierList", + "documentation":"

A list of ObjectIdentifiers to which the policy is attached.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + }, + "documentation":"

Represents the output of a ListPolicyAttachments response operation.

" + }, + "BatchLookupPolicy":{ + "type":"structure", + "required":["ObjectReference"], + "members":{ + "ObjectReference":{ + "shape":"ObjectReference", + "documentation":"

Reference that identifies the object whose policies will be looked up.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "MaxResults":{ + "shape":"NumberResults", + "documentation":"

The maximum number of results to retrieve.

" + } + }, + "documentation":"

Lists all policies from the root of the Directory to the object specified inside a BatchRead operation. For more information, see LookupPolicy and BatchReadRequest$Operations.

" + }, + "BatchLookupPolicyResponse":{ + "type":"structure", + "members":{ + "PolicyToPathList":{ + "shape":"PolicyToPathList", + "documentation":"

Provides list of path to policies. Policies contain PolicyId, ObjectIdentifier, and PolicyType. For more information, see Policies.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + } + }, + "documentation":"

Represents the output of a LookupPolicy response operation.

" }, "BatchOperationIndex":{"type":"integer"}, "BatchReadException":{ @@ -1855,7 +2367,14 @@ "ResourceNotFoundException", "InvalidNextTokenException", "AccessDeniedException", - "NotNodeException" + "NotNodeException", + "FacetValidationException", + "CannotListParentOfRootException", + "NotIndexException", + "NotPolicyException", + "DirectoryNotEnabledException", + "LimitExceededException", + "InternalServiceException" ] }, "BatchReadOperation":{ @@ -1868,6 +2387,42 @@ "ListObjectChildren":{ "shape":"BatchListObjectChildren", "documentation":"

Returns a paginated list of child objects that are associated with a given object.

" + }, + "ListAttachedIndices":{ + "shape":"BatchListAttachedIndices", + "documentation":"

Lists indices attached to an object.

" + }, + "ListObjectParentPaths":{ + "shape":"BatchListObjectParentPaths", + "documentation":"

Retrieves all available parent paths for any object type such as node, leaf node, policy node, and index node objects. For more information about objects, see Directory Structure.

" + }, + "GetObjectInformation":{ + "shape":"BatchGetObjectInformation", + "documentation":"

Retrieves metadata about an object.

" + }, + "ListObjectPolicies":{ + "shape":"BatchListObjectPolicies", + "documentation":"

Returns policies attached to an object in pagination fashion.

" + }, + "ListPolicyAttachments":{ + "shape":"BatchListPolicyAttachments", + "documentation":"

Returns all of the ObjectIdentifiers to which a given policy is attached.

" + }, + "LookupPolicy":{ + "shape":"BatchLookupPolicy", + "documentation":"

Lists all policies from the root of the Directory to the object specified. If there are no policies present, an empty list is returned. If policies are present, and if some objects don't have the policies attached, it returns the ObjectIdentifier for such objects. If policies are present, it returns ObjectIdentifier, policyId, and policyType. Paths that don't lead to the root from the target object are ignored. For more information, see Policies.

" + }, + "ListIndex":{ + "shape":"BatchListIndex", + "documentation":"

Lists objects attached to the specified index.

" + }, + "ListOutgoingTypedLinks":{ + "shape":"BatchListOutgoingTypedLinks", + "documentation":"

Returns a paginated list of all the outgoing TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed link.

" + }, + "ListIncomingTypedLinks":{ + "shape":"BatchListIncomingTypedLinks", + "documentation":"

Returns a paginated list of all the incoming TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed link.

" } }, "documentation":"

Represents the input of a BatchRead operation.

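For orientation, here is a minimal sketch of driving these batched read operations through botocore's generated client (via boto3). The directory ARN and object selectors are placeholder values, and the response unpacking is an assumption modelled on the BatchReadSuccessfulResponse shape in this diff:

```python
import boto3

clouddirectory = boto3.client("clouddirectory", region_name="us-east-1")

response = clouddirectory.batch_read(
    # Hypothetical directory ARN; replace with a real one.
    DirectoryArn="arn:aws:clouddirectory:us-east-1:123456789012:directory/example",
    Operations=[
        # Each dict keys exactly one of the BatchReadOperation members above.
        {"ListObjectChildren": {
            "ObjectReference": {"Selector": "/managers"},
            "MaxResults": 10,
        }},
        {"LookupPolicy": {
            "ObjectReference": {"Selector": "/managers/bob"},
        }},
    ],
    ConsistencyLevel="EVENTUALLY_CONSISTENT",
)

# Responses come back in request order; each entry holds either a
# SuccessfulResponse or an ExceptionResponse.
for entry in response["Responses"]:
    if "SuccessfulResponse" in entry:
        print(entry["SuccessfulResponse"])
    else:
        print("operation failed:", entry.get("ExceptionResponse"))
```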
" @@ -1938,6 +2493,42 @@ "ListObjectChildren":{ "shape":"BatchListObjectChildrenResponse", "documentation":"

Returns a paginated list of child objects that are associated with a given object.

" + }, + "GetObjectInformation":{ + "shape":"BatchGetObjectInformationResponse", + "documentation":"

Retrieves metadata about an object.

" + }, + "ListAttachedIndices":{ + "shape":"BatchListAttachedIndicesResponse", + "documentation":"

Lists indices attached to an object.

" + }, + "ListObjectParentPaths":{ + "shape":"BatchListObjectParentPathsResponse", + "documentation":"

Retrieves all available parent paths for any object type such as node, leaf node, policy node, and index node objects. For more information about objects, see Directory Structure.

" + }, + "ListObjectPolicies":{ + "shape":"BatchListObjectPoliciesResponse", + "documentation":"

Returns policies attached to an object in a paginated fashion.

" + }, + "ListPolicyAttachments":{ + "shape":"BatchListPolicyAttachmentsResponse", + "documentation":"

Returns all of the ObjectIdentifiers to which a given policy is attached.

" + }, + "LookupPolicy":{ + "shape":"BatchLookupPolicyResponse", + "documentation":"

Lists all policies from the root of the Directory to the object specified. If there are no policies present, an empty list is returned. If policies are present, and if some objects don't have the policies attached, it returns the ObjectIdentifier for such objects. If policies are present, it returns ObjectIdentifier, policyId, and policyType. Paths that don't lead to the root from the target object are ignored. For more information, see Policies.

" + }, + "ListIndex":{ + "shape":"BatchListIndexResponse", + "documentation":"

Lists objects attached to the specified index.

" + }, + "ListOutgoingTypedLinks":{ + "shape":"BatchListOutgoingTypedLinksResponse", + "documentation":"

Returns a paginated list of all the outgoing TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed link.

" + }, + "ListIncomingTypedLinks":{ + "shape":"BatchListIncomingTypedLinksResponse", + "documentation":"

Returns a paginated list of all the incoming TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed link.

" } }, "documentation":"

Represents a successful response to a BatchRead operation.

" @@ -2016,7 +2607,15 @@ "FacetValidationException", "ObjectNotDetachedException", "ResourceNotFoundException", - "AccessDeniedException" + "AccessDeniedException", + "InvalidAttachmentException", + "NotIndexException", + "IndexedAttributeMissingException", + "ObjectAlreadyDetachedException", + "NotPolicyException", + "DirectoryNotEnabledException", + "LimitExceededException", + "UnsupportedIndexTypeException" ] }, "BatchWriteOperation":{ @@ -2049,6 +2648,34 @@ "RemoveFacetFromObject":{ "shape":"BatchRemoveFacetFromObject", "documentation":"

A batch operation that removes a facet from an object.

" + }, + "AttachPolicy":{ + "shape":"BatchAttachPolicy", + "documentation":"

Attaches a policy object to a regular object. An object can have a limited number of attached policies.

" + }, + "DetachPolicy":{ + "shape":"BatchDetachPolicy", + "documentation":"

Detaches a policy from a Directory.

" + }, + "CreateIndex":{ + "shape":"BatchCreateIndex", + "documentation":"

Creates an index object. See Indexing for more information.

" + }, + "AttachToIndex":{ + "shape":"BatchAttachToIndex", + "documentation":"

Attaches the specified object to the specified index.

" + }, + "DetachFromIndex":{ + "shape":"BatchDetachFromIndex", + "documentation":"

Detaches the specified object from the specified index.

" + }, + "AttachTypedLink":{ + "shape":"BatchAttachTypedLink", + "documentation":"

Attaches a typed link to a specified source and target object. For more information, see Typed link.

" + }, + "DetachTypedLink":{ + "shape":"BatchDetachTypedLink", + "documentation":"

Detaches a typed link from a specified source and target object. For more information, see Typed link.

" } }, "documentation":"

Represents the input of a BatchWrite operation.

" @@ -2087,6 +2714,34 @@ "RemoveFacetFromObject":{ "shape":"BatchRemoveFacetFromObjectResponse", "documentation":"

The result of a batch remove facet from object operation.

" + }, + "AttachPolicy":{ + "shape":"BatchAttachPolicyResponse", + "documentation":"

Attaches a policy object to a regular object. An object can have a limited number of attached policies.

" + }, + "DetachPolicy":{ + "shape":"BatchDetachPolicyResponse", + "documentation":"

Detaches a policy from a Directory.

" + }, + "CreateIndex":{ + "shape":"BatchCreateIndexResponse", + "documentation":"

Creates an index object. See Indexing for more information.

" + }, + "AttachToIndex":{ + "shape":"BatchAttachToIndexResponse", + "documentation":"

Attaches the specified object to the specified index.

" + }, + "DetachFromIndex":{ + "shape":"BatchDetachFromIndexResponse", + "documentation":"

Detaches the specified object from the specified index.

" + }, + "AttachTypedLink":{ + "shape":"BatchAttachTypedLinkResponse", + "documentation":"

Attaches a typed link to a specified source and target object. For more information, see Typed link.

" + }, + "DetachTypedLink":{ + "shape":"BatchDetachTypedLinkResponse", + "documentation":"

Detaches a typed link from a specified source and target object. For more information, see Typed link.

" } }, "documentation":"

Represents the response of a BatchWrite operation.

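The write-side counterpart can be sketched the same way. A minimal example of batching the new AttachPolicy and AttachToIndex operations follows; the ARN and selectors are placeholders, and BatchWrite applies the operations atomically (all succeed or none are applied):

```python
import boto3

clouddirectory = boto3.client("clouddirectory", region_name="us-east-1")
directory_arn = "arn:aws:clouddirectory:us-east-1:123456789012:directory/example"  # placeholder

result = clouddirectory.batch_write(
    DirectoryArn=directory_arn,
    Operations=[
        {"AttachPolicy": {
            "PolicyReference": {"Selector": "/policies/read-only"},
            "ObjectReference": {"Selector": "/managers/bob"},
        }},
        {"AttachToIndex": {
            "IndexReference": {"Selector": "/indices/by-email"},
            "TargetReference": {"Selector": "/managers/bob"},
        }},
    ],
)

# One response entry per operation, in request order.
print(result["Responses"])
```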
" @@ -3169,7 +3824,7 @@ }, "TargetReference":{ "shape":"ObjectReference", - "documentation":"

A reference to the object to that has indices attached.

" + "documentation":"

A reference to the object that has indices attached.

" }, "NextToken":{ "shape":"NextToken", @@ -4515,7 +5170,7 @@ }, "IdentityAttributeOrder":{ "shape":"AttributeNameList", - "documentation":"

The set of attributes that distinguish links made from this facet from each other, in the order of significance. Listing typed links can filter on the values of these attributes. See ListOutgoingTypedLinks and ListIncomingTypeLinks for details.

" + "documentation":"

The set of attributes that distinguish links made from this facet from each other, in the order of significance. Listing typed links can filter on the values of these attributes. See ListOutgoingTypedLinks and ListIncomingTypedLinks for details.

" } }, "documentation":"

Defines the typed links structure and its attributes. To create a typed link facet, use the CreateTypedLinkFacet API.

" diff --git a/botocore/data/cloudformation/2010-05-15/service-2.json b/botocore/data/cloudformation/2010-05-15/service-2.json index 7a7c1fd8..c0d1222d 100644 --- a/botocore/data/cloudformation/2010-05-15/service-2.json +++ b/botocore/data/cloudformation/2010-05-15/service-2.json @@ -75,6 +75,45 @@ ], "documentation":"

Creates a stack as specified in the template. After the call completes successfully, the stack creation starts. You can check the status of the stack via the DescribeStacks API.

" }, + "CreateStackInstances":{ + "name":"CreateStackInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateStackInstancesInput"}, + "output":{ + "shape":"CreateStackInstancesOutput", + "resultWrapper":"CreateStackInstancesResult" + }, + "errors":[ + {"shape":"StackSetNotFoundException"}, + {"shape":"OperationInProgressException"}, + {"shape":"OperationIdAlreadyExistsException"}, + {"shape":"StaleRequestException"}, + {"shape":"InvalidOperationException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Creates stack instances for the specified accounts, within the specified regions. A stack instance refers to a stack in a specific account and region. Accounts and Regions are required parameters—you must specify at least one account and one region.

" + }, + "CreateStackSet":{ + "name":"CreateStackSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateStackSetInput"}, + "output":{ + "shape":"CreateStackSetOutput", + "resultWrapper":"CreateStackSetResult" + }, + "errors":[ + {"shape":"NameAlreadyExistsException"}, + {"shape":"CreatedButModifiedException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Creates a stack set.

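Taken together with CreateStackInstances above, a typical rollout through boto3's generated client looks like the following sketch. The stack set name, template, account IDs, and regions are all placeholder values:

```python
import boto3

cfn = boto3.client("cloudformation", region_name="us-east-1")

# Placeholder template body.
template = """
AWSTemplateFormatVersion: '2010-09-09'
Resources:
  AuditTopic:
    Type: AWS::SNS::Topic
"""

# A stack set must exist before instances can be created in it.
cfn.create_stack_set(
    StackSetName="example-audit",
    Description="Audit baseline rolled out to every member account.",
    TemplateBody=template,
)

op = cfn.create_stack_instances(
    StackSetName="example-audit",
    Accounts=["111111111111", "222222222222"],
    Regions=["us-east-1", "eu-west-1"],
    OperationPreferences={
        # Tolerate at most one failed stack per region; deploy to one
        # account at a time (see the preference shapes later in this diff).
        "FailureToleranceCount": 1,
        "MaxConcurrentCount": 1,
    },
)
print("operation id:", op["OperationId"])
```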
" + }, "DeleteChangeSet":{ "name":"DeleteChangeSet", "http":{ @@ -103,6 +142,43 @@ ], "documentation":"

Deletes a specified stack. Once the call completes successfully, stack deletion starts. Deleted stacks do not show up in the DescribeStacks API if the deletion has been completed successfully.

" }, + "DeleteStackInstances":{ + "name":"DeleteStackInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteStackInstancesInput"}, + "output":{ + "shape":"DeleteStackInstancesOutput", + "resultWrapper":"DeleteStackInstancesResult" + }, + "errors":[ + {"shape":"StackSetNotFoundException"}, + {"shape":"OperationInProgressException"}, + {"shape":"OperationIdAlreadyExistsException"}, + {"shape":"StaleRequestException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

Deletes stack instances for the specified accounts, in the specified regions.

" + }, + "DeleteStackSet":{ + "name":"DeleteStackSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteStackSetInput"}, + "output":{ + "shape":"DeleteStackSetOutput", + "resultWrapper":"DeleteStackSetResult" + }, + "errors":[ + {"shape":"StackSetNotEmptyException"}, + {"shape":"OperationInProgressException"} + ], + "documentation":"

Deletes a stack set. Before you can delete a stack set, all of its member stack instances must be deleted. For more information about how to do this, see DeleteStackInstances.

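Because DeleteStackSet requires an empty stack set, teardown is a two-step sequence. A sketch under the same placeholder names as above:

```python
import time

import boto3

cfn = boto3.client("cloudformation", region_name="us-east-1")

# Stack instances must be deleted (or retained) before the stack set itself
# can be deleted; otherwise DeleteStackSet raises StackSetNotEmptyException.
op = cfn.delete_stack_instances(
    StackSetName="example-audit",               # placeholder name
    Accounts=["111111111111", "222222222222"],
    Regions=["us-east-1", "eu-west-1"],
    RetainStacks=False,                         # delete the member stacks too
)

# Wait for the delete operation to finish before removing the stack set.
while True:
    status = cfn.describe_stack_set_operation(
        StackSetName="example-audit",
        OperationId=op["OperationId"],
    )["StackSetOperation"]["Status"]
    if status in ("SUCCEEDED", "FAILED", "STOPPED"):
        break
    time.sleep(10)

cfn.delete_stack_set(StackSetName="example-audit")
```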
" + }, "DescribeAccountLimits":{ "name":"DescribeAccountLimits", "http":{ @@ -145,6 +221,23 @@ }, "documentation":"

Returns all stack related events for a specified stack in reverse chronological order. For more information about a stack's event history, go to Stacks in the AWS CloudFormation User Guide.

You can list events for stacks that have failed to create or have been deleted by specifying the unique stack identifier (stack ID).

" }, + "DescribeStackInstance":{ + "name":"DescribeStackInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStackInstanceInput"}, + "output":{ + "shape":"DescribeStackInstanceOutput", + "resultWrapper":"DescribeStackInstanceResult" + }, + "errors":[ + {"shape":"StackSetNotFoundException"}, + {"shape":"StackInstanceNotFoundException"} + ], + "documentation":"

Returns the stack instance that's associated with the specified stack set, AWS account, and region.

For a list of stack instances that are associated with a specific stack set, use ListStackInstances.

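A minimal sketch of the single-instance lookup, with placeholder stack set name, account, and region:

```python
import boto3

cfn = boto3.client("cloudformation", region_name="us-east-1")

instance = cfn.describe_stack_instance(
    StackSetName="example-audit",            # placeholder
    StackInstanceAccount="111111111111",
    StackInstanceRegion="eu-west-1",
)["StackInstance"]

# Status is CURRENT, OUTDATED, or INOPERABLE (see StackInstanceStatus below).
print(instance["Status"], instance.get("StatusReason", ""))
```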
" + }, "DescribeStackResource":{ "name":"DescribeStackResource", "http":{ @@ -171,6 +264,39 @@ }, "documentation":"

Returns AWS resource descriptions for running and deleted stacks. If StackName is specified, all the associated resources that are part of the stack are returned. If PhysicalResourceId is specified, the associated resources of the stack that the resource belongs to are returned.

Only the first 100 resources will be returned. If your stack has more resources than this, you should use ListStackResources instead.

For deleted stacks, DescribeStackResources returns resource information for up to 90 days after the stack has been deleted.

You must specify either StackName or PhysicalResourceId, but not both. In addition, you can specify LogicalResourceId to filter the returned result. For more information about resources, the LogicalResourceId and PhysicalResourceId, go to the AWS CloudFormation User Guide.

A ValidationError is returned if you specify both StackName and PhysicalResourceId in the same request.

" }, + "DescribeStackSet":{ + "name":"DescribeStackSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStackSetInput"}, + "output":{ + "shape":"DescribeStackSetOutput", + "resultWrapper":"DescribeStackSetResult" + }, + "errors":[ + {"shape":"StackSetNotFoundException"} + ], + "documentation":"

Returns the description of the specified stack set.

" + }, + "DescribeStackSetOperation":{ + "name":"DescribeStackSetOperation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStackSetOperationInput"}, + "output":{ + "shape":"DescribeStackSetOperationOutput", + "resultWrapper":"DescribeStackSetOperationResult" + }, + "errors":[ + {"shape":"StackSetNotFoundException"}, + {"shape":"OperationNotFoundException"} + ], + "documentation":"

Returns the description of the specified stack set operation.

" + }, "DescribeStacks":{ "name":"DescribeStacks", "http":{ @@ -256,6 +382,9 @@ "shape":"GetTemplateSummaryOutput", "resultWrapper":"GetTemplateSummaryResult" }, + "errors":[ + {"shape":"StackSetNotFoundException"} + ], "documentation":"

Returns information about a new or existing template. The GetTemplateSummary action is useful for viewing parameter information, such as default parameter values and parameter types, before you create or update a stack.

You can use the GetTemplateSummary action when you submit a template, or you can get template information for a running or deleted stack.

For deleted stacks, GetTemplateSummary returns the template information for up to 90 days after the stack has been deleted. If the template does not exist, a ValidationError is returned.

" }, "ListChangeSets":{ @@ -297,6 +426,22 @@ }, "documentation":"

Lists all stacks that are importing an exported output value. To modify or remove an exported output value, first use this action to see which stacks are using it. To see the exported output values in your account, see ListExports.

For more information about importing an exported output value, see the Fn::ImportValue function.

" }, + "ListStackInstances":{ + "name":"ListStackInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStackInstancesInput"}, + "output":{ + "shape":"ListStackInstancesOutput", + "resultWrapper":"ListStackInstancesResult" + }, + "errors":[ + {"shape":"StackSetNotFoundException"} + ], + "documentation":"

Returns summary information about stack instances that are associated with the specified stack set. You can filter for stack instances that are associated with a specific AWS account name or region.

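The account and region filters make this the natural way to find instances that drifted out of sync. A sketch, again with placeholder identifiers:

```python
import boto3

cfn = boto3.client("cloudformation", region_name="us-east-1")

# Restrict the listing to a single account; both filters are optional.
page = cfn.list_stack_instances(
    StackSetName="example-audit",            # placeholder
    StackInstanceAccount="111111111111",
    MaxResults=50,
)

outdated = [s for s in page["Summaries"] if s["Status"] == "OUTDATED"]
for summary in outdated:
    print(summary["Account"], summary["Region"], summary.get("StatusReason", ""))
```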
" + }, "ListStackResources":{ "name":"ListStackResources", "http":{ @@ -310,6 +455,52 @@ }, "documentation":"

Returns descriptions of all resources of the specified stack.

For deleted stacks, ListStackResources returns resource information for up to 90 days after the stack has been deleted.

" }, + "ListStackSetOperationResults":{ + "name":"ListStackSetOperationResults", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStackSetOperationResultsInput"}, + "output":{ + "shape":"ListStackSetOperationResultsOutput", + "resultWrapper":"ListStackSetOperationResultsResult" + }, + "errors":[ + {"shape":"StackSetNotFoundException"}, + {"shape":"OperationNotFoundException"} + ], + "documentation":"

Returns summary information about the results of a stack set operation.

" + }, + "ListStackSetOperations":{ + "name":"ListStackSetOperations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStackSetOperationsInput"}, + "output":{ + "shape":"ListStackSetOperationsOutput", + "resultWrapper":"ListStackSetOperationsResult" + }, + "errors":[ + {"shape":"StackSetNotFoundException"} + ], + "documentation":"

Returns summary information about operations performed on a stack set.

" + }, + "ListStackSets":{ + "name":"ListStackSets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStackSetsInput"}, + "output":{ + "shape":"ListStackSetsOutput", + "resultWrapper":"ListStackSetsResult" + }, + "documentation":"

Returns summary information about stack sets that are associated with the user.

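All of these List* operations share the NextToken contract described in their input shapes below. A minimal pagination loop over ListStackSets illustrates it:

```python
import boto3

cfn = boto3.client("cloudformation", region_name="us-east-1")

# Follow NextToken until the service signals the last page with a null token.
summaries, token = [], None
while True:
    kwargs = {"Status": "ACTIVE", "MaxResults": 100}
    if token:
        kwargs["NextToken"] = token
    page = cfn.list_stack_sets(**kwargs)
    summaries.extend(page["Summaries"])
    token = page.get("NextToken")
    if not token:
        break

print(len(summaries), "active stack sets")
```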
" + }, "ListStacks":{ "name":"ListStacks", "http":{ @@ -341,6 +532,24 @@ "input":{"shape":"SignalResourceInput"}, "documentation":"

Sends a signal to the specified resource with a success or failure status. You can use the SignalResource API in conjunction with a creation policy or update policy. AWS CloudFormation doesn't proceed with a stack creation or update until resources receive the required number of signals or the timeout period is exceeded. The SignalResource API is useful in cases where you want to send signals from anywhere other than an Amazon EC2 instance.

" }, + "StopStackSetOperation":{ + "name":"StopStackSetOperation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopStackSetOperationInput"}, + "output":{ + "shape":"StopStackSetOperationOutput", + "resultWrapper":"StopStackSetOperationResult" + }, + "errors":[ + {"shape":"StackSetNotFoundException"}, + {"shape":"OperationNotFoundException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

Stops an in-progress operation on a stack set and its associated stack instances.

" + }, "UpdateStack":{ "name":"UpdateStack", "http":{ @@ -358,6 +567,26 @@ ], "documentation":"

Updates a stack as specified in the template. After the call completes successfully, the stack update starts. You can check the status of the stack via the DescribeStacks action.

To get a copy of the template for an existing stack, you can use the GetTemplate action.

For more information about creating an update template, updating a stack, and monitoring the progress of the update, see Updating a Stack.

" }, + "UpdateStackSet":{ + "name":"UpdateStackSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateStackSetInput"}, + "output":{ + "shape":"UpdateStackSetOutput", + "resultWrapper":"UpdateStackSetResult" + }, + "errors":[ + {"shape":"StackSetNotFoundException"}, + {"shape":"OperationInProgressException"}, + {"shape":"OperationIdAlreadyExistsException"}, + {"shape":"StaleRequestException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

Updates the stack set and all associated stack instances.

Even if the stack set operation created by updating the stack set fails (completely or partially, below or above a specified failure tolerance), the stack set is updated with your changes. Subsequent CreateStackInstances calls on the specified stack set use the updated stack set.

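Since UpdateStackSet fans out to every stack instance, callers typically poll the resulting operation. A sketch with placeholder names; the template file path is hypothetical:

```python
import time

import boto3

cfn = boto3.client("cloudformation", region_name="us-east-1")

# Pushing a template change re-deploys every stack instance in the set.
op = cfn.update_stack_set(
    StackSetName="example-audit",            # placeholder
    TemplateBody=open("template.yaml").read(),
)

# Poll the operation; note that the stack set itself is updated even if
# some instances end up OUTDATED (see the description above).
while True:
    status = cfn.describe_stack_set_operation(
        StackSetName="example-audit",
        OperationId=op["OperationId"],
    )["StackSetOperation"]["Status"]
    if status in ("SUCCEEDED", "FAILED", "STOPPED"):
        break
    time.sleep(10)
print("operation finished with status", status)
```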
" + }, "ValidateTemplate":{ "name":"ValidateTemplate", "http":{ @@ -373,6 +602,33 @@ } }, "shapes":{ + "Account":{ + "type":"string", + "pattern":"[0-9]{12}" + }, + "AccountGateResult":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"AccountGateStatus", + "documentation":"

The status of the account gate function.

" + }, + "StatusReason":{ + "shape":"AccountGateStatusReason", + "documentation":"

The reason for the account gate status assigned to this account.

" + } + }, + "documentation":"

Structure that contains the results of the account gate function AWS CloudFormation StackSets invokes, if present, before proceeding with stack set operations in an account.

Account gating enables you to specify a Lambda function for an account that encapsulates any requirements that must be met before AWS CloudFormation StackSets proceeds with stack set operations in that account. CloudFormation invokes the function each time stack set operations are initiated for that account, and only proceeds if the function returns a success code.

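The model doesn't spell out the invocation contract; the handler below is a hypothetical sketch whose return payload is an assumption modelled on the AccountGateStatus and AccountGateStatusReason shapes, and whose expected function name (AWSCloudFormationStackSetAccountGate) is taken from the AWS StackSets documentation rather than this file:

```python
# Hypothetical account gate handler; the return payload is an assumption
# based on the AccountGateStatus / AccountGateStatusReason shapes.
def lambda_handler(event, context):
    # Encapsulate whatever precondition the account must meet.
    if _required_vpc_exists():
        return {"Status": "SUCCEEDED"}
    return {
        "Status": "FAILED",
        "StatusReason": "Required VPC is missing in this account.",
    }


def _required_vpc_exists():
    # Placeholder check; a real gate might call ec2.describe_vpcs here.
    return True
```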
" + }, + "AccountGateStatus":{ + "type":"string", + "enum":[ + "SUCCEEDED", + "FAILED", + "SKIPPED" + ] + }, + "AccountGateStatusReason":{"type":"string"}, "AccountLimit":{ "type":"structure", "members":{ @@ -391,6 +647,10 @@ "type":"list", "member":{"shape":"AccountLimit"} }, + "AccountList":{ + "type":"list", + "member":{"shape":"Account"} + }, "AllowedValue":{"type":"string"}, "AllowedValues":{ "type":"list", @@ -400,7 +660,7 @@ "type":"structure", "members":{ }, - "documentation":"

Resource with the name requested already exists.

", + "documentation":"

The resource with the name requested already exists.

", "error":{ "code":"AlreadyExistsException", "httpStatusCode":400, @@ -573,7 +833,7 @@ "type":"string", "max":128, "min":1, - "pattern":"[a-zA-Z][-a-zA-Z0-9]*" + "pattern":"[a-zA-Z0-9][-a-zA-Z0-9]*" }, "ClientToken":{ "type":"string", @@ -594,7 +854,7 @@ }, "ResourcesToSkip":{ "shape":"ResourcesToSkip", - "documentation":"

A list of the logical IDs of the resources that AWS CloudFormation skips during the continue update rollback operation. You can specify only resources that are in the UPDATE_FAILED state because a rollback failed. You can't specify resources that are in the UPDATE_FAILED state for other reasons, for example, because an update was canceled. To check why a resource update failed, use the DescribeStackResources action, and view the resource status reason.

Specify this property to skip rolling back resources that AWS CloudFormation can't successfully roll back. We recommend that you troubleshoot resources before skipping them. AWS CloudFormation sets the status of the specified resources to UPDATE_COMPLETE and continues to roll back the stack. After the rollback is complete, the state of the skipped resources will be inconsistent with the state of the resources in the stack template. Before performing another stack update, you must update the stack or resources to be consistent with each other. If you don't, subsequent stack updates might fail, and the stack will become unrecoverable.

Specify the minimum number of resources required to successfully roll back your stack. For example, a failed resource update might cause dependent resources to fail. In this case, it might not be necessary to skip the dependent resources.

To specify resources in a nested stack, use the following format: NestedStackName.ResourceLogicalID. If the ResourceLogicalID is a stack resource (Type: AWS::CloudFormation::Stack), it must be in one of the following states: DELETE_IN_PROGRESS, DELETE_COMPLETE, or DELETE_FAILED.

" + "documentation":"

A list of the logical IDs of the resources that AWS CloudFormation skips during the continue update rollback operation. You can specify only resources that are in the UPDATE_FAILED state because a rollback failed. You can't specify resources that are in the UPDATE_FAILED state for other reasons, for example, because an update was cancelled. To check why a resource update failed, use the DescribeStackResources action, and view the resource status reason.

Specify this property to skip rolling back resources that AWS CloudFormation can't successfully roll back. We recommend that you troubleshoot resources before skipping them. AWS CloudFormation sets the status of the specified resources to UPDATE_COMPLETE and continues to roll back the stack. After the rollback is complete, the state of the skipped resources will be inconsistent with the state of the resources in the stack template. Before performing another stack update, you must update the stack or resources to be consistent with each other. If you don't, subsequent stack updates might fail, and the stack will become unrecoverable.

Specify the minimum number of resources required to successfully roll back your stack. For example, a failed resource update might cause dependent resources to fail. In this case, it might not be necessary to skip the dependent resources.

To skip resources that are part of nested stacks, use the following format: NestedStackName.ResourceLogicalID. If you want to specify the logical ID of a stack resource (Type: AWS::CloudFormation::Stack) in the ResourcesToSkip list, then its corresponding embedded stack must be in one of the following states: DELETE_IN_PROGRESS, DELETE_COMPLETE, or DELETE_FAILED.

Don't confuse a child stack's name with its corresponding logical ID defined in the parent stack. For an example of a continue update rollback operation with nested stacks, see Using ResourcesToSkip to recover a nested stacks hierarchy.

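A short sketch of the call this documentation describes, including the NestedStackName.ResourceLogicalID format for a resource inside a nested stack; the stack and resource names are placeholders:

```python
import boto3

cfn = boto3.client("cloudformation", region_name="us-east-1")

# Skip resources stuck in UPDATE_FAILED so the rollback can proceed.
cfn.continue_update_rollback(
    StackName="example-stack",                 # placeholder
    ResourcesToSkip=[
        "BrokenFunction",
        "NetworkingStack.VpcEndpoint",         # resource inside a nested stack
    ],
)
```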
" }, "ClientRequestToken":{ "shape":"ClientRequestToken", @@ -654,7 +914,7 @@ }, "Tags":{ "shape":"Tags", - "documentation":"

Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to resources in the stack. You can specify a maximum of 10 tags.

" + "documentation":"

Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to resources in the stack. You can specify a maximum of 50 tags.

" }, "ChangeSetName":{ "shape":"ChangeSetName", @@ -747,15 +1007,55 @@ }, "Tags":{ "shape":"Tags", - "documentation":"

Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to the resources created in the stack. A maximum number of 10 tags can be specified.

" + "documentation":"

Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to the resources created in the stack. A maximum number of 50 tags can be specified.

" }, "ClientRequestToken":{ "shape":"ClientRequestToken", - "documentation":"

A unique identifier for this CreateStack request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to create a stack with the same name. You might retry CreateStack requests to ensure that AWS CloudFormation successfully received them.

" + "documentation":"

A unique identifier for this CreateStack request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to create a stack with the same name. You might retry CreateStack requests to ensure that AWS CloudFormation successfully received them.

All events triggered by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1.

In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation. For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.

" } }, "documentation":"

The input for CreateStack action.

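The token-tracking behavior described above can be exercised directly: pass a token on creation, then filter the stack's events by it. A sketch; the stack name, template path, and token prefix are placeholders:

```python
import uuid

import boto3

cfn = boto3.client("cloudformation", region_name="us-east-1")

# Reuse the same token on retries so CloudFormation treats them as one request.
token = "create-{}".format(uuid.uuid4())

cfn.create_stack(
    StackName="example-stack",               # placeholder
    TemplateBody=open("template.yaml").read(),
    ClientRequestToken=token,
)

# Every event raised by this operation carries the same token.
events = cfn.describe_stack_events(StackName="example-stack")["StackEvents"]
for event in (e for e in events if e.get("ClientRequestToken") == token):
    print(event["Timestamp"], event["LogicalResourceId"], event["ResourceStatus"])
```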
" }, + "CreateStackInstancesInput":{ + "type":"structure", + "required":[ + "StackSetName", + "Accounts", + "Regions" + ], + "members":{ + "StackSetName":{ + "shape":"StackSetName", + "documentation":"

The name or unique ID of the stack set that you want to create stack instances from.

" + }, + "Accounts":{ + "shape":"AccountList", + "documentation":"

The names of one or more AWS accounts that you want to create stack instances in the specified region(s) for.

" + }, + "Regions":{ + "shape":"RegionList", + "documentation":"

The names of one or more regions where you want to create stack instances using the specified AWS account(s).

" + }, + "OperationPreferences":{ + "shape":"StackSetOperationPreferences", + "documentation":"

Preferences for how AWS CloudFormation performs this stack set operation.

" + }, + "OperationId":{ + "shape":"ClientRequestToken", + "documentation":"

The unique identifier for this stack set operation.

The operation ID also functions as an idempotency token, to ensure that AWS CloudFormation performs the stack set operation only once, even if you retry the request multiple times. You might retry stack set operation requests to ensure that AWS CloudFormation successfully received them.

If you don't specify an operation ID, the SDK generates one automatically.

Repeating this stack set operation with a new operation ID retries all stack instances whose status is OUTDATED.

", + "idempotencyToken":true + } + } + }, + "CreateStackInstancesOutput":{ + "type":"structure", + "members":{ + "OperationId":{ + "shape":"ClientRequestToken", + "documentation":"

The unique identifier for this stack set operation.

" + } + } + }, "CreateStackOutput":{ "type":"structure", "members":{ @@ -766,6 +1066,66 @@ }, "documentation":"

The output for a CreateStack action.

" }, + "CreateStackSetInput":{ + "type":"structure", + "required":["StackSetName"], + "members":{ + "StackSetName":{ + "shape":"StackSetName", + "documentation":"

The name to associate with the stack set. The name must be unique in the region where you create your stack set.

A stack set name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphabetic character and can't be longer than 128 characters.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of the stack set. You can use the description to identify the stack set's purpose or other important information.

" + }, + "TemplateBody":{ + "shape":"TemplateBody", + "documentation":"

The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, see Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

" + }, + "TemplateURL":{ + "shape":"TemplateURL", + "documentation":"

The location of the file that contains the template body. The URL must point to a template (maximum size: 460,800 bytes) that's located in an Amazon S3 bucket. For more information, see Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

" + }, + "Parameters":{ + "shape":"Parameters", + "documentation":"

The input parameters for the stack set template.

" + }, + "Capabilities":{ + "shape":"Capabilities", + "documentation":"

A list of values that you must specify before AWS CloudFormation can create certain stack sets. Some stack set templates might include resources that can affect permissions in your AWS account—for example, by creating new AWS Identity and Access Management (IAM) users. For those stack sets, you must explicitly acknowledge their capabilities by specifying this parameter.

The only valid values are CAPABILITY_IAM and CAPABILITY_NAMED_IAM. The following resources require you to specify this parameter:

If your stack template contains these resources, we recommend that you review all permissions that are associated with them and edit their permissions if necessary.

If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify this parameter, this action returns an InsufficientCapabilities error.

For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

The key-value pairs to associate with this stack set and the stacks created from it. AWS CloudFormation also propagates these tags to supported resources that are created in the stacks. A maximum number of 50 tags can be specified.

If you specify tags as part of a CreateStackSet action, AWS CloudFormation checks to see if you have the required IAM permission to tag resources. If you don't, the entire CreateStackSet action fails with an access denied error, and the stack set is not created.

" + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

A unique identifier for this CreateStackSet request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to create another stack set with the same name. You might retry CreateStackSet requests to ensure that AWS CloudFormation successfully received them.

If you don't specify an operation ID, the SDK generates one automatically.

", + "idempotencyToken":true + } + } + }, + "CreateStackSetOutput":{ + "type":"structure", + "members":{ + "StackSetId":{ + "shape":"StackSetId", + "documentation":"

The ID of the stack set that you're creating.

" + } + } + }, + "CreatedButModifiedException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified resource exists, but has been changed.

", + "error":{ + "code":"CreatedButModifiedException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, "CreationTime":{"type":"timestamp"}, "DeleteChangeSetInput":{ "type":"structure", @@ -806,11 +1166,71 @@ }, "ClientRequestToken":{ "shape":"ClientRequestToken", - "documentation":"

A unique identifier for this DeleteStack request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to delete a stack with the same name. You might retry DeleteStack requests to ensure that AWS CloudFormation successfully received them.

" + "documentation":"

A unique identifier for this DeleteStack request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to delete a stack with the same name. You might retry DeleteStack requests to ensure that AWS CloudFormation successfully received them.

All events triggered by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1.

In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation. For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.

" } }, "documentation":"

The input for DeleteStack action.

" }, + "DeleteStackInstancesInput":{ + "type":"structure", + "required":[ + "StackSetName", + "Accounts", + "Regions", + "RetainStacks" + ], + "members":{ + "StackSetName":{ + "shape":"StackSetName", + "documentation":"

The name or unique ID of the stack set that you want to delete stack instances for.

" + }, + "Accounts":{ + "shape":"AccountList", + "documentation":"

The names of the AWS accounts that you want to delete stack instances for.

" + }, + "Regions":{ + "shape":"RegionList", + "documentation":"

The regions where you want to delete stack set instances.

" + }, + "OperationPreferences":{ + "shape":"StackSetOperationPreferences", + "documentation":"

Preferences for how AWS CloudFormation performs this stack set operation.

" + }, + "RetainStacks":{ + "shape":"RetainStacks", + "documentation":"

Removes the stack instances from the specified stack set, but doesn't delete the stacks. You can't reassociate a retained stack or add an existing, saved stack to a new stack set.

" + }, + "OperationId":{ + "shape":"ClientRequestToken", + "documentation":"

The unique identifier for this stack set operation.

If you don't specify an operation ID, the SDK generates one automatically.

The operation ID also functions as an idempotency token, to ensure that AWS CloudFormation performs the stack set operation only once, even if you retry the request multiple times. You can retry stack set operation requests to ensure that AWS CloudFormation successfully received them.

Repeating this stack set operation with a new operation ID retries all stack instances whose status is OUTDATED.

", + "idempotencyToken":true + } + } + }, + "DeleteStackInstancesOutput":{ + "type":"structure", + "members":{ + "OperationId":{ + "shape":"ClientRequestToken", + "documentation":"

The unique identifier for this stack set operation.

" + } + } + }, + "DeleteStackSetInput":{ + "type":"structure", + "required":["StackSetName"], + "members":{ + "StackSetName":{ + "shape":"StackSetName", + "documentation":"

The name or unique ID of the stack set that you're deleting. You can obtain this value by running ListStackSets.

" + } + } + }, + "DeleteStackSetOutput":{ + "type":"structure", + "members":{ + } + }, "DeletionTime":{"type":"timestamp"}, "DescribeAccountLimitsInput":{ "type":"structure", @@ -949,6 +1369,37 @@ }, "documentation":"

The output for a DescribeStackEvents action.

" }, + "DescribeStackInstanceInput":{ + "type":"structure", + "required":[ + "StackSetName", + "StackInstanceAccount", + "StackInstanceRegion" + ], + "members":{ + "StackSetName":{ + "shape":"StackSetName", + "documentation":"

The name or the unique stack ID of the stack set that you want to get stack instance information for.

" + }, + "StackInstanceAccount":{ + "shape":"Account", + "documentation":"

The ID of an AWS account that's associated with this stack instance.

" + }, + "StackInstanceRegion":{ + "shape":"Region", + "documentation":"

The name of a region that's associated with this stack instance.

" + } + } + }, + "DescribeStackInstanceOutput":{ + "type":"structure", + "members":{ + "StackInstance":{ + "shape":"StackInstance", + "documentation":"

The stack instance that matches the specified request parameters.

" + } + } + }, "DescribeStackResourceInput":{ "type":"structure", "required":[ @@ -1005,6 +1456,51 @@ }, "documentation":"

The output for a DescribeStackResources action.

" }, + "DescribeStackSetInput":{ + "type":"structure", + "required":["StackSetName"], + "members":{ + "StackSetName":{ + "shape":"StackSetName", + "documentation":"

The name or unique ID of the stack set whose description you want.

" + } + } + }, + "DescribeStackSetOperationInput":{ + "type":"structure", + "required":[ + "StackSetName", + "OperationId" + ], + "members":{ + "StackSetName":{ + "shape":"StackSetName", + "documentation":"

The name or the unique stack ID of the stack set for the stack operation.

" + }, + "OperationId":{ + "shape":"ClientRequestToken", + "documentation":"

The unique ID of the stack set operation.

" + } + } + }, + "DescribeStackSetOperationOutput":{ + "type":"structure", + "members":{ + "StackSetOperation":{ + "shape":"StackSetOperation", + "documentation":"

The specified stack set operation.

" + } + } + }, + "DescribeStackSetOutput":{ + "type":"structure", + "members":{ + "StackSet":{ + "shape":"StackSet", + "documentation":"

The specified stack set.

" + } + } + }, "DescribeStacksInput":{ "type":"structure", "members":{ @@ -1135,6 +1631,15 @@ "type":"list", "member":{"shape":"Export"} }, + "FailureToleranceCount":{ + "type":"integer", + "min":0 + }, + "FailureTolerancePercentage":{ + "type":"integer", + "max":100, + "min":0 + }, "GetStackPolicyInput":{ "type":"structure", "required":["StackName"], @@ -1202,6 +1707,10 @@ "StackName":{ "shape":"StackNameOrId", "documentation":"

The name or the stack ID that is associated with the stack, which are not always interchangeable. For running stacks, you can specify either the stack's name or its unique stack ID. For deleted stacks, you must specify the unique stack ID.

Conditional: You must specify only one of the following parameters: StackName, TemplateBody, or TemplateURL.

" + }, + "StackSetName":{ + "shape":"StackSetNameOrId", + "documentation":"

The name or unique ID of the stack set from which the stack was created.

" } }, "documentation":"

The input for the GetTemplateSummary action.

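With the new StackSetName member added here, GetTemplateSummary can inspect a stack set's template directly. A small sketch, using the placeholder stack set name from the earlier examples:

```python
import boto3

cfn = boto3.client("cloudformation", region_name="us-east-1")

# Exactly one of StackName, StackSetName, TemplateBody, or TemplateURL
# may be supplied; here the new StackSetName parameter is used.
summary = cfn.get_template_summary(StackSetName="example-audit")  # placeholder

for param in summary.get("Parameters", []):
    print(param["ParameterKey"], param.get("DefaultValue"))
```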
" @@ -1252,7 +1761,7 @@ "type":"structure", "members":{ }, - "documentation":"

The template contains resources with capabilities that were not specified in the Capabilities parameter.

", + "documentation":"

The template contains resources with capabilities that weren't specified in the Capabilities parameter.

", "error":{ "code":"InsufficientCapabilitiesException", "httpStatusCode":400, @@ -1264,7 +1773,7 @@ "type":"structure", "members":{ }, - "documentation":"

The specified change set cannot be used to update the stack. For example, the change set status might be CREATE_IN_PROGRESS or the stack status might be UPDATE_IN_PROGRESS.

", + "documentation":"

The specified change set can't be used to update the stack. For example, the change set status might be CREATE_IN_PROGRESS, or the stack status might be UPDATE_IN_PROGRESS.

", "error":{ "code":"InvalidChangeSetStatus", "httpStatusCode":400, @@ -1272,12 +1781,24 @@ }, "exception":true }, + "InvalidOperationException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified operation isn't valid.

", + "error":{ + "code":"InvalidOperationException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "LastUpdatedTime":{"type":"timestamp"}, "LimitExceededException":{ "type":"structure", "members":{ }, - "documentation":"

Quota for the resource has already been reached.

", + "documentation":"

The quota for the resource has already been reached.

", "error":{ "code":"LimitExceededException", "httpStatusCode":400, @@ -1365,6 +1886,45 @@ } } }, + "ListStackInstancesInput":{ + "type":"structure", + "required":["StackSetName"], + "members":{ + "StackSetName":{ + "shape":"StackSetName", + "documentation":"

The name or unique ID of the stack set that you want to list stack instances for.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the previous request didn't return all of the remaining results, the response's NextToken parameter value is set to a token. To retrieve the next set of results, call ListStackInstances again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

" + }, + "StackInstanceAccount":{ + "shape":"Account", + "documentation":"

The name of the AWS account that you want to list stack instances for.

" + }, + "StackInstanceRegion":{ + "shape":"Region", + "documentation":"

The name of the region where you want to list stack instances.

" + } + } + }, + "ListStackInstancesOutput":{ + "type":"structure", + "members":{ + "Summaries":{ + "shape":"StackInstanceSummaries", + "documentation":"

A list of StackInstanceSummary structures that contain information about the specified stack instances.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the request doesn't return all of the remaining results, NextToken is set to a token. To retrieve the next set of results, call ListStackInstances again and assign that token to the request object's NextToken parameter. If the request returns all results, NextToken is set to null.

" + } + } + }, "ListStackResourcesInput":{ "type":"structure", "required":["StackName"], @@ -1394,6 +1954,105 @@ }, "documentation":"

The output for a ListStackResources action.

" }, + "ListStackSetOperationResultsInput":{ + "type":"structure", + "required":[ + "StackSetName", + "OperationId" + ], + "members":{ + "StackSetName":{ + "shape":"StackSetName", + "documentation":"

The name or unique ID of the stack set that you want to get operation results for.

" + }, + "OperationId":{ + "shape":"ClientRequestToken", + "documentation":"

The ID of the stack set operation.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the previous request didn't return all of the remaining results, the response object's NextToken parameter value is set to a token. To retrieve the next set of results, call ListStackSetOperationResults again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

" + } + } + }, + "ListStackSetOperationResultsOutput":{ + "type":"structure", + "members":{ + "Summaries":{ + "shape":"StackSetOperationResultSummaries", + "documentation":"

A list of StackSetOperationResultSummary structures that contain information about the specified operation results, for accounts and regions that are included in the operation.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the request doesn't return all results, NextToken is set to a token. To retrieve the next set of results, call ListStackSetOperationResults again and assign that token to the request object's NextToken parameter. If there are no remaining results, NextToken is set to null.

" + } + } + }, + "ListStackSetOperationsInput":{ + "type":"structure", + "required":["StackSetName"], + "members":{ + "StackSetName":{ + "shape":"StackSetName", + "documentation":"

The name or unique ID of the stack set that you want to get operation summaries for.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the previous paginated request didn't return all of the remaining results, the response object's NextToken parameter value is set to a token. To retrieve the next set of results, call ListStackSetOperations again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

" + } + } + }, + "ListStackSetOperationsOutput":{ + "type":"structure", + "members":{ + "Summaries":{ + "shape":"StackSetOperationSummaries", + "documentation":"

A list of StackSetOperationSummary structures that contain summary information about operations for the specified stack set.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the request doesn't return all results, NextToken is set to a token. To retrieve the next set of results, call ListStackSetOperations again and assign that token to the request object's NextToken parameter. If there are no remaining results, NextToken is set to null.

" + } + } + }, + "ListStackSetsInput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the previous paginated request didn't return all of the remaining results, the response object's NextToken parameter value is set to a token. To retrieve the next set of results, call ListStackSets again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

" + }, + "Status":{ + "shape":"StackSetStatus", + "documentation":"

The status of the stack sets that you want to get summary information about.

" + } + } + }, + "ListStackSetsOutput":{ + "type":"structure", + "members":{ + "Summaries":{ + "shape":"StackSetSummaries", + "documentation":"

A list of StackSetSummary structures that contain information about the user's stack sets.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the request doesn't return all of the remaining results, NextToken is set to a token. To retrieve the next set of results, call ListStackSets again and assign that token to the request object's NextToken parameter. If the request returns all results, NextToken is set to null.

" + } + } + }, "ListStacksInput":{ "type":"structure", "members":{ @@ -1423,7 +2082,33 @@ "documentation":"

The output for ListStacks action.

" }, "LogicalResourceId":{"type":"string"}, + "MaxConcurrentCount":{ + "type":"integer", + "min":1 + }, + "MaxConcurrentPercentage":{ + "type":"integer", + "max":100, + "min":1 + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, "Metadata":{"type":"string"}, + "NameAlreadyExistsException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified name is already in use.

", + "error":{ + "code":"NameAlreadyExistsException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, "NextToken":{ "type":"string", "max":1024, @@ -1444,6 +2129,42 @@ "DELETE" ] }, + "OperationIdAlreadyExistsException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified operation ID already exists.

", + "error":{ + "code":"OperationIdAlreadyExistsException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "OperationInProgressException":{ + "type":"structure", + "members":{ + }, + "documentation":"

Another operation is currently in progress for this stack set. Only one operation can be performed for a stack set at a given time.

", + "error":{ + "code":"OperationInProgressException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "OperationNotFoundException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified ID refers to an operation that doesn't exist.

", + "error":{ + "code":"OperationNotFoundException", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, "Output":{ "type":"structure", "members":{ @@ -1458,6 +2179,10 @@ "Description":{ "shape":"Description", "documentation":"

User-defined description associated with the output.

" + }, + "ExportName":{ + "shape":"ExportName", + "documentation":"

The name of the export associated with the output.

" } }, "documentation":"

The Output data type.

" @@ -1539,6 +2264,12 @@ }, "PhysicalResourceId":{"type":"string"}, "PropertyName":{"type":"string"}, + "Reason":{"type":"string"}, + "Region":{"type":"string"}, + "RegionList":{ + "type":"list", + "member":{"shape":"Region"} + }, "Replacement":{ "type":"string", "enum":[ @@ -1694,6 +2425,8 @@ "type":"list", "member":{"shape":"LogicalResourceId"} }, + "RetainStacks":{"type":"boolean"}, + "RetainStacksNullable":{"type":"boolean"}, "RoleARN":{ "type":"string", "max":2048, @@ -1876,7 +2609,7 @@ }, "ClientRequestToken":{ "shape":"ClientRequestToken", - "documentation":"

The token passed to the operation that generated this event.

For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1.

" + "documentation":"

The token passed to the operation that generated this event.

All events triggered by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1.

In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation. For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.

" } }, "documentation":"

The StackEvent data type.

" @@ -1886,6 +2619,90 @@ "member":{"shape":"StackEvent"} }, "StackId":{"type":"string"}, + "StackInstance":{ + "type":"structure", + "members":{ + "StackSetId":{ + "shape":"StackSetId", + "documentation":"

The name or unique ID of the stack set that the stack instance is associated with.

" + }, + "Region":{ + "shape":"Region", + "documentation":"

The name of the AWS region that the stack instance is associated with.

" + }, + "Account":{ + "shape":"Account", + "documentation":"

The name of the AWS account that the stack instance is associated with.

" + }, + "StackId":{ + "shape":"StackId", + "documentation":"

The ID of the stack instance.

" + }, + "Status":{ + "shape":"StackInstanceStatus", + "documentation":"

The status of the stack instance, in terms of its synchronization with its associated stack set.

" + }, + "StatusReason":{ + "shape":"Reason", + "documentation":"

The explanation for the specific status code that is assigned to this stack instance.

" + } + }, + "documentation":"

An AWS CloudFormation stack, in a specific account and region, that's part of a stack set operation. A stack instance is a reference to an attempted or actual stack in a given account within a given region. A stack instance can exist without a stack—for example, if the stack couldn't be created for some reason. A stack instance is associated with only one stack set. Each stack instance contains the ID of its associated stack set, as well as the ID of the actual stack and the stack status.

" + }, + "StackInstanceNotFoundException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified stack instance doesn't exist.

", + "error":{ + "code":"StackInstanceNotFoundException", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "StackInstanceStatus":{ + "type":"string", + "enum":[ + "CURRENT", + "OUTDATED", + "INOPERABLE" + ] + }, + "StackInstanceSummaries":{ + "type":"list", + "member":{"shape":"StackInstanceSummary"} + }, + "StackInstanceSummary":{ + "type":"structure", + "members":{ + "StackSetId":{ + "shape":"StackSetId", + "documentation":"

The name or unique ID of the stack set that the stack instance is associated with.

" + }, + "Region":{ + "shape":"Region", + "documentation":"

The name of the AWS region that the stack instance is associated with.

" + }, + "Account":{ + "shape":"Account", + "documentation":"

The name of the AWS account that the stack instance is associated with.

" + }, + "StackId":{ + "shape":"StackId", + "documentation":"

The ID of the stack instance.

" + }, + "Status":{ + "shape":"StackInstanceStatus", + "documentation":"

The status of the stack instance, in terms of its synchronization with its associated stack set.

" + }, + "StatusReason":{ + "shape":"Reason", + "documentation":"

The explanation for the specific status code assigned to this stack instance.

" + } + }, + "documentation":"

The structure that contains summary information about a stack instance.

" + }, "StackName":{"type":"string"}, "StackNameOrId":{ "type":"string", @@ -2056,6 +2873,260 @@ "type":"list", "member":{"shape":"StackResource"} }, + "StackSet":{ + "type":"structure", + "members":{ + "StackSetName":{ + "shape":"StackSetName", + "documentation":"

The name that's associated with the stack set.

" + }, + "StackSetId":{ + "shape":"StackSetId", + "documentation":"

The ID of the stack set.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of the stack set that you specify when the stack set is created or updated.

" + }, + "Status":{ + "shape":"StackSetStatus", + "documentation":"

The status of the stack set.

" + }, + "TemplateBody":{ + "shape":"TemplateBody", + "documentation":"

The structure that contains the body of the template that was used to create or update the stack set.

" + }, + "Parameters":{ + "shape":"Parameters", + "documentation":"

A list of input parameters for a stack set.

" + }, + "Capabilities":{ + "shape":"Capabilities", + "documentation":"

The capabilities that are allowed in the stack set. Some stack set templates might include resources that can affect permissions in your AWS account—for example, by creating new AWS Identity and Access Management (IAM) users. For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

A list of tags that specify information about the stack set. A maximum of 50 tags can be specified.

" + } + }, + "documentation":"

A structure that contains information about a stack set. A stack set enables you to provision stacks into AWS accounts and across regions by using a single CloudFormation template. In the stack set, you specify the template to use, as well as any parameters and capabilities that the template requires.

" + }, + "StackSetId":{"type":"string"}, + "StackSetName":{"type":"string"}, + "StackSetNameOrId":{ + "type":"string", + "min":1, + "pattern":"[a-zA-Z][-a-zA-Z0-9]*" + }, + "StackSetNotEmptyException":{ + "type":"structure", + "members":{ + }, + "documentation":"

You can't yet delete this stack set, because it still contains one or more stack instances. Delete all stack instances from the stack set before deleting the stack set.

", + "error":{ + "code":"StackSetNotEmptyException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "StackSetNotFoundException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified stack set doesn't exist.

", + "error":{ + "code":"StackSetNotFoundException", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "StackSetOperation":{ + "type":"structure", + "members":{ + "OperationId":{ + "shape":"ClientRequestToken", + "documentation":"

The unique ID of a stack set operation.

" + }, + "StackSetId":{ + "shape":"StackSetId", + "documentation":"

The ID of the stack set.

" + }, + "Action":{ + "shape":"StackSetOperationAction", + "documentation":"

The type of stack set operation: CREATE, UPDATE, or DELETE. Create and delete operations affect only the specified stack instances that are associated with the specified stack set. Update operations affect both the stack set itself and all associated stack instances.

" + }, + "Status":{ + "shape":"StackSetOperationStatus", + "documentation":"

The status of the operation.

" + }, + "OperationPreferences":{ + "shape":"StackSetOperationPreferences", + "documentation":"

The preferences for how AWS CloudFormation performs this stack set operation.

" + }, + "RetainStacks":{ + "shape":"RetainStacksNullable", + "documentation":"

For stack set operations of action type DELETE, specifies whether to remove the stack instances from the specified stack set without deleting the stacks. You can't reassociate a retained stack, or add an existing, saved stack to a new stack set.

" + }, + "CreationTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which the operation was initiated. Note that the creation time for the stack set operation might differ from the creation time of the individual stacks. This is because AWS CloudFormation needs to perform preparatory work for the operation, such as dispatching the work to the requested regions, before actually creating the first stacks.

" + }, + "EndTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which the stack set operation ended, across all accounts and regions specified. Note that this doesn't necessarily mean that the stack set operation was successful, or even attempted, in each account or region.

" + } + }, + "documentation":"

The structure that contains information about a stack set operation.

" + }, + "StackSetOperationAction":{ + "type":"string", + "enum":[ + "CREATE", + "UPDATE", + "DELETE" + ] + }, + "StackSetOperationPreferences":{ + "type":"structure", + "members":{ + "RegionOrder":{ + "shape":"RegionList", + "documentation":"

The order of the regions in which you want to perform the stack operation.

" + }, + "FailureToleranceCount":{ + "shape":"FailureToleranceCount", + "documentation":"

The number of accounts, per region, for which this operation can fail before AWS CloudFormation stops the operation in that region. If the operation is stopped in a region, AWS CloudFormation doesn't attempt the operation in any subsequent regions.

Conditional: You must specify either FailureToleranceCount or FailureTolerancePercentage (but not both).

" + }, + "FailureTolerancePercentage":{ + "shape":"FailureTolerancePercentage", + "documentation":"

The percentage of accounts, per region, for which this stack operation can fail before AWS CloudFormation stops the operation in that region. If the operation is stopped in a region, AWS CloudFormation doesn't attempt the operation in any subsequent regions.

When calculating the number of accounts based on the specified percentage, AWS CloudFormation rounds down to the next whole number.

Conditional: You must specify either FailureToleranceCount or FailureTolerancePercentage, but not both.

" + }, + "MaxConcurrentCount":{ + "shape":"MaxConcurrentCount", + "documentation":"

The maximum number of accounts in which to perform this operation at one time. This is dependent on the value of FailureToleranceCount: MaxConcurrentCount is at most one more than the FailureToleranceCount.

Conditional: You must specify either MaxConcurrentCount or MaxConcurrentPercentage, but not both.

" + }, + "MaxConcurrentPercentage":{ + "shape":"MaxConcurrentPercentage", + "documentation":"

The maximum percentage of accounts in which to perform this operation at one time.

When calculating the number of accounts based on the specified percentage, AWS CloudFormation rounds down to the next whole number, except in cases where rounding down would result in zero. In this case, CloudFormation sets the number to one instead.

Conditional: You must specify either MaxConcurrentCount or MaxConcurrentPercentage, but not both.

" + } + }, + "documentation":"

The user-specified preferences for how AWS CloudFormation performs a stack set operation.
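
A minimal sketch of passing these preferences to a stack set operation through botocore; the stack set name, regions, and counts are illustrative, and only one of each Count/Percentage pair may be set:

    import botocore.session

    session = botocore.session.get_session()
    cfn = session.create_client('cloudformation', region_name='us-east-1')

    # Tolerate one failed account per region; MaxConcurrentCount may be
    # at most FailureToleranceCount + 1, so 2 is the ceiling here.
    preferences = {
        'RegionOrder': ['us-east-1', 'eu-west-1'],
        'FailureToleranceCount': 1,
        'MaxConcurrentCount': 2,
    }

    cfn.update_stack_set(
        StackSetName='my-stack-set',   # illustrative name
        UsePreviousTemplate=True,
        OperationPreferences=preferences,
    )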

" + }, + "StackSetOperationResultStatus":{ + "type":"string", + "enum":[ + "PENDING", + "RUNNING", + "SUCCEEDED", + "FAILED", + "CANCELLED" + ] + }, + "StackSetOperationResultSummaries":{ + "type":"list", + "member":{"shape":"StackSetOperationResultSummary"} + }, + "StackSetOperationResultSummary":{ + "type":"structure", + "members":{ + "Account":{ + "shape":"Account", + "documentation":"

The name of the AWS account for this operation result.

" + }, + "Region":{ + "shape":"Region", + "documentation":"

The name of the AWS region for this operation result.

" + }, + "Status":{ + "shape":"StackSetOperationResultStatus", + "documentation":"

The result status of the stack set operation for the given account in the given region.

" + }, + "StatusReason":{ + "shape":"Reason", + "documentation":"

The reason for the assigned result status.

" + }, + "AccountGateResult":{ + "shape":"AccountGateResult", + "documentation":"

The results of the account gate function AWS CloudFormation invokes, if present, before proceeding with stack set operations in an account.

" + } + }, + "documentation":"

The structure that contains information about a specified operation's results for a given account in a given region.

" + }, + "StackSetOperationStatus":{ + "type":"string", + "enum":[ + "RUNNING", + "SUCCEEDED", + "FAILED", + "STOPPING", + "STOPPED" + ] + }, + "StackSetOperationSummaries":{ + "type":"list", + "member":{"shape":"StackSetOperationSummary"} + }, + "StackSetOperationSummary":{ + "type":"structure", + "members":{ + "OperationId":{ + "shape":"ClientRequestToken", + "documentation":"

The unique ID of the stack set operation.

" + }, + "Action":{ + "shape":"StackSetOperationAction", + "documentation":"

The type of operation: CREATE, UPDATE, or DELETE. Create and delete operations affect only the specified stack instances that are associated with the specified stack set. Update operations affect both the stack set itself and all associated stack instances.

" + }, + "Status":{ + "shape":"StackSetOperationStatus", + "documentation":"

The overall status of the operation.

" + }, + "CreationTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which the operation was initiated. Note that the creation time for the stack set operation might differ from the creation time of the individual stacks. This is because AWS CloudFormation needs to perform preparatory work for the operation, such as dispatching the work to the requested regions, before actually creating the first stacks.

" + }, + "EndTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which the stack set operation ended, across all accounts and regions specified. Note that this doesn't necessarily mean that the stack set operation was successful, or even attempted, in each account or region.

" + } + }, + "documentation":"

The structure that contains summary information about the specified operation.

" + }, + "StackSetStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "DELETED" + ] + }, + "StackSetSummaries":{ + "type":"list", + "member":{"shape":"StackSetSummary"} + }, + "StackSetSummary":{ + "type":"structure", + "members":{ + "StackSetName":{ + "shape":"StackSetName", + "documentation":"

The name of the stack set.

" + }, + "StackSetId":{ + "shape":"StackSetId", + "documentation":"

The ID of the stack set.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of the stack set that you specify when the stack set is created or updated.

" + }, + "Status":{ + "shape":"StackSetStatus", + "documentation":"

The status of the stack set.

" + } + }, + "documentation":"

The structure that contains summary information about the specified stack set.

" + }, "StackStatus":{ "type":"string", "enum":[ @@ -2138,8 +3209,46 @@ "type":"list", "member":{"shape":"TemplateStage"} }, + "StaleRequestException":{ + "type":"structure", + "members":{ + }, + "documentation":"

Another operation has been performed on this stack set since the specified operation was performed.

", + "error":{ + "code":"StaleRequestException", + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "StopStackSetOperationInput":{ + "type":"structure", + "required":[ + "StackSetName", + "OperationId" + ], + "members":{ + "StackSetName":{ + "shape":"StackSetName", + "documentation":"

The name or unique ID of the stack set that you want to stop the operation for.

" + }, + "OperationId":{ + "shape":"ClientRequestToken", + "documentation":"

The ID of the stack operation.

" + } + } + }, + "StopStackSetOperationOutput":{ + "type":"structure", + "members":{ + } + }, "Tag":{ "type":"structure", + "required":[ + "Key", + "Value" + ], "members":{ "Key":{ "shape":"TagKey", @@ -2152,11 +3261,20 @@ }, "documentation":"

The Tag type enables you to specify a key-value pair that can be used to store information about an AWS CloudFormation stack.

" }, - "TagKey":{"type":"string"}, - "TagValue":{"type":"string"}, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagValue":{ + "type":"string", + "max":256, + "min":1 + }, "Tags":{ "type":"list", - "member":{"shape":"Tag"} + "member":{"shape":"Tag"}, + "max":50 }, "TemplateBody":{ "type":"string", @@ -2281,11 +3399,11 @@ }, "Tags":{ "shape":"Tags", - "documentation":"

Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to supported resources in the stack. You can specify a maximum number of 10 tags.

If you don't specify this parameter, AWS CloudFormation doesn't modify the stack's tags. If you specify an empty value, AWS CloudFormation removes all associated tags.

" + "documentation":"

Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to supported resources in the stack. You can specify a maximum of 50 tags.

If you don't specify this parameter, AWS CloudFormation doesn't modify the stack's tags. If you specify an empty value, AWS CloudFormation removes all associated tags.

" }, "ClientRequestToken":{ "shape":"ClientRequestToken", - "documentation":"

A unique identifier for this UpdateStack request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to update a stack with the same name. You might retry UpdateStack requests to ensure that AWS CloudFormation successfully received them.

" + "documentation":"

A unique identifier for this UpdateStack request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to update a stack with the same name. You might retry UpdateStack requests to ensure that AWS CloudFormation successfully received them.

All events triggered by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1.

In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation. For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.
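
A minimal sketch of tagging an update with a token and filtering the resulting events on it; the stack name and token are illustrative:

    import botocore.session

    session = botocore.session.get_session()
    cfn = session.create_client('cloudformation', region_name='us-east-1')

    token = 'deploy-20170824-001'  # illustrative token
    cfn.update_stack(
        StackName='my-stack',
        UsePreviousTemplate=True,
        ClientRequestToken=token,
    )

    events = cfn.describe_stack_events(StackName='my-stack')['StackEvents']
    mine = [e for e in events if e.get('ClientRequestToken') == token]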

" } }, "documentation":"

The input for an UpdateStack action.

" @@ -2300,6 +3418,62 @@ }, "documentation":"

The output for an UpdateStack action.

" }, + "UpdateStackSetInput":{ + "type":"structure", + "required":["StackSetName"], + "members":{ + "StackSetName":{ + "shape":"StackSetName", + "documentation":"

The name or unique ID of the stack set that you want to update.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A brief description of updates that you are making.

" + }, + "TemplateBody":{ + "shape":"TemplateBody", + "documentation":"

The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, see Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL—or set UsePreviousTemplate to true.

" + }, + "TemplateURL":{ + "shape":"TemplateURL", + "documentation":"

The location of the file that contains the template body. The URL must point to a template (maximum size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information, see Template Anatomy in the AWS CloudFormation User Guide.

Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL—or set UsePreviousTemplate to true.

" + }, + "UsePreviousTemplate":{ + "shape":"UsePreviousTemplate", + "documentation":"

Use the existing template that's associated with the stack set that you're updating.

Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL—or set UsePreviousTemplate to true.

" + }, + "Parameters":{ + "shape":"Parameters", + "documentation":"

A list of input parameters for the stack set template.

" + }, + "Capabilities":{ + "shape":"Capabilities", + "documentation":"

A list of values that you must specify before AWS CloudFormation can create certain stack sets. Some stack set templates might include resources that can affect permissions in your AWS account—for example, by creating new AWS Identity and Access Management (IAM) users. For those stack sets, you must explicitly acknowledge their capabilities by specifying this parameter.

The only valid values are CAPABILITY_IAM and CAPABILITY_NAMED_IAM. The following resources require you to specify this parameter:

If your stack template contains these resources, we recommend that you review all permissions that are associated with them and edit their permissions if necessary.

If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify this parameter, this action returns an InsufficientCapabilities error.

For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

The key-value pairs to associate with this stack set and the stacks created from it. AWS CloudFormation also propagates these tags to supported resources that are created in the stacks. You can specify a maximum of 50 tags.

If you specify tags for this parameter, those tags replace any list of tags that are currently associated with this stack set. This means:

If you specify new tags as part of an UpdateStackSet action, AWS CloudFormation checks to see if you have the required IAM permission to tag resources. If you omit tags that are currently associated with the stack set from the list of tags you specify, AWS CloudFormation assumes that you want to remove those tags from the stack set, and checks to see if you have permission to untag resources. If you don't have the necessary permission(s), the entire UpdateStackSet action fails with an access denied error, and the stack set is not updated.

" + }, + "OperationPreferences":{ + "shape":"StackSetOperationPreferences", + "documentation":"

Preferences for how AWS CloudFormation performs this stack set operation.

" + }, + "OperationId":{ + "shape":"ClientRequestToken", + "documentation":"

The unique ID for this stack set operation.

The operation ID also functions as an idempotency token, to ensure that AWS CloudFormation performs the stack set operation only once, even if you retry the request multiple times. You might retry stack set operation requests to ensure that AWS CloudFormation successfully received them.

If you don't specify an operation ID, AWS CloudFormation generates one automatically.

Repeating this stack set operation with a new operation ID retries all stack instances whose status is OUTDATED.
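
A minimal sketch of pinning the operation ID so that a retried request is recognized as the same operation rather than starting a new one; the stack set name is illustrative:

    import uuid

    import botocore.session
    from botocore.exceptions import EndpointConnectionError

    session = botocore.session.get_session()
    cfn = session.create_client('cloudformation', region_name='us-east-1')

    op_id = str(uuid.uuid4())
    for attempt in range(3):
        try:
            cfn.update_stack_set(
                StackSetName='my-stack-set',
                UsePreviousTemplate=True,
                OperationId=op_id,  # same ID on every retry
            )
            break
        except EndpointConnectionError:
            continue  # safe to resend with the same OperationId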

", + "idempotencyToken":true + } + } + }, + "UpdateStackSetOutput":{ + "type":"structure", + "members":{ + "OperationId":{ + "shape":"ClientRequestToken", + "documentation":"

The unique ID for this stack set operation.

" + } + } + }, "Url":{"type":"string"}, "UsePreviousTemplate":{"type":"boolean"}, "UsePreviousValue":{"type":"boolean"}, @@ -2345,5 +3519,5 @@ }, "Version":{"type":"string"} }, - "documentation":"AWS CloudFormation

AWS CloudFormation allows you to create and manage AWS infrastructure deployments predictably and repeatedly. You can use AWS CloudFormation to leverage AWS products, such as Amazon Elastic Compute Cloud, Amazon Elastic Block Store, Amazon Simple Notification Service, Elastic Load Balancing, and Auto Scaling to build highly-reliable, highly scalable, cost-effective applications without creating or configuring the underlying AWS infrastructure.

With AWS CloudFormation, you declare all of your resources and dependencies in a template file. The template defines a collection of resources as a single unit called a stack. AWS CloudFormation creates and deletes all member resources of the stack together and manages all dependencies between the resources for you.

For more information about AWS CloudFormation, see the AWS CloudFormation Product Page.

Amazon CloudFormation makes use of other AWS products. If you need additional technical information about a specific AWS product, you can find the product's technical documentation at docs.aws.amazon.com.

" + "documentation":"AWS CloudFormation

AWS CloudFormation allows you to create and manage AWS infrastructure deployments predictably and repeatedly. You can use AWS CloudFormation to leverage AWS products, such as Amazon Elastic Compute Cloud, Amazon Elastic Block Store, Amazon Simple Notification Service, Elastic Load Balancing, and Auto Scaling to build highly reliable, highly scalable, cost-effective applications without creating or configuring the underlying AWS infrastructure.

With AWS CloudFormation, you declare all of your resources and dependencies in a template file. The template defines a collection of resources as a single unit called a stack. AWS CloudFormation creates and deletes all member resources of the stack together and manages all dependencies between the resources for you.

For more information about AWS CloudFormation, see the AWS CloudFormation Product Page.

Amazon CloudFormation makes use of other AWS products. If you need additional technical information about a specific AWS product, you can find the product's technical documentation at docs.aws.amazon.com.

APIs for stacks

When you use AWS CloudFormation, you manage related resources as a single unit called a stack. You create, update, and delete a collection of resources by creating, updating, and deleting stacks. All the resources in a stack are defined by the stack's AWS CloudFormation template.

Actions

Data Types

APIs for change sets

If you need to make changes to the running resources in a stack, you update the stack. Before making changes to your resources, you can generate a change set, which is a summary of your proposed changes. Change sets allow you to see how your changes might impact your running resources, especially for critical resources, before implementing them.

Actions

Data Types

APIs for stack sets

AWS CloudFormation StackSets lets you create a collection, or stack set, of stacks that can automatically and safely provision a common set of AWS resources across multiple AWS accounts and multiple AWS regions from a single AWS CloudFormation template. When you create a stack set, AWS CloudFormation provisions a stack in each of the specified accounts and regions by using the supplied AWS CloudFormation template and parameters. Stack sets let you manage a common set of AWS resources in a selection of accounts and regions in a single operation.

Actions

Data Types

" } diff --git a/botocore/data/cloudhsmv2/2017-04-28/paginators-1.json b/botocore/data/cloudhsmv2/2017-04-28/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/cloudhsmv2/2017-04-28/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/cloudhsmv2/2017-04-28/service-2.json b/botocore/data/cloudhsmv2/2017-04-28/service-2.json new file mode 100644 index 00000000..eed606cd --- /dev/null +++ b/botocore/data/cloudhsmv2/2017-04-28/service-2.json @@ -0,0 +1,838 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2017-04-28", + "endpointPrefix":"cloudhsmv2", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"CloudHSM V2", + "serviceFullName":"AWS CloudHSM V2", + "signatureVersion":"v4", + "signingName":"cloudhsm", + "targetPrefix":"BaldrApiService", + "uid":"cloudhsmv2-2017-04-28" + }, + "operations":{ + "CreateCluster":{ + "name":"CreateCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateClusterRequest"}, + "output":{"shape":"CreateClusterResponse"}, + "errors":[ + {"shape":"CloudHsmInternalFailureException"}, + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmInvalidRequestException"}, + {"shape":"CloudHsmAccessDeniedException"} + ], + "documentation":"

Creates a new AWS CloudHSM cluster.

" + }, + "CreateHsm":{ + "name":"CreateHsm", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateHsmRequest"}, + "output":{"shape":"CreateHsmResponse"}, + "errors":[ + {"shape":"CloudHsmInternalFailureException"}, + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmInvalidRequestException"}, + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmAccessDeniedException"} + ], + "documentation":"

Creates a new hardware security module (HSM) in the specified AWS CloudHSM cluster.

" + }, + "DeleteCluster":{ + "name":"DeleteCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteClusterRequest"}, + "output":{"shape":"DeleteClusterResponse"}, + "errors":[ + {"shape":"CloudHsmInternalFailureException"}, + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmInvalidRequestException"}, + {"shape":"CloudHsmAccessDeniedException"} + ], + "documentation":"

Deletes the specified AWS CloudHSM cluster. Before you can delete a cluster, you must delete all HSMs in the cluster. To see if the cluster contains any HSMs, use DescribeClusters. To delete an HSM, use DeleteHsm.

" + }, + "DeleteHsm":{ + "name":"DeleteHsm", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteHsmRequest"}, + "output":{"shape":"DeleteHsmResponse"}, + "errors":[ + {"shape":"CloudHsmInternalFailureException"}, + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmInvalidRequestException"}, + {"shape":"CloudHsmAccessDeniedException"} + ], + "documentation":"

Deletes the specified HSM. To specify an HSM, you can use its identifier (ID), the IP address of the HSM's elastic network interface (ENI), or the ID of the HSM's ENI. You need to specify only one of these values. To find these values, use DescribeClusters.

" + }, + "DescribeBackups":{ + "name":"DescribeBackups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeBackupsRequest"}, + "output":{"shape":"DescribeBackupsResponse"}, + "errors":[ + {"shape":"CloudHsmInternalFailureException"}, + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmInvalidRequestException"}, + {"shape":"CloudHsmAccessDeniedException"} + ], + "documentation":"

Gets information about backups of AWS CloudHSM clusters.

This is a paginated operation, which means that each response might contain only a subset of all the backups. When the response contains only a subset of backups, it includes a NextToken value. Use this value in a subsequent DescribeBackups request to get more backups. When you receive a response with no NextToken (or an empty or null value), that means there are no more backups to get.
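
The paginators file shipped for this service in this release is empty, so the NextToken loop described above is driven by hand. A minimal sketch with botocore; the region is illustrative:

    import botocore.session

    session = botocore.session.get_session()
    hsm = session.create_client('cloudhsmv2', region_name='us-east-1')

    backups, kwargs = [], {}
    while True:
        page = hsm.describe_backups(**kwargs)
        backups.extend(page.get('Backups', []))
        token = page.get('NextToken')
        if not token:  # no NextToken means no more backups
            break
        kwargs['NextToken'] = token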

" + }, + "DescribeClusters":{ + "name":"DescribeClusters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeClustersRequest"}, + "output":{"shape":"DescribeClustersResponse"}, + "errors":[ + {"shape":"CloudHsmInternalFailureException"}, + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmInvalidRequestException"}, + {"shape":"CloudHsmAccessDeniedException"} + ], + "documentation":"

Gets information about AWS CloudHSM clusters.

This is a paginated operation, which means that each response might contain only a subset of all the clusters. When the response contains only a subset of clusters, it includes a NextToken value. Use this value in a subsequent DescribeClusters request to get more clusters. When you receive a response with no NextToken (or an empty or null value), that means there are no more clusters to get.

" + }, + "InitializeCluster":{ + "name":"InitializeCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"InitializeClusterRequest"}, + "output":{"shape":"InitializeClusterResponse"}, + "errors":[ + {"shape":"CloudHsmInternalFailureException"}, + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmInvalidRequestException"}, + {"shape":"CloudHsmAccessDeniedException"} + ], + "documentation":"

Claims an AWS CloudHSM cluster by submitting the cluster certificate issued by your issuing certificate authority (CA) and the CA's root certificate. Before you can claim a cluster, you must sign the cluster's certificate signing request (CSR) with your issuing CA. To get the cluster's CSR, use DescribeClusters.
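
A minimal sketch of the claim step, assuming the CSR from DescribeClusters has already been signed offline; the cluster ID and file paths are illustrative:

    import botocore.session

    session = botocore.session.get_session()
    hsm = session.create_client('cloudhsmv2', region_name='us-east-1')

    with open('cluster-cert.pem') as f:
        signed_cert = f.read()   # cluster certificate signed by your CA
    with open('ca-chain.pem') as f:
        trust_anchor = f.read()  # your CA's root certificate or chain

    resp = hsm.initialize_cluster(
        ClusterId='cluster-te3sqmvbo2a',  # illustrative ID
        SignedCert=signed_cert,
        TrustAnchor=trust_anchor,
    )
    print(resp['State'], resp.get('StateMessage', ''))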

" + }, + "ListTags":{ + "name":"ListTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsRequest"}, + "output":{"shape":"ListTagsResponse"}, + "errors":[ + {"shape":"CloudHsmInternalFailureException"}, + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmInvalidRequestException"}, + {"shape":"CloudHsmAccessDeniedException"} + ], + "documentation":"

Gets a list of tags for the specified AWS CloudHSM cluster.

This is a paginated operation, which means that each response might contain only a subset of all the tags. When the response contains only a subset of tags, it includes a NextToken value. Use this value in a subsequent ListTags request to get more tags. When you receive a response with no NextToken (or an empty or null value), that means there are no more tags to get.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"CloudHsmInternalFailureException"}, + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmInvalidRequestException"}, + {"shape":"CloudHsmAccessDeniedException"} + ], + "documentation":"

Adds or overwrites one or more tags for the specified AWS CloudHSM cluster.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"CloudHsmInternalFailureException"}, + {"shape":"CloudHsmServiceException"}, + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmInvalidRequestException"}, + {"shape":"CloudHsmAccessDeniedException"} + ], + "documentation":"

Removes the specified tag or tags from the specified AWS CloudHSM cluster.

" + } + }, + "shapes":{ + "Backup":{ + "type":"structure", + "required":["BackupId"], + "members":{ + "BackupId":{ + "shape":"BackupId", + "documentation":"

The identifier (ID) of the backup.

" + }, + "BackupState":{ + "shape":"BackupState", + "documentation":"

The state of the backup.

" + }, + "ClusterId":{ + "shape":"ClusterId", + "documentation":"

The identifier (ID) of the cluster that was backed up.

" + }, + "CreateTimestamp":{ + "shape":"Timestamp", + "documentation":"

The date and time when the backup was created.

" + } + }, + "documentation":"

Contains information about a backup of an AWS CloudHSM cluster.

" + }, + "BackupId":{ + "type":"string", + "pattern":"backup-[2-7a-zA-Z]{11,16}" + }, + "BackupPolicy":{ + "type":"string", + "enum":["DEFAULT"] + }, + "BackupState":{ + "type":"string", + "enum":[ + "CREATE_IN_PROGRESS", + "READY", + "DELETED" + ] + }, + "Backups":{ + "type":"list", + "member":{"shape":"Backup"} + }, + "Cert":{ + "type":"string", + "max":5000, + "pattern":"[a-zA-Z0-9+-/=\\s]*" + }, + "Certificates":{ + "type":"structure", + "members":{ + "ClusterCsr":{ + "shape":"Cert", + "documentation":"

The cluster's certificate signing request (CSR). The CSR exists only when the cluster's state is UNINITIALIZED.

" + }, + "HsmCertificate":{ + "shape":"Cert", + "documentation":"

The HSM certificate issued (signed) by the HSM hardware.

" + }, + "AwsHardwareCertificate":{ + "shape":"Cert", + "documentation":"

The HSM hardware certificate issued (signed) by AWS CloudHSM.

" + }, + "ManufacturerHardwareCertificate":{ + "shape":"Cert", + "documentation":"

The HSM hardware certificate issued (signed) by the hardware manufacturer.

" + }, + "ClusterCertificate":{ + "shape":"Cert", + "documentation":"

The cluster certificate issued (signed) by the issuing certificate authority (CA) of the cluster's owner.

" + } + }, + "documentation":"

Contains one or more certificates or a certificate signing request (CSR).

" + }, + "CloudHsmAccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"errorMessage"} + }, + "documentation":"

The request was rejected because the requester does not have permission to perform the requested operation.

", + "exception":true + }, + "CloudHsmInternalFailureException":{ + "type":"structure", + "members":{ + "Message":{"shape":"errorMessage"} + }, + "documentation":"

The request was rejected because of an AWS CloudHSM internal failure. The request can be retried.

", + "exception":true, + "fault":true + }, + "CloudHsmInvalidRequestException":{ + "type":"structure", + "members":{ + "Message":{"shape":"errorMessage"} + }, + "documentation":"

The request was rejected because it is not a valid request.

", + "exception":true + }, + "CloudHsmResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"errorMessage"} + }, + "documentation":"

The request was rejected because it refers to a resource that cannot be found.

", + "exception":true + }, + "CloudHsmServiceException":{ + "type":"structure", + "members":{ + "Message":{"shape":"errorMessage"} + }, + "documentation":"

The request was rejected because an error occurred.

", + "exception":true + }, + "Cluster":{ + "type":"structure", + "members":{ + "BackupPolicy":{ + "shape":"BackupPolicy", + "documentation":"

The cluster's backup policy.

" + }, + "ClusterId":{ + "shape":"ClusterId", + "documentation":"

The cluster's identifier (ID).

" + }, + "CreateTimestamp":{ + "shape":"Timestamp", + "documentation":"

The date and time when the cluster was created.

" + }, + "Hsms":{ + "shape":"Hsms", + "documentation":"

Contains information about the HSMs in the cluster.

" + }, + "HsmType":{ + "shape":"HsmType", + "documentation":"

The type of HSM that the cluster contains.

" + }, + "PreCoPassword":{ + "shape":"PreCoPassword", + "documentation":"

The default password for the cluster's Pre-Crypto Officer (PRECO) user.

" + }, + "SecurityGroup":{ + "shape":"SecurityGroup", + "documentation":"

The identifier (ID) of the cluster's security group.

" + }, + "SourceBackupId":{ + "shape":"BackupId", + "documentation":"

The identifier (ID) of the backup used to create the cluster. This value exists only when the cluster was created from a backup.

" + }, + "State":{ + "shape":"ClusterState", + "documentation":"

The cluster's state.

" + }, + "StateMessage":{ + "shape":"StateMessage", + "documentation":"

A description of the cluster's state.

" + }, + "SubnetMapping":{ + "shape":"ExternalSubnetMapping", + "documentation":"

A map of the cluster's subnets and their corresponding Availability Zones.

" + }, + "VpcId":{ + "shape":"VpcId", + "documentation":"

The identifier (ID) of the virtual private cloud (VPC) that contains the cluster.

" + }, + "Certificates":{ + "shape":"Certificates", + "documentation":"

Contains one or more certificates or a certificate signing request (CSR).

" + } + }, + "documentation":"

Contains information about an AWS CloudHSM cluster.

" + }, + "ClusterId":{ + "type":"string", + "pattern":"cluster-[2-7a-zA-Z]{11,16}" + }, + "ClusterState":{ + "type":"string", + "enum":[ + "CREATE_IN_PROGRESS", + "UNINITIALIZED", + "INITIALIZE_IN_PROGRESS", + "INITIALIZED", + "ACTIVE", + "UPDATE_IN_PROGRESS", + "DELETE_IN_PROGRESS", + "DELETED", + "DEGRADED" + ] + }, + "Clusters":{ + "type":"list", + "member":{"shape":"Cluster"} + }, + "CreateClusterRequest":{ + "type":"structure", + "required":[ + "SubnetIds", + "HsmType" + ], + "members":{ + "SubnetIds":{ + "shape":"SubnetIds", + "documentation":"

The identifiers (IDs) of the subnets where you are creating the cluster. You must specify at least one subnet. If you specify multiple subnets, they must meet the following criteria:

" + }, + "HsmType":{ + "shape":"HsmType", + "documentation":"

The type of HSM to use in the cluster. Currently the only allowed value is hsm1.medium.

" + }, + "SourceBackupId":{ + "shape":"BackupId", + "documentation":"

The identifier (ID) of the cluster backup to restore. Use this value to restore the cluster from a backup instead of creating a new cluster. To find the backup ID, use DescribeBackups.

" + } + } + }, + "CreateClusterResponse":{ + "type":"structure", + "members":{ + "Cluster":{ + "shape":"Cluster", + "documentation":"

Information about the cluster that was created.

" + } + } + }, + "CreateHsmRequest":{ + "type":"structure", + "required":[ + "ClusterId", + "AvailabilityZone" + ], + "members":{ + "ClusterId":{ + "shape":"ClusterId", + "documentation":"

The identifier (ID) of the HSM's cluster. To find the cluster ID, use DescribeClusters.

" + }, + "AvailabilityZone":{ + "shape":"ExternalAz", + "documentation":"

The Availability Zone where you are creating the HSM. To find the cluster's Availability Zones, use DescribeClusters.

" + }, + "IpAddress":{ + "shape":"IpAddress", + "documentation":"

The HSM's IP address. If you specify an IP address, use an available address from the subnet that maps to the Availability Zone where you are creating the HSM. If you don't specify an IP address, one is chosen for you from that subnet.

" + } + } + }, + "CreateHsmResponse":{ + "type":"structure", + "members":{ + "Hsm":{ + "shape":"Hsm", + "documentation":"

Information about the HSM that was created.

" + } + } + }, + "DeleteClusterRequest":{ + "type":"structure", + "required":["ClusterId"], + "members":{ + "ClusterId":{ + "shape":"ClusterId", + "documentation":"

The identifier (ID) of the cluster that you are deleting. To find the cluster ID, use DescribeClusters.

" + } + } + }, + "DeleteClusterResponse":{ + "type":"structure", + "members":{ + "Cluster":{ + "shape":"Cluster", + "documentation":"

Information about the cluster that was deleted.

" + } + } + }, + "DeleteHsmRequest":{ + "type":"structure", + "required":["ClusterId"], + "members":{ + "ClusterId":{ + "shape":"ClusterId", + "documentation":"

The identifier (ID) of the cluster that contains the HSM that you are deleting.

" + }, + "HsmId":{ + "shape":"HsmId", + "documentation":"

The identifier (ID) of the HSM that you are deleting.

" + }, + "EniId":{ + "shape":"EniId", + "documentation":"

The identifier (ID) of the elastic network interface (ENI) of the HSM that you are deleting.

" + }, + "EniIp":{ + "shape":"IpAddress", + "documentation":"

The IP address of the elastic network interface (ENI) of the HSM that you are deleting.

" + } + } + }, + "DeleteHsmResponse":{ + "type":"structure", + "members":{ + "HsmId":{ + "shape":"HsmId", + "documentation":"

The identifier (ID) of the HSM that was deleted.

" + } + } + }, + "DescribeBackupsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

The NextToken value that you received in the previous response. Use this value to get more backups.

" + }, + "MaxResults":{ + "shape":"MaxSize", + "documentation":"

The maximum number of backups to return in the response. When there are more backups than the number you specify, the response contains a NextToken value.

" + }, + "Filters":{ + "shape":"Filters", + "documentation":"

One or more filters to limit the items returned in the response.

Use the backupIds filter to return only the specified backups. Specify backups by their backup identifier (ID).

Use the clusterIds filter to return only the backups for the specified clusters. Specify clusters by their cluster identifier (ID).

Use the states filter to return only backups that match the specified state.

" + } + } + }, + "DescribeBackupsResponse":{ + "type":"structure", + "members":{ + "Backups":{ + "shape":"Backups", + "documentation":"

A list of backups.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

An opaque string that indicates that the response contains only a subset of backups. Use this value in a subsequent DescribeBackups request to get more backups.

" + } + } + }, + "DescribeClustersRequest":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"Filters", + "documentation":"

One or more filters to limit the items returned in the response.

Use the clusterIds filter to return only the specified clusters. Specify clusters by their cluster identifier (ID).

Use the vpcIds filter to return only the clusters in the specified virtual private clouds (VPCs). Specify VPCs by their VPC identifier (ID).

Use the states filter to return only clusters that match the specified state.
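
A minimal sketch of the filter map: keys are filter field names and values are lists of strings; the region and filter values are illustrative:

    import botocore.session

    session = botocore.session.get_session()
    hsm = session.create_client('cloudhsmv2', region_name='us-east-1')

    resp = hsm.describe_clusters(
        Filters={'states': ['ACTIVE', 'DEGRADED']},
        MaxResults=25,
    )
    for cluster in resp.get('Clusters', []):
        print(cluster['ClusterId'], cluster['State'])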

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The NextToken value that you received in the previous response. Use this value to get more clusters.

" + }, + "MaxResults":{ + "shape":"MaxSize", + "documentation":"

The maximum number of clusters to return in the response. When there are more clusters than the number you specify, the response contains a NextToken value.

" + } + } + }, + "DescribeClustersResponse":{ + "type":"structure", + "members":{ + "Clusters":{ + "shape":"Clusters", + "documentation":"

A list of clusters.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

An opaque string that indicates that the response contains only a subset of clusters. Use this value in a subsequent DescribeClusters request to get more clusters.

" + } + } + }, + "EniId":{ + "type":"string", + "pattern":"eni-[0-9a-fA-F]{8}" + }, + "ExternalAz":{ + "type":"string", + "pattern":"[a-z]{2}(-(gov|isob|iso))?-(east|west|north|south|central){1,2}-\\d[a-z]" + }, + "ExternalSubnetMapping":{ + "type":"map", + "key":{"shape":"ExternalAz"}, + "value":{"shape":"SubnetId"} + }, + "Field":{ + "type":"string", + "pattern":"[a-zA-Z0-9_-]+" + }, + "Filters":{ + "type":"map", + "key":{"shape":"Field"}, + "value":{"shape":"Strings"} + }, + "Hsm":{ + "type":"structure", + "required":["HsmId"], + "members":{ + "AvailabilityZone":{ + "shape":"ExternalAz", + "documentation":"

The Availability Zone that contains the HSM.

" + }, + "ClusterId":{ + "shape":"ClusterId", + "documentation":"

The identifier (ID) of the cluster that contains the HSM.

" + }, + "SubnetId":{ + "shape":"SubnetId", + "documentation":"

The subnet that contains the HSM's elastic network interface (ENI).

" + }, + "EniId":{ + "shape":"EniId", + "documentation":"

The identifier (ID) of the HSM's elastic network interface (ENI).

" + }, + "EniIp":{ + "shape":"IpAddress", + "documentation":"

The IP address of the HSM's elastic network interface (ENI).

" + }, + "HsmId":{ + "shape":"HsmId", + "documentation":"

The HSM's identifier (ID).

" + }, + "State":{ + "shape":"HsmState", + "documentation":"

The HSM's state.

" + }, + "StateMessage":{ + "shape":"String", + "documentation":"

A description of the HSM's state.

" + } + }, + "documentation":"

Contains information about a hardware security module (HSM) in an AWS CloudHSM cluster.

" + }, + "HsmId":{ + "type":"string", + "pattern":"hsm-[2-7a-zA-Z]{11,16}" + }, + "HsmState":{ + "type":"string", + "enum":[ + "CREATE_IN_PROGRESS", + "ACTIVE", + "DEGRADED", + "DELETE_IN_PROGRESS", + "DELETED" + ] + }, + "HsmType":{ + "type":"string", + "pattern":"(hsm1\\.medium)" + }, + "Hsms":{ + "type":"list", + "member":{"shape":"Hsm"} + }, + "InitializeClusterRequest":{ + "type":"structure", + "required":[ + "ClusterId", + "SignedCert", + "TrustAnchor" + ], + "members":{ + "ClusterId":{ + "shape":"ClusterId", + "documentation":"

The identifier (ID) of the cluster that you are claiming. To find the cluster ID, use DescribeClusters.

" + }, + "SignedCert":{ + "shape":"Cert", + "documentation":"

The cluster certificate issued (signed) by your issuing certificate authority (CA). The certificate must be in PEM format.

" + }, + "TrustAnchor":{ + "shape":"Cert", + "documentation":"

The issuing certificate of the issuing certificate authority (CA) that issued (signed) the cluster certificate. This can be a root (self-signed) certificate or a certificate chain that begins with the certificate that issued the cluster certificate and ends with a root certificate. The certificate or certificate chain must be in PEM format.

" + } + } + }, + "InitializeClusterResponse":{ + "type":"structure", + "members":{ + "State":{ + "shape":"ClusterState", + "documentation":"

The cluster's state.

" + }, + "StateMessage":{ + "shape":"StateMessage", + "documentation":"

A description of the cluster's state.

" + } + } + }, + "IpAddress":{ + "type":"string", + "pattern":"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}" + }, + "ListTagsRequest":{ + "type":"structure", + "required":["ResourceId"], + "members":{ + "ResourceId":{ + "shape":"ClusterId", + "documentation":"

The cluster identifier (ID) for the cluster whose tags you are getting. To find the cluster ID, use DescribeClusters.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The NextToken value that you received in the previous response. Use this value to get more tags.

" + }, + "MaxResults":{ + "shape":"MaxSize", + "documentation":"

The maximum number of tags to return in the response. When there are more tags than the number you specify, the response contains a NextToken value.

" + } + } + }, + "ListTagsResponse":{ + "type":"structure", + "required":["TagList"], + "members":{ + "TagList":{ + "shape":"TagList", + "documentation":"

A list of tags.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

An opaque string that indicates that the response contains only a subset of tags. Use this value in a subsequent ListTags request to get more tags.

" + } + } + }, + "MaxSize":{ + "type":"integer", + "max":100, + "min":1 + }, + "NextToken":{ + "type":"string", + "max":256, + "pattern":".*" + }, + "PreCoPassword":{ + "type":"string", + "max":32, + "min":7 + }, + "SecurityGroup":{ + "type":"string", + "pattern":"sg-[0-9a-fA-F]" + }, + "StateMessage":{ + "type":"string", + "max":300, + "pattern":".*" + }, + "String":{"type":"string"}, + "Strings":{ + "type":"list", + "member":{"shape":"String"} + }, + "SubnetId":{ + "type":"string", + "pattern":"subnet-[0-9a-fA-F]{8}" + }, + "SubnetIds":{ + "type":"list", + "member":{"shape":"SubnetId"}, + "max":10, + "min":1 + }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

The key of the tag.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

The value of the tag.

" + } + }, + "documentation":"

Contains a tag. A tag is a key-value pair.

" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":1 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":50, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceId", + "TagList" + ], + "members":{ + "ResourceId":{ + "shape":"ClusterId", + "documentation":"

The cluster identifier (ID) for the cluster that you are tagging. To find the cluster ID, use DescribeClusters.

" + }, + "TagList":{ + "shape":"TagList", + "documentation":"

A list of one or more tags.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "Timestamp":{"type":"timestamp"}, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceId", + "TagKeyList" + ], + "members":{ + "ResourceId":{ + "shape":"ClusterId", + "documentation":"

The cluster identifier (ID) for the cluster whose tags you are removing. To find the cluster ID, use DescribeClusters.

" + }, + "TagKeyList":{ + "shape":"TagKeyList", + "documentation":"

A list of one or more tag keys for the tags that you are removing. Specify only the tag keys, not the tag values.

" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "VpcId":{ + "type":"string", + "pattern":"vpc-[0-9a-fA-F]" + }, + "errorMessage":{"type":"string"} + }, + "documentation":"

For more information about AWS CloudHSM, see AWS CloudHSM and the AWS CloudHSM User Guide.

" +} diff --git a/botocore/data/cloudwatch/2010-08-01/service-2.json b/botocore/data/cloudwatch/2010-08-01/service-2.json index 3bf15427..8a9bc291 100644 --- a/botocore/data/cloudwatch/2010-08-01/service-2.json +++ b/botocore/data/cloudwatch/2010-08-01/service-2.json @@ -139,7 +139,7 @@ {"shape":"InvalidParameterCombinationException"}, {"shape":"InternalServiceFault"} ], - "documentation":"

Gets statistics for the specified metric.

Amazon CloudWatch retains metric data as follows:

CloudWatch started retaining 5-minute and 1-hour metric data as of July 9, 2016.

The maximum number of data points returned from a single call is 1,440. If you request more than 1,440 data points, CloudWatch returns an error. To reduce the number of data points, you can narrow the specified time range and make multiple requests across adjacent time ranges, or you can increase the specified period. A period can be as short as one minute (60 seconds). Data points are not returned in chronological order.

CloudWatch aggregates data points based on the length of the period that you specify. For example, if you request statistics with a one-hour period, CloudWatch aggregates all data points with time stamps that fall within each one-hour period. Therefore, the number of values aggregated by CloudWatch is larger than the number of data points returned.

CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:

For a list of metrics and dimensions supported by AWS services, see the Amazon CloudWatch Metrics and Dimensions Reference in the Amazon CloudWatch User Guide.

" + "documentation":"

Gets statistics for the specified metric.

The maximum number of data points returned from a single call is 1,440. If you request more than 1,440 data points, CloudWatch returns an error. To reduce the number of data points, you can narrow the specified time range and make multiple requests across adjacent time ranges, or you can increase the specified period. Data points are not returned in chronological order.

CloudWatch aggregates data points based on the length of the period that you specify. For example, if you request statistics with a one-hour period, CloudWatch aggregates all data points with time stamps that fall within each one-hour period. Therefore, the number of values aggregated by CloudWatch is larger than the number of data points returned.

CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:

Amazon CloudWatch retains metric data as follows:

Data points that are initially published with a shorter period are aggregated together for long-term storage. For example, if you collect data using a period of 1 minute, the data remains available for 15 days with 1-minute resolution. After 15 days, this data is still available, but is aggregated and retrievable only with a resolution of 5 minutes. After 63 days, the data is further aggregated and is available with a resolution of 1 hour.

CloudWatch started retaining 5-minute and 1-hour metric data as of July 9, 2016.

For information about metrics and dimensions supported by AWS services, see the Amazon CloudWatch Metrics and Dimensions Reference in the Amazon CloudWatch User Guide.
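
A minimal sketch of a statistics query over the last hour; the namespace, metric, and dimension values are illustrative:

    from datetime import datetime, timedelta

    import botocore.session

    session = botocore.session.get_session()
    cw = session.create_client('cloudwatch', region_name='us-east-1')

    now = datetime.utcnow()
    resp = cw.get_metric_statistics(
        Namespace='AWS/EC2',
        MetricName='CPUUtilization',
        Dimensions=[{'Name': 'InstanceId', 'Value': 'i-1234567890abcdef0'}],
        StartTime=now - timedelta(hours=1),
        EndTime=now,
        Period=300,              # 5-minute granularity, a multiple of 60
        Statistics=['Average'],
    )
    # Data points are not returned in chronological order; sort them.
    points = sorted(resp['Datapoints'], key=lambda p: p['Timestamp'])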

" }, "ListDashboards":{ "name":"ListDashboards", @@ -190,7 +190,7 @@ {"shape":"DashboardInvalidInputError"}, {"shape":"InternalServiceFault"} ], - "documentation":"

Creates a dashboard if it does not already exist, or updates an existing dashboard. If you update a dashboard, the entire contents are replaced with what you specify here.

You can have up to 500 dashboards per account. All dashboards in your account are global, not region-specific.

To copy an existing dashboard, use GetDashboard, and then use the data returned within DashboardBody as the template for the new dashboard when you call PutDashboard to create the copy.

" + "documentation":"

Creates a dashboard if it does not already exist, or updates an existing dashboard. If you update a dashboard, the entire contents are replaced with what you specify here.

You can have up to 500 dashboards per account. All dashboards in your account are global, not region-specific.

A simple way to create a dashboard using PutDashboard is to copy an existing dashboard. To copy an existing dashboard using the console, you can load the dashboard and then use the View/edit source command in the Actions menu to display the JSON block for that dashboard. Another way to copy a dashboard is to use GetDashboard, and then use the data returned within DashboardBody as the template for the new dashboard when you call PutDashboard.

When you create a dashboard with PutDashboard, a good practice is to add a text widget at the top of the dashboard with a message that the dashboard was created by script and should not be changed in the console. This message could also point console users to the location of the DashboardBody script or the CloudFormation template used to create the dashboard.

" }, "PutMetricAlarm":{ "name":"PutMetricAlarm", @@ -750,7 +750,7 @@ }, "StartTime":{ "shape":"Timestamp", - "documentation":"

The time stamp that determines the first data point to return. Start times are evaluated relative to the time that CloudWatch receives the request.

The value specified is inclusive; results include data points with the specified time stamp. The time stamp must be in ISO 8601 UTC format (for example, 2016-10-03T23:00:00Z).

CloudWatch rounds the specified time stamp as follows:

" + "documentation":"

The time stamp that determines the first data point to return. Start times are evaluated relative to the time that CloudWatch receives the request.

The value specified is inclusive; results include data points with the specified time stamp. The time stamp must be in ISO 8601 UTC format (for example, 2016-10-03T23:00:00Z).

CloudWatch rounds the specified time stamp as follows:

If you set Period to 5, 10, or 30, the start time of your request is rounded down to the nearest time that corresponds to even 5-, 10-, or 30-second divisions of a minute. For example, if you make a query at (HH:mm:ss) 01:05:23 for the previous 10-second period, the start time of your request is rounded down and you receive data from 01:05:10 to 01:05:20. If you make a query at 15:07:17 for the previous 5 minutes of data, using a period of 5 seconds, you receive data timestamped between 15:02:15 and 15:07:15.

" }, "EndTime":{ "shape":"Timestamp", @@ -758,7 +758,7 @@ }, "Period":{ "shape":"Period", - "documentation":"

The granularity, in seconds, of the returned data points. A period can be as short as one minute (60 seconds) and must be a multiple of 60.

If the StartTime parameter specifies a time stamp that is greater than 15 days ago, you must specify the period as follows or no data points in that time range is returned:

" + "documentation":"

The granularity, in seconds, of the returned data points. For metrics with regular resolution, a period can be as short as one minute (60 seconds) and must be a multiple of 60. For high-resolution metrics that are collected at intervals of less than one minute, the period can be 1, 5, 10, 30, 60, or any multiple of 60. High-resolution metrics are those metrics stored by a PutMetricData call that includes a StorageResolution of 1 second.

If the StartTime parameter specifies a time stamp that is greater than 3 hours ago, you must specify the period as follows or no data points in that time range are returned:

" }, "Statistics":{ "shape":"Statistics", @@ -1092,7 +1092,7 @@ }, "EvaluateLowSampleCountPercentile":{ "shape":"EvaluateLowSampleCountPercentile", - "documentation":"

Used only for alarms based on percentiles. If ignore, the alarm state does not change during periods with too few data points to be statistically significant. If evaluate or this parameter is not used, the alarm will always be evaluated and possibly change state no matter how many data points are available.

" + "documentation":"

Used only for alarms based on percentiles. If ignore, the alarm state does not change during periods with too few data points to be statistically significant. If evaluate or this parameter is not used, the alarm is always evaluated and possibly changes state no matter how many data points are available.

" } }, "documentation":"

Represents an alarm.

", @@ -1159,7 +1159,10 @@ "shape":"StandardUnit", "documentation":"

The unit of the metric.

" }, - "StorageResolution":{"shape":"StorageResolution"} + "StorageResolution":{ + "shape":"StorageResolution", + "documentation":"

Valid values are 1 and 60. Setting this to 1 specifies this metric as a high-resolution metric, so that CloudWatch stores the metric with sub-minute resolution down to one second. Setting this to 60 specifies this metric as a regular-resolution metric, which CloudWatch stores at 1-minute resolution. Currently, high resolution is available only for custom metrics. For more information about high-resolution metrics, see High-Resolution Metrics in the Amazon CloudWatch User Guide.

This field is optional; if you do not specify it, the default of 60 is used.
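A minimal sketch of publishing a high-resolution data point, assuming a hypothetical custom namespace and metric:

    import boto3

    cloudwatch = boto3.client('cloudwatch')

    cloudwatch.put_metric_data(
        Namespace='MyApp',                     # hypothetical custom namespace
        MetricData=[{
            'MetricName': 'RequestLatency',
            'Value': 42.0,
            'Unit': 'Milliseconds',
            'StorageResolution': 1,            # 1 = high resolution; omit for the 60-second default
        }],
    )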

" + } }, "documentation":"

Encapsulates the information sent to either create a metric or add new values to be aggregated into an existing metric.

" }, @@ -1209,7 +1212,7 @@ "members":{ "DashboardName":{ "shape":"DashboardName", - "documentation":"

The name of the dashboard. If a dashboard with this name already exists, this call modifies that dashboard, replacing its current contents. Otherwise, a new dashboard is created. The maximum length is 255, and valid characters are A-Z, a-z, 0-9, \".\", \"-\", and \"_\".

" + "documentation":"

The name of the dashboard. If a dashboard with this name already exists, this call modifies that dashboard, replacing its current contents. Otherwise, a new dashboard is created. The maximum length is 255, and valid characters are A-Z, a-z, 0-9, \"-\", and \"_\".

" }, "DashboardBody":{ "shape":"DashboardBody", @@ -1284,7 +1287,7 @@ }, "Period":{ "shape":"Period", - "documentation":"

The period, in seconds, over which the specified statistic is applied. An alarm's total current evaluation period can be no longer than one day, so this number multiplied by EvaluationPeriods must be 86,400 or less.

" + "documentation":"

The period, in seconds, over which the specified statistic is applied. Valid values are 10, 30, and any multiple of 60.

Be sure to specify 10 or 30 only for metrics that are stored by a PutMetricData call with a StorageResolution of 1. If you specify a Period of 10 or 30 for a metric that does not have sub-minute resolution, the alarm still attempts to gather data at the period rate that you specify. In this case, it does not receive data for the attempts that do not correspond to a one-minute data resolution, and the alarm may often lapse into INSUFFICIENT_DATA status. Specifying 10 or 30 also sets this alarm as a high-resolution alarm, which has a higher charge than other alarms. For more information about pricing, see Amazon CloudWatch Pricing.

An alarm's total current evaluation period can be no longer than one day, so Period multiplied by EvaluationPeriods cannot be more than 86,400 seconds.
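To make the period arithmetic concrete, a hedged sketch of a high-resolution alarm; the alarm name, namespace, and threshold are illustrative:

    import boto3

    cloudwatch = boto3.client('cloudwatch')

    cloudwatch.put_metric_alarm(
        AlarmName='high-latency',              # hypothetical
        Namespace='MyApp',
        MetricName='RequestLatency',
        Statistic='Average',
        Period=10,                 # 10 or 30 only for metrics stored with StorageResolution=1
        EvaluationPeriods=6,       # 10 s * 6 = 60 s evaluated, well under the 86,400 s cap
        Threshold=500.0,
        ComparisonOperator='GreaterThanThreshold',
    )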

" }, "Unit":{ "shape":"StandardUnit", @@ -1292,7 +1295,7 @@ }, "EvaluationPeriods":{ "shape":"EvaluationPeriods", - "documentation":"

The number of periods over which data is compared to the specified threshold. An alarm's total current evaluation period can be no longer than one day, so this number multiplied by Period must be 86,400 or less.

" + "documentation":"

The number of periods over which data is compared to the specified threshold. An alarm's total current evaluation period can be no longer than one day, so this number multiplied by Period cannot be more than 86,400 seconds.

" }, "Threshold":{ "shape":"Threshold", diff --git a/botocore/data/codebuild/2016-10-06/service-2.json b/botocore/data/codebuild/2016-10-06/service-2.json index 573e2840..6867ecfd 100644 --- a/botocore/data/codebuild/2016-10-06/service-2.json +++ b/botocore/data/codebuild/2016-10-06/service-2.json @@ -565,6 +565,7 @@ "GOLANG", "DOCKER", "ANDROID", + "DOTNET", "BASE" ] }, @@ -931,7 +932,8 @@ "CODECOMMIT", "CODEPIPELINE", "GITHUB", - "S3" + "S3", + "BITBUCKET" ] }, "StartBuildInput":{ diff --git a/botocore/data/codedeploy/2014-10-06/service-2.json b/botocore/data/codedeploy/2014-10-06/service-2.json index b9fcceec..37166796 100644 --- a/botocore/data/codedeploy/2014-10-06/service-2.json +++ b/botocore/data/codedeploy/2014-10-06/service-2.json @@ -240,7 +240,10 @@ {"shape":"InvalidAutoRollbackConfigException"}, {"shape":"InvalidLoadBalancerInfoException"}, {"shape":"InvalidDeploymentStyleException"}, - {"shape":"InvalidBlueGreenDeploymentConfigurationException"} + {"shape":"InvalidBlueGreenDeploymentConfigurationException"}, + {"shape":"InvalidEC2TagCombinationException"}, + {"shape":"InvalidOnPremisesTagCombinationException"}, + {"shape":"TagSetListLimitExceededException"} ], "documentation":"

Creates a deployment group to which application revisions will be deployed.

" }, @@ -683,7 +686,10 @@ {"shape":"InvalidAutoRollbackConfigException"}, {"shape":"InvalidLoadBalancerInfoException"}, {"shape":"InvalidDeploymentStyleException"}, - {"shape":"InvalidBlueGreenDeploymentConfigurationException"} + {"shape":"InvalidBlueGreenDeploymentConfigurationException"}, + {"shape":"InvalidEC2TagCombinationException"}, + {"shape":"InvalidOnPremisesTagCombinationException"}, + {"shape":"TagSetListLimitExceededException"} ], "documentation":"

Changes information about a deployment group.

" } @@ -1122,7 +1128,10 @@ }, "CreateDeploymentConfigInput":{ "type":"structure", - "required":["deploymentConfigName"], + "required":[ + "deploymentConfigName", + "minimumHealthyHosts" + ], "members":{ "deploymentConfigName":{ "shape":"DeploymentConfigName", @@ -1167,11 +1176,11 @@ }, "ec2TagFilters":{ "shape":"EC2TagFilterList", - "documentation":"

The Amazon EC2 tags on which to filter. The deployment group will include EC2 instances with any of the specified tags.

" + "documentation":"

The Amazon EC2 tags on which to filter. The deployment group will include EC2 instances with any of the specified tags. Cannot be used in the same call as ec2TagSet.

" }, "onPremisesInstanceTagFilters":{ "shape":"TagFilterList", - "documentation":"

The on-premises instance tags on which to filter. The deployment group will include on-premises instances with any of the specified tags.

" + "documentation":"

The on-premises instance tags on which to filter. The deployment group will include on-premises instances with any of the specified tags. Cannot be used in the same call as onPremisesTagSet.

" }, "autoScalingGroups":{ "shape":"AutoScalingGroupNameList", @@ -1204,6 +1213,14 @@ "loadBalancerInfo":{ "shape":"LoadBalancerInfo", "documentation":"

Information about the load balancer used in a deployment.

" + }, + "ec2TagSet":{ + "shape":"EC2TagSet", + "documentation":"

Information about groups of tags applied to EC2 instances. The deployment group will include only EC2 instances identified by all the tag groups. Cannot be used in the same call as ec2TagFilters.

" + }, + "onPremisesTagSet":{ + "shape":"OnPremisesTagSet", + "documentation":"

Information about groups of tags applied to on-premises instances. The deployment group will include only on-premises instances identified by all the tag groups. Cannot be used in the same call as onPremisesInstanceTagFilters.

" } }, "documentation":"

Represents the input of a CreateDeploymentGroup operation.

" @@ -1450,11 +1467,11 @@ }, "ec2TagFilters":{ "shape":"EC2TagFilterList", - "documentation":"

The Amazon EC2 tags on which to filter.

" + "documentation":"

The Amazon EC2 tags on which to filter. The deployment group includes EC2 instances with any of the specified tags.

" }, "onPremisesInstanceTagFilters":{ "shape":"TagFilterList", - "documentation":"

The on-premises instance tags on which to filter.

" + "documentation":"

The on-premises instance tags on which to filter. The deployment group includes on-premises instances with any of the specified tags.

" }, "autoScalingGroups":{ "shape":"AutoScalingGroupList", @@ -1499,6 +1516,14 @@ "lastAttemptedDeployment":{ "shape":"LastDeploymentInfo", "documentation":"

Information about the most recent attempted deployment to the deployment group.

" + }, + "ec2TagSet":{ + "shape":"EC2TagSet", + "documentation":"

Information about groups of tags applied to an EC2 instance. The deployment group includes only EC2 instances identified by all the tag groups. Cannot be used in the same call as ec2TagFilters.

" + }, + "onPremisesTagSet":{ + "shape":"OnPremisesTagSet", + "documentation":"

Information about groups of tags applied to an on-premises instance. The deployment group includes only on-premises instances identified by all the tag groups. Cannot be used in the same call as onPremisesInstanceTagFilters.

" } }, "documentation":"

Information about a deployment group.

" @@ -1840,15 +1865,29 @@ "KEY_AND_VALUE" ] }, + "EC2TagSet":{ + "type":"structure", + "members":{ + "ec2TagSetList":{ + "shape":"EC2TagSetList", + "documentation":"

A list containing other lists of EC2 instance tag groups. In order for an instance to be included in the deployment group, it must be identified by all the tag groups in the list.

" + } + }, + "documentation":"

Information about groups of EC2 instance tags.
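A short sketch of how this new nested tag-group shape might be passed to CreateDeploymentGroup; the application, group, and role names are hypothetical. Remember that ec2TagSet cannot be combined with ec2TagFilters in the same call:

    import boto3

    codedeploy = boto3.client('codedeploy')

    codedeploy.create_deployment_group(
        applicationName='my-app',
        deploymentGroupName='prod-group',
        serviceRoleArn='arn:aws:iam::123456789012:role/CodeDeployRole',
        # Two tag groups: an instance must match BOTH groups to be included.
        ec2TagSet={
            'ec2TagSetList': [
                [{'Key': 'env', 'Value': 'prod', 'Type': 'KEY_AND_VALUE'}],
                [{'Key': 'role', 'Value': 'web', 'Type': 'KEY_AND_VALUE'}],
            ],
        },
    )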

" + }, + "EC2TagSetList":{ + "type":"list", + "member":{"shape":"EC2TagFilterList"} + }, "ELBInfo":{ "type":"structure", "members":{ "name":{ "shape":"ELBName", - "documentation":"

For blue/green deployments, the name of the load balancer that will be used to route traffic from original instances to replacement instances in a blue/green deployment. For in-place deployments, the name of the load balancer that instances are deregistered from so they are not serving traffic during a deployment, and then re-registered with after the deployment completes.

" + "documentation":"

For blue/green deployments, the name of the load balancer that will be used to route traffic from original instances to replacement instances in a blue/green deployment. For in-place deployments, the name of the load balancer that instances are deregistered from, so they are not serving traffic during a deployment, and then re-registered with after the deployment completes.

" } }, - "documentation":"

Information about a load balancer in Elastic Load Balancing to use in a deployment.

" + "documentation":"

Information about a load balancer in Elastic Load Balancing to use in a deployment. Instances are registered directly with a load balancer, and traffic is routed to the load balancer.

" }, "ELBInfoList":{ "type":"list", @@ -2428,6 +2467,13 @@ "documentation":"

An invalid deployment style was specified. Valid deployment types include \"IN_PLACE\" and \"BLUE_GREEN\". Valid deployment options include \"WITH_TRAFFIC_CONTROL\" and \"WITHOUT_TRAFFIC_CONTROL\".

", "exception":true }, + "InvalidEC2TagCombinationException":{ + "type":"structure", + "members":{ + }, + "documentation":"

A call was submitted that specified both Ec2TagFilters and Ec2TagSet, but only one of these data types can be used in a single call.

", + "exception":true + }, "InvalidEC2TagException":{ "type":"structure", "members":{ @@ -2505,6 +2551,13 @@ "documentation":"

The next token was specified in an invalid format.

", "exception":true }, + "InvalidOnPremisesTagCombinationException":{ + "type":"structure", + "members":{ + }, + "documentation":"

A call was submitted that specified both OnPremisesTagFilters and OnPremisesTagSet, but only one of these data types can be used in a single call.

", + "exception":true + }, "InvalidOperationException":{ "type":"structure", "members":{ @@ -2942,10 +2995,14 @@ "members":{ "elbInfoList":{ "shape":"ELBInfoList", - "documentation":"

An array containing information about the load balancer in Elastic Load Balancing to use in a deployment.

" + "documentation":"

An array containing information about the load balancer to use for load balancing in a deployment. In Elastic Load Balancing, elbInfoList is used with Classic Load Balancers.

" + }, + "targetGroupInfoList":{ + "shape":"TargetGroupInfoList", + "documentation":"

An array containing information about the target group to use for load balancing in a deployment. In Elastic Load Balancing, target groups are used with Application Load Balancers.

" } }, - "documentation":"

Information about the load balancer used in a deployment.

" + "documentation":"

Information about the Elastic Load Balancing load balancer or target group used in a deployment.

" }, "LogTail":{"type":"string"}, "Message":{"type":"string"}, @@ -2980,6 +3037,20 @@ }, "NextToken":{"type":"string"}, "NullableBoolean":{"type":"boolean"}, + "OnPremisesTagSet":{ + "type":"structure", + "members":{ + "onPremisesTagSetList":{ + "shape":"OnPremisesTagSetList", + "documentation":"

A list containing other lists of on-premises instance tag groups. In order for an instance to be included in the deployment group, it must be identified by all the tag groups in the list.

" + } + }, + "documentation":"

Information about groups of on-premises instance tags.

" + }, + "OnPremisesTagSetList":{ + "type":"list", + "member":{"shape":"TagFilterList"} + }, "RegisterApplicationRevisionInput":{ "type":"structure", "required":[ @@ -3284,16 +3355,42 @@ "documentation":"

A tag was not specified.

", "exception":true }, + "TagSetListLimitExceededException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The number of tag groups included in the tag set list exceeded the maximum allowed limit of 3.

", + "exception":true + }, + "TargetGroupInfo":{ + "type":"structure", + "members":{ + "name":{ + "shape":"TargetGroupName", + "documentation":"

For blue/green deployments, the name of the target group that instances in the original environment are deregistered from, and instances in the replacement environment are registered with. For in-place deployments, the name of the target group that instances are deregistered from, so they are not serving traffic during a deployment, and then re-registered with after the deployment completes.

" + } + }, + "documentation":"

Information about a target group in Elastic Load Balancing to use in a deployment. Instances are registered as targets in a target group, and traffic is routed to the target group.

" + }, + "TargetGroupInfoList":{ + "type":"list", + "member":{"shape":"TargetGroupInfo"} + }, + "TargetGroupName":{"type":"string"}, "TargetInstances":{ "type":"structure", "members":{ "tagFilters":{ "shape":"EC2TagFilterList", - "documentation":"

The tag filter key, type, and value used to identify Amazon EC2 instances in a replacement environment for a blue/green deployment.

" + "documentation":"

The tag filter key, type, and value used to identify Amazon EC2 instances in a replacement environment for a blue/green deployment. Cannot be used in the same call as ec2TagSet.

" }, "autoScalingGroups":{ "shape":"AutoScalingGroupNameList", "documentation":"

The names of one or more Auto Scaling groups to identify a replacement environment for a blue/green deployment.

" + }, + "ec2TagSet":{ + "shape":"EC2TagSet", + "documentation":"

Information about the groups of EC2 instance tags that an instance must be identified by in order for it to be included in the replacement environment for a blue/green deployment. Cannot be used in the same call as tagFilters.

" } }, "documentation":"

Information about the instances to be used in the replacement environment in a blue/green deployment.

" @@ -3446,6 +3543,14 @@ "loadBalancerInfo":{ "shape":"LoadBalancerInfo", "documentation":"

Information about the load balancer used in a deployment.

" + }, + "ec2TagSet":{ + "shape":"EC2TagSet", + "documentation":"

Information about groups of tags applied to EC2 instances. The deployment group will include only EC2 instances identified by all the tag groups.

" + }, + "onPremisesTagSet":{ + "shape":"OnPremisesTagSet", + "documentation":"

Information about an on-premises instance tag set. The deployment group will include only on-premises instances identified by all the tag groups.

" } }, "documentation":"

Represents the input of an UpdateDeploymentGroup operation.

" diff --git a/botocore/data/cognito-idp/2016-04-18/service-2.json b/botocore/data/cognito-idp/2016-04-18/service-2.json index 154cef31..37e9a069 100644 --- a/botocore/data/cognito-idp/2016-04-18/service-2.json +++ b/botocore/data/cognito-idp/2016-04-18/service-2.json @@ -132,6 +132,25 @@ ], "documentation":"

Deletes the user attributes in a user pool as an administrator. Works on any user.

Requires developer credentials.

" }, + "AdminDisableProviderForUser":{ + "name":"AdminDisableProviderForUser", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AdminDisableProviderForUserRequest"}, + "output":{"shape":"AdminDisableProviderForUserResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"UserNotFoundException"}, + {"shape":"AliasExistsException"}, + {"shape":"InternalErrorException"} + ], + "documentation":"

Disables the user from signing in with the specified external (SAML or social) identity provider. If the user to disable is a Cognito User Pools native username + password user, they are not permitted to use their password to sign in. If the user to disable is a linked external IdP user, any link between that user and an existing user is removed. The next time the external user (no longer attached to the previously linked DestinationUser) signs in, they must create a new user account. See AdminLinkProviderForUser.

This action is enabled only for admin access and requires developer credentials.

The ProviderName must match the value specified when creating an IdP for the pool.

To disable a native username + password user, the ProviderName value must be Cognito and the ProviderAttributeName must be Cognito_Subject, with the ProviderAttributeValue being the name that is used in the user pool for the user.

The ProviderAttributeName must always be Cognito_Subject for social identity providers. The ProviderAttributeValue must always be the exact subject that was used when the user was originally linked as a source user.

For de-linking a SAML identity, there are two scenarios. If the linked identity has not yet been used to sign in, the ProviderAttributeName and ProviderAttributeValue must be the same values that were used for the SourceUser when the identities were originally linked in the AdminLinkProviderForUser call. (If the linking was done with ProviderAttributeName set to Cognito_Subject, the same applies here.) However, if the user has already signed in, the ProviderAttributeName must be Cognito_Subject and ProviderAttributeValue must be the subject of the SAML assertion.
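A hedged sketch of the native-user case described above; the pool ID and username are placeholders:

    import boto3

    idp = boto3.client('cognito-idp')

    # Disable password sign-in for a native user, per the Cognito/Cognito_Subject rule above.
    idp.admin_disable_provider_for_user(
        UserPoolId='us-east-1_EXAMPLE',
        User={
            'ProviderName': 'Cognito',
            'ProviderAttributeName': 'Cognito_Subject',
            'ProviderAttributeValue': 'jdoe',   # the user's name in the user pool
        },
    )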

" + }, "AdminDisableUser":{ "name":"AdminDisableUser", "http":{ @@ -249,6 +268,25 @@ ], "documentation":"

Initiates the authentication flow, as an administrator.

Requires developer credentials.

" }, + "AdminLinkProviderForUser":{ + "name":"AdminLinkProviderForUser", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AdminLinkProviderForUserRequest"}, + "output":{"shape":"AdminLinkProviderForUserResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"UserNotFoundException"}, + {"shape":"AliasExistsException"}, + {"shape":"InternalErrorException"} + ], + "documentation":"

Links an existing user account in a user pool (DestinationUser) to an identity from an external identity provider (SourceUser) based on a specified attribute name and value from the external identity provider. This allows you to create a link from the existing user account to an external federated user identity that has not yet been used to sign in, so that the federated user identity can be used to sign in as the existing user account.

For example, if there is an existing user with a username and password, this API links that user to a federated user identity, so that when the federated user identity is used, the user signs in as the existing user account.

Because this API allows a user with an external federated identity to sign in as an existing user in the user pool, it is critical that it only be used with external identity providers and provider attributes that have been trusted by the application owner.

See also AdminDisableProviderForUser.

This action is enabled only for admin access and requires developer credentials.
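A minimal sketch of linking a not-yet-signed-in Facebook identity to an existing native user; all identifiers are placeholders:

    import boto3

    idp = boto3.client('cognito-idp')

    idp.admin_link_provider_for_user(
        UserPoolId='us-east-1_EXAMPLE',
        DestinationUser={
            'ProviderName': 'Cognito',
            'ProviderAttributeValue': 'jdoe',        # existing user; the attribute name is ignored
        },
        SourceUser={
            'ProviderName': 'Facebook',
            'ProviderAttributeName': 'Cognito_Subject',
            'ProviderAttributeValue': '1234567890',  # the id claim from the Facebook token
        },
    )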

" + }, "AdminListDevices":{ "name":"AdminListDevices", "http":{ @@ -574,6 +612,24 @@ ], "documentation":"

Creates an identity provider for a user pool.

" }, + "CreateResourceServer":{ + "name":"CreateResourceServer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateResourceServerRequest"}, + "output":{"shape":"CreateResourceServerResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalErrorException"} + ], + "documentation":"

Creates a new OAuth2.0 resource server and defines custom scopes in it.

" + }, "CreateUserImportJob":{ "name":"CreateUserImportJob", "http":{ @@ -683,6 +739,22 @@ ], "documentation":"

Deletes an identity provider for a user pool.

" }, + "DeleteResourceServer":{ + "name":"DeleteResourceServer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteResourceServerRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ], + "documentation":"

Deletes a resource server.

" + }, "DeleteUser":{ "name":"DeleteUser", "http":{ @@ -700,7 +772,7 @@ {"shape":"UserNotConfirmedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Allows a user to delete one's self.

", + "documentation":"

Allows a user to delete himself or herself.

", "authtype":"none" }, "DeleteUserAttributes":{ @@ -790,6 +862,23 @@ ], "documentation":"

Gets information about a specific identity provider.

" }, + "DescribeResourceServer":{ + "name":"DescribeResourceServer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeResourceServerRequest"}, + "output":{"shape":"DescribeResourceServerResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ], + "documentation":"

Describes a resource server.

" + }, "DescribeUserImportJob":{ "name":"DescribeUserImportJob", "http":{ @@ -978,6 +1067,23 @@ ], "documentation":"

Gets the specified identity provider.

" }, + "GetUICustomization":{ + "name":"GetUICustomization", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetUICustomizationRequest"}, + "output":{"shape":"GetUICustomizationResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ], + "documentation":"

Gets the UI customization information for a particular app client's app UI, if one is set. If nothing is set for the particular client, but there is an existing pool-level customization (the app clientId is ALL), then that is returned. If nothing is present, then an empty shape is returned.

" + }, "GetUser":{ "name":"GetUser", "http":{ @@ -1126,6 +1232,23 @@ ], "documentation":"

Lists information about all identity providers for a user pool.

" }, + "ListResourceServers":{ + "name":"ListResourceServers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListResourceServersRequest"}, + "output":{"shape":"ListResourceServersResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ], + "documentation":"

Lists the resource servers for a user pool.

" + }, "ListUserImportJobs":{ "name":"ListUserImportJobs", "http":{ @@ -1268,6 +1391,23 @@ ], "documentation":"

Responds to the authentication challenge.

" }, + "SetUICustomization":{ + "name":"SetUICustomization", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetUICustomizationRequest"}, + "output":{"shape":"SetUICustomizationResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ], + "documentation":"

Sets the UI customization information for a user pool's built-in app UI.

You can specify app UI customization settings for a single client (with a specific clientId) or for all clients (by setting the clientId to ALL). If you specify ALL, the default configuration will be used for every client that has no UI customization set previously. If you specify UI customization settings for a particular client, it will no longer fall back to the ALL configuration.

To use this API, your user pool must have a domain associated with it. Otherwise, there is no place to host the app's pages, and the service will throw an error.
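A sketch of a pool-wide customization, assuming the pool already has a domain; the pool ID and CSS are placeholders:

    import boto3

    idp = boto3.client('cognito-idp')

    idp.set_ui_customization(
        UserPoolId='us-east-1_EXAMPLE',
        ClientId='ALL',     # applies to every client that has no customization of its own
        CSS='.logo-customizable { max-width: 60%; }',
    )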

" + }, "SetUserSettings":{ "name":"SetUserSettings", "http":{ @@ -1407,6 +1547,23 @@ ], "documentation":"

Updates identity provider information for a user pool.

" }, + "UpdateResourceServer":{ + "name":"UpdateResourceServer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateResourceServerRequest"}, + "output":{"shape":"UpdateResourceServerResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalErrorException"} + ], + "documentation":"

Updates the name and scopes of a resource server. All other fields are read-only.

" + }, "UpdateUserAttributes":{ "name":"UpdateUserAttributes", "http":{ @@ -1642,7 +1799,7 @@ "members":{ "User":{ "shape":"UserType", - "documentation":"

The user returned in the request to create a new user.

" + "documentation":"

The newly created user.

" } }, "documentation":"

Represents the response from the server to the request to create the user.

" @@ -1699,6 +1856,28 @@ }, "documentation":"

Represents the request to delete a user as an administrator.

" }, + "AdminDisableProviderForUserRequest":{ + "type":"structure", + "required":[ + "UserPoolId", + "User" + ], + "members":{ + "UserPoolId":{ + "shape":"StringType", + "documentation":"

The user pool ID for the user pool.

" + }, + "User":{ + "shape":"ProviderUserIdentifierType", + "documentation":"

The user to be disabled.

" + } + } + }, + "AdminDisableProviderForUserResponse":{ + "type":"structure", + "members":{ + } + }, "AdminDisableUserRequest":{ "type":"structure", "required":[ @@ -1910,6 +2089,33 @@ }, "documentation":"

Initiates the authentication response, as an administrator.

" }, + "AdminLinkProviderForUserRequest":{ + "type":"structure", + "required":[ + "UserPoolId", + "DestinationUser", + "SourceUser" + ], + "members":{ + "UserPoolId":{ + "shape":"StringType", + "documentation":"

The user pool ID for the user pool.

" + }, + "DestinationUser":{ + "shape":"ProviderUserIdentifierType", + "documentation":"

The existing user in the user pool to be linked to the external identity provider user account. Can be a native (Username + Password) Cognito User Pools user or a federated user (for example, a SAML or Facebook user). If the user doesn't exist, an exception is thrown. This is the user that is returned when the new user (with the linked identity provider attribute) signs in.

The ProviderAttributeValue for the DestinationUser must match the username for the user in the user pool. The ProviderAttributeName will always be ignored.

" + }, + "SourceUser":{ + "shape":"ProviderUserIdentifierType", + "documentation":"

An external identity provider account for a user who does not yet exist in the user pool. This user must be a federated user (for example, a SAML or Facebook user), not another native user.

If the SourceUser is a federated social identity provider user (Facebook, Google, or Login with Amazon), you must set the ProviderAttributeName to Cognito_Subject. For social identity providers, the ProviderName will be Facebook, Google, or LoginWithAmazon, and Cognito will automatically parse the Facebook, Google, and Login with Amazon tokens for id, sub, and user_id, respectively. The ProviderAttributeValue for the user must be the same value as the id, sub, or user_id value found in the social identity provider token.

For SAML, the ProviderAttributeName can be any value that matches a claim in the SAML assertion. If you wish to link SAML users based on the subject of the SAML assertion, you should map the subject to a claim through the SAML identity provider and submit that claim name as the ProviderAttributeName. If you set ProviderAttributeName to Cognito_Subject, Cognito will automatically parse the default unique identifier found in the subject from the SAML token.

" + } + } + }, + "AdminLinkProviderForUserResponse":{ + "type":"structure", + "members":{ + } + }, "AdminListDevicesRequest":{ "type":"structure", "required":[ @@ -2244,9 +2450,14 @@ "type":"list", "member":{"shape":"AttributeType"} }, + "AttributeMappingKeyType":{ + "type":"string", + "max":32, + "min":1 + }, "AttributeMappingType":{ "type":"map", - "key":{"shape":"CustomAttributeNameType"}, + "key":{"shape":"AttributeMappingKeyType"}, "value":{"shape":"StringType"} }, "AttributeNameListType":{ @@ -2325,6 +2536,8 @@ "documentation":"

The result type of the authentication result.

" }, "BooleanType":{"type":"boolean"}, + "CSSType":{"type":"string"}, + "CSSVersionType":{"type":"string"}, "CallbackURLsListType":{ "type":"list", "member":{"shape":"RedirectUrlType"}, @@ -2524,7 +2737,7 @@ "members":{ "ClientId":{ "shape":"ClientIdType", - "documentation":"

The ID of the client associated with the user pool.

" + "documentation":"

The app client ID of the app associated with the user pool.

" }, "SecretHash":{ "shape":"SecretHashType", @@ -2561,7 +2774,7 @@ "members":{ "ClientId":{ "shape":"ClientIdType", - "documentation":"

The ID of the client associated with the user pool.

" + "documentation":"

The ID of the app client associated with the user pool.

" }, "SecretHash":{ "shape":"SecretHashType", @@ -2646,7 +2859,7 @@ "documentation":"

The user pool ID.

" }, "ProviderName":{ - "shape":"ProviderNameType", + "shape":"ProviderNameTypeV1", "documentation":"

The identity provider name.

" }, "ProviderType":{ @@ -2677,6 +2890,42 @@ } } }, + "CreateResourceServerRequest":{ + "type":"structure", + "required":[ + "UserPoolId", + "Identifier", + "Name" + ], + "members":{ + "UserPoolId":{ + "shape":"UserPoolIdType", + "documentation":"

The user pool ID for the user pool.

" + }, + "Identifier":{ + "shape":"ResourceServerIdentifierType", + "documentation":"

A unique resource server identifier for the resource server. This could be an HTTPS endpoint where the resource server is located. For example, https://my-weather-api.example.com.

" + }, + "Name":{ + "shape":"ResourceServerNameType", + "documentation":"

A friendly name for the resource server.

" + }, + "Scopes":{ + "shape":"ResourceServerScopeListType", + "documentation":"

A list of scopes. Each scope is a map, where the keys are name and description.
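A hedged sketch of the request, reusing the identifier from the example above; the pool ID, name, and scope are placeholders:

    import boto3

    idp = boto3.client('cognito-idp')

    idp.create_resource_server(
        UserPoolId='us-east-1_EXAMPLE',
        Identifier='https://my-weather-api.example.com',
        Name='weather-api',
        Scopes=[
            {'ScopeName': 'weather.read', 'ScopeDescription': 'Read forecasts'},
        ],
    )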

" + } + } + }, + "CreateResourceServerResponse":{ + "type":"structure", + "required":["ResourceServer"], + "members":{ + "ResourceServer":{ + "shape":"ResourceServerType", + "documentation":"

The newly created resource server.

" + } + } + }, "CreateUserImportJobRequest":{ "type":"structure", "required":[ @@ -2848,6 +3097,10 @@ "shape":"EmailVerificationSubjectType", "documentation":"

A string representing the email verification subject.

" }, + "VerificationMessageTemplate":{ + "shape":"VerificationMessageTemplateType", + "documentation":"

The template for the verification message that the user sees when the app requests permission to access the user's information.

" + }, "SmsAuthenticationMessage":{ "shape":"SmsVerificationMessageType", "documentation":"

A string representing the SMS authentication message.

" @@ -2906,6 +3159,13 @@ "min":1 }, "DateType":{"type":"timestamp"}, + "DefaultEmailOptionType":{ + "type":"string", + "enum":[ + "CONFIRM_WITH_LINK", + "CONFIRM_WITH_CODE" + ] + }, "DeleteGroupRequest":{ "type":"structure", "required":[ @@ -2940,6 +3200,23 @@ } } }, + "DeleteResourceServerRequest":{ + "type":"structure", + "required":[ + "UserPoolId", + "Identifier" + ], + "members":{ + "UserPoolId":{ + "shape":"UserPoolIdType", + "documentation":"

The user pool ID for the user pool that hosts the resource server.

" + }, + "Identifier":{ + "shape":"ResourceServerIdentifierType", + "documentation":"

The identifier for the resource server.

" + } + } + }, "DeleteUserAttributesRequest":{ "type":"structure", "required":[ @@ -2977,7 +3254,7 @@ }, "ClientId":{ "shape":"ClientIdType", - "documentation":"

The ID of the client associated with the user pool.

" + "documentation":"

The app client ID of the app associated with the user pool.

" } }, "documentation":"

Represents the request to delete a user pool client.

" @@ -3064,6 +3341,33 @@ } } }, + "DescribeResourceServerRequest":{ + "type":"structure", + "required":[ + "UserPoolId", + "Identifier" + ], + "members":{ + "UserPoolId":{ + "shape":"UserPoolIdType", + "documentation":"

The user pool ID for the user pool that hosts the resource server.

" + }, + "Identifier":{ + "shape":"ResourceServerIdentifierType", + "documentation":"

The identifier for the resource server.

" + } + } + }, + "DescribeResourceServerResponse":{ + "type":"structure", + "required":["ResourceServer"], + "members":{ + "ResourceServer":{ + "shape":"ResourceServerType", + "documentation":"

The resource server.

" + } + } + }, "DescribeUserImportJobRequest":{ "type":"structure", "required":[ @@ -3105,7 +3409,7 @@ }, "ClientId":{ "shape":"ClientIdType", - "documentation":"

The ID of the client associated with the user pool.

" + "documentation":"

The app client ID of the app associated with the user pool.

" } }, "documentation":"

Represents the request to describe a user pool client.

" @@ -3280,13 +3584,15 @@ "CREATING", "DELETING", "UPDATING", - "ACTIVE" + "ACTIVE", + "FAILED" ] }, "DomainType":{ "type":"string", - "max":1024, - "min":1 + "max":63, + "min":1, + "pattern":"^[a-z0-9](?:[a-z0-9\\-]{0,61}[a-z0-9])?$" }, "DomainVersionType":{ "type":"string", @@ -3319,12 +3625,24 @@ }, "documentation":"

The email configuration type.

" }, + "EmailVerificationMessageByLinkType":{ + "type":"string", + "max":20000, + "min":6, + "pattern":"[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}\\s*]*\\{##[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}\\s*]*##\\}[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}\\s*]*" + }, "EmailVerificationMessageType":{ "type":"string", "max":20000, "min":6, "pattern":"[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}\\s*]*\\{####\\}[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}\\s*]*" }, + "EmailVerificationSubjectByLinkType":{ + "type":"string", + "max":140, + "min":1, + "pattern":"[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}\\s]+" + }, "EmailVerificationSubjectType":{ "type":"string", "max":140, @@ -3506,6 +3824,30 @@ } } }, + "GetUICustomizationRequest":{ + "type":"structure", + "required":["UserPoolId"], + "members":{ + "UserPoolId":{ + "shape":"UserPoolIdType", + "documentation":"

The user pool ID for the user pool.

" + }, + "ClientId":{ + "shape":"ClientIdType", + "documentation":"

The client ID for the client app.

" + } + } + }, + "GetUICustomizationResponse":{ + "type":"structure", + "required":["UICustomization"], + "members":{ + "UICustomization":{ + "shape":"UICustomizationType", + "documentation":"

The UI customization information.

" + } + } + }, "GetUserAttributeVerificationCodeRequest":{ "type":"structure", "required":[ @@ -3676,7 +4018,12 @@ }, "IdentityProviderTypeType":{ "type":"string", - "enum":["SAML"] + "enum":[ + "SAML", + "Facebook", + "Google", + "LoginWithAmazon" + ] }, "IdpIdentifierType":{ "type":"string", @@ -3690,6 +4037,8 @@ "max":50, "min":0 }, + "ImageFileType":{"type":"blob"}, + "ImageUrlType":{"type":"string"}, "InitiateAuthRequest":{ "type":"structure", "required":[ @@ -3990,6 +4339,43 @@ "max":60, "min":1 }, + "ListResourceServersLimitType":{ + "type":"integer", + "max":50, + "min":1 + }, + "ListResourceServersRequest":{ + "type":"structure", + "required":["UserPoolId"], + "members":{ + "UserPoolId":{ + "shape":"UserPoolIdType", + "documentation":"

The user pool ID for the user pool.

" + }, + "MaxResults":{ + "shape":"ListResourceServersLimitType", + "documentation":"

The maximum number of resource servers to return.

" + }, + "NextToken":{ + "shape":"PaginationKeyType", + "documentation":"

A pagination token.

" + } + } + }, + "ListResourceServersResponse":{ + "type":"structure", + "required":["ResourceServers"], + "members":{ + "ResourceServers":{ + "shape":"ResourceServersListType", + "documentation":"

The resource servers.

" + }, + "NextToken":{ + "shape":"PaginationKeyType", + "documentation":"

A pagination token.

" + } + } + }, "ListUserImportJobsRequest":{ "type":"structure", "required":[ @@ -4399,6 +4785,30 @@ "min":1, "pattern":"[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}]+" }, + "ProviderNameTypeV1":{ + "type":"string", + "max":32, + "min":1, + "pattern":"[^_][\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}][^_]+" + }, + "ProviderUserIdentifierType":{ + "type":"structure", + "members":{ + "ProviderName":{ + "shape":"ProviderNameType", + "documentation":"

The name of the provider, for example, Facebook, Google, or Login with Amazon.

" + }, + "ProviderAttributeName":{ + "shape":"StringType", + "documentation":"

The name of the provider attribute to link to, for example, NameID.

" + }, + "ProviderAttributeValue":{ + "shape":"StringType", + "documentation":"

The value of the provider attribute to link to, for example, xxxxx_account.

" + } + }, + "documentation":"

A container for information about an identity provider for a user pool.

" + }, "ProvidersListType":{ "type":"list", "member":{"shape":"ProviderDescription"}, @@ -4469,6 +4879,78 @@ "documentation":"

This exception is thrown when the Amazon Cognito service cannot find the requested resource.

", "exception":true }, + "ResourceServerIdentifierType":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[\\x21\\x23-\\x5B\\x5D-\\x7E]+" + }, + "ResourceServerNameType":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[\\w\\s+=,.@-]+" + }, + "ResourceServerScopeDescriptionType":{ + "type":"string", + "max":256, + "min":1 + }, + "ResourceServerScopeListType":{ + "type":"list", + "member":{"shape":"ResourceServerScopeType"}, + "max":25 + }, + "ResourceServerScopeNameType":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[\\x21\\x23-\\x2E\\x30-\\x5B\\x5D-\\x7E]+" + }, + "ResourceServerScopeType":{ + "type":"structure", + "required":[ + "ScopeName", + "ScopeDescription" + ], + "members":{ + "ScopeName":{ + "shape":"ResourceServerScopeNameType", + "documentation":"

The name of the scope.

" + }, + "ScopeDescription":{ + "shape":"ResourceServerScopeDescriptionType", + "documentation":"

A description of the scope.

" + } + }, + "documentation":"

A resource server scope.

" + }, + "ResourceServerType":{ + "type":"structure", + "members":{ + "UserPoolId":{ + "shape":"UserPoolIdType", + "documentation":"

The user pool ID for the user pool that hosts the resource server.

" + }, + "Identifier":{ + "shape":"ResourceServerIdentifierType", + "documentation":"

The identifier for the resource server.

" + }, + "Name":{ + "shape":"ResourceServerNameType", + "documentation":"

The name of the resource server.

" + }, + "Scopes":{ + "shape":"ResourceServerScopeListType", + "documentation":"

A list of scopes that are defined for the resource server.

" + } + }, + "documentation":"

A container for information about a resource server for a user pool.

" + }, + "ResourceServersListType":{ + "type":"list", + "member":{"shape":"ResourceServerType"} + }, "RespondToAuthChallengeRequest":{ "type":"structure", "required":[ @@ -4576,7 +5058,8 @@ }, "ScopeListType":{ "type":"list", - "member":{"shape":"ScopeType"} + "member":{"shape":"ScopeType"}, + "max":25 }, "ScopeType":{ "type":"string", @@ -4605,6 +5088,38 @@ "max":2048, "min":20 }, + "SetUICustomizationRequest":{ + "type":"structure", + "required":["UserPoolId"], + "members":{ + "UserPoolId":{ + "shape":"UserPoolIdType", + "documentation":"

The user pool ID for the user pool.

" + }, + "ClientId":{ + "shape":"ClientIdType", + "documentation":"

The client ID for the client app.

" + }, + "CSS":{ + "shape":"CSSType", + "documentation":"

The CSS values in the UI customization.

" + }, + "ImageFile":{ + "shape":"ImageFileType", + "documentation":"

The uploaded logo image for the UI customization.

" + } + } + }, + "SetUICustomizationResponse":{ + "type":"structure", + "required":["UICustomization"], + "members":{ + "UICustomization":{ + "shape":"UICustomizationType", + "documentation":"

The UI customization information.

" + } + } + }, "SetUserSettingsRequest":{ "type":"structure", "required":[ @@ -4816,6 +5331,40 @@ "documentation":"

This exception is thrown when the user has made too many requests for a given operation.

", "exception":true }, + "UICustomizationType":{ + "type":"structure", + "members":{ + "UserPoolId":{ + "shape":"UserPoolIdType", + "documentation":"

The user pool ID for the user pool.

" + }, + "ClientId":{ + "shape":"ClientIdType", + "documentation":"

The client ID for the client app.

" + }, + "ImageUrl":{ + "shape":"ImageUrlType", + "documentation":"

The logo image for the UI customization.

" + }, + "CSS":{ + "shape":"CSSType", + "documentation":"

The CSS values in the UI customization.

" + }, + "CSSVersion":{ + "shape":"CSSVersionType", + "documentation":"

The CSS version number.

" + }, + "LastModifiedDate":{ + "shape":"DateType", + "documentation":"

The last-modified date for the UI customization.

" + }, + "CreationDate":{ + "shape":"DateType", + "documentation":"

The creation date for the UI customization.

" + } + }, + "documentation":"

A container for the UI customization information for a user pool's built-in app UI.

" + }, "UnexpectedLambdaException":{ "type":"structure", "members":{ @@ -4951,6 +5500,42 @@ } } }, + "UpdateResourceServerRequest":{ + "type":"structure", + "required":[ + "UserPoolId", + "Identifier", + "Name" + ], + "members":{ + "UserPoolId":{ + "shape":"UserPoolIdType", + "documentation":"

The user pool ID for the user pool.

" + }, + "Identifier":{ + "shape":"ResourceServerIdentifierType", + "documentation":"

The identifier for the resource server.

" + }, + "Name":{ + "shape":"ResourceServerNameType", + "documentation":"

The name of the resource server.

" + }, + "Scopes":{ + "shape":"ResourceServerScopeListType", + "documentation":"

The scope values to be set for the resource server.

" + } + } + }, + "UpdateResourceServerResponse":{ + "type":"structure", + "required":["ResourceServer"], + "members":{ + "ResourceServer":{ + "shape":"ResourceServerType", + "documentation":"

The resource server.

" + } + } + }, "UpdateUserAttributesRequest":{ "type":"structure", "required":[ @@ -5024,7 +5609,7 @@ }, "LogoutURLs":{ "shape":"LogoutURLsListType", - "documentation":"

A list ofallowed logout URLs for the identity providers.

" + "documentation":"

A list of allowed logout URLs for the identity providers.

" }, "DefaultRedirectURI":{ "shape":"RedirectUrlType", @@ -5087,6 +5672,10 @@ "shape":"EmailVerificationSubjectType", "documentation":"

The subject of the email verification message.

" }, + "VerificationMessageTemplate":{ + "shape":"VerificationMessageTemplateType", + "documentation":"

The template for verification messages.

" + }, "SmsAuthenticationMessage":{ "shape":"SmsVerificationMessageType", "documentation":"

The contents of the SMS authentication message.

" @@ -5336,7 +5925,7 @@ }, "LogoutURLs":{ "shape":"LogoutURLsListType", - "documentation":"

A list ofallowed logout URLs for the identity providers.

" + "documentation":"

A list of allowed logout URLs for the identity providers.

" }, "DefaultRedirectURI":{ "shape":"RedirectUrlType", @@ -5356,7 +5945,7 @@ "box":true } }, - "documentation":"

A user pool of the client type.

" + "documentation":"

Contains information about a user pool client.

" }, "UserPoolDescriptionType":{ "type":"structure", @@ -5494,6 +6083,10 @@ "shape":"EmailVerificationSubjectType", "documentation":"

The subject of the email verification message.

" }, + "VerificationMessageTemplate":{ + "shape":"VerificationMessageTemplateType", + "documentation":"

The template for verification messages.

" + }, "SmsAuthenticationMessage":{ "shape":"SmsVerificationMessageType", "documentation":"

The contents of the SMS authentication message.

" @@ -5616,6 +6209,36 @@ "type":"list", "member":{"shape":"UserType"} }, + "VerificationMessageTemplateType":{ + "type":"structure", + "members":{ + "SmsMessage":{ + "shape":"SmsVerificationMessageType", + "documentation":"

The SMS message template.

" + }, + "EmailMessage":{ + "shape":"EmailVerificationMessageType", + "documentation":"

The email message template.

" + }, + "EmailSubject":{ + "shape":"EmailVerificationSubjectType", + "documentation":"

The subject line for the email message template.

" + }, + "EmailMessageByLink":{ + "shape":"EmailVerificationMessageByLinkType", + "documentation":"

The email message template for sending a confirmation link to the user.

" + }, + "EmailSubjectByLink":{ + "shape":"EmailVerificationSubjectByLinkType", + "documentation":"

The subject line for the email message template for sending a confirmation link to the user.

" + }, + "DefaultEmailOption":{ + "shape":"DefaultEmailOptionType", + "documentation":"

The default email option.

" + } + }, + "documentation":"

The template for verification messages.
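A hedged sketch of switching a pool to link-based confirmation via UpdateUserPool; the pool ID and message text are placeholders, and the {##...##} placeholder marks where the confirmation link is rendered:

    import boto3

    idp = boto3.client('cognito-idp')

    idp.update_user_pool(
        UserPoolId='us-east-1_EXAMPLE',
        VerificationMessageTemplate={
            'DefaultEmailOption': 'CONFIRM_WITH_LINK',
            'EmailSubjectByLink': 'Verify your account',
            'EmailMessageByLink': 'Welcome! {##Click here to verify your email##}',
        },
    )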

" + }, "VerifiedAttributeType":{ "type":"string", "enum":[ diff --git a/botocore/data/config/2014-11-12/service-2.json b/botocore/data/config/2014-11-12/service-2.json index 3258b407..bdea5c42 100644 --- a/botocore/data/config/2014-11-12/service-2.json +++ b/botocore/data/config/2014-11-12/service-2.json @@ -239,6 +239,21 @@ ], "documentation":"

Returns the number of resources that are compliant and the number that are noncompliant. You can specify one or more resource types to get these numbers for each resource type. The maximum number returned is 100.

" }, + "GetDiscoveredResourceCounts":{ + "name":"GetDiscoveredResourceCounts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDiscoveredResourceCountsRequest"}, + "output":{"shape":"GetDiscoveredResourceCountsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidLimitException"}, + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

Returns the resource types, the number of each resource type, and the total number of resources that AWS Config is recording in this region for your AWS account.

Example

  1. AWS Config is recording three resource types in the US East (Ohio) Region for your account: 25 EC2 instances, 20 IAM users, and 15 S3 buckets.

  2. You make a call to the GetDiscoveredResourceCounts action and specify that you want all resource types.

  3. AWS Config returns the following:

    • The resource types (EC2 instances, IAM users, and S3 buckets)

    • The number of each resource type (25, 20, and 15)

    • The total number of all resources (60)

The response is paginated. By default, AWS Config lists 100 ResourceCount objects on each page. You can customize this number with the limit parameter. The response includes a nextToken string. To get the next page of results, run the request again and specify the string for the nextToken parameter.

If you make a call to the GetDiscoveredResourceCounts action, you might not immediately receive resource counts: it can take a few minutes for AWS Config to record and count your resources. Wait a few minutes and then retry the GetDiscoveredResourceCounts action.
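A minimal sketch of the paginated call described above, filtering to two hypothetical resource types:

    import boto3

    config = boto3.client('config')

    counts, kwargs = [], {'resourceTypes': ['AWS::EC2::Instance', 'AWS::IAM::User']}
    while True:
        resp = config.get_discovered_resource_counts(**kwargs)
        counts.extend(resp.get('resourceCounts', []))
        if 'nextToken' not in resp:
            break
        kwargs['nextToken'] = resp['nextToken']   # fetch the next page

    print(resp['totalDiscoveredResources'])
    for item in counts:
        print(item['resourceType'], item['count'])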

" + }, "GetResourceConfigHistory":{ "name":"GetResourceConfigHistory", "http":{ @@ -255,7 +270,7 @@ {"shape":"NoAvailableConfigurationRecorderException"}, {"shape":"ResourceNotDiscoveredException"} ], - "documentation":"

Returns a list of configuration items for the specified resource. The list contains details about each state of the resource during the specified time interval.

The response is paginated, and by default, AWS Config returns a limit of 10 configuration items per page. You can customize this number with the limit parameter. The response includes a nextToken string, and to get the next page of results, run the request again and enter this string for the nextToken parameter.

Each call to the API is limited to span a duration of seven days. It is likely that the number of records returned is smaller than the specified limit. In such cases, you can make another call, using the nextToken.

" + "documentation":"

Returns a list of configuration items for the specified resource. The list contains details about each state of the resource during the specified time interval.

The response is paginated. By default, AWS Config returns a limit of 10 configuration items per page. You can customize this number with the limit parameter. The response includes a nextToken string. To get the next page of results, run the request again and specify the string for the nextToken parameter.

Each call to the API is limited to span a duration of seven days. It is likely that the number of records returned is smaller than the specified limit. In such cases, you can make another call, using the nextToken.

" }, "ListDiscoveredResources":{ "name":"ListDiscoveredResources", @@ -271,7 +286,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"NoAvailableConfigurationRecorderException"} ], - "documentation":"

Accepts a resource type and returns a list of resource identifiers for the resources of that type. A resource identifier includes the resource type, ID, and (if available) the custom resource name. The results consist of resources that AWS Config has discovered, including those that AWS Config is not currently recording. You can narrow the results to include only resources that have specific resource IDs or a resource name.

You can specify either resource IDs or a resource name but not both in the same request.

The response is paginated, and by default AWS Config lists 100 resource identifiers on each page. You can customize this number with the limit parameter. The response includes a nextToken string, and to get the next page of results, run the request again and enter this string for the nextToken parameter.

" + "documentation":"

Accepts a resource type and returns a list of resource identifiers for the resources of that type. A resource identifier includes the resource type, ID, and (if available) the custom resource name. The results consist of resources that AWS Config has discovered, including those that AWS Config is not currently recording. You can narrow the results to include only resources that have specific resource IDs or a resource name.

You can specify either resource IDs or a resource name but not both in the same request.

The response is paginated. By default, AWS Config lists 100 resource identifiers on each page. You can customize this number with the limit parameter. The response includes a nextToken string. To get the next page of results, run the request again and specify the string for the nextToken parameter.

" }, "PutConfigRule":{ "name":"PutConfigRule", @@ -1437,6 +1452,40 @@ }, "documentation":"

" }, + "GetDiscoveredResourceCountsRequest":{ + "type":"structure", + "members":{ + "resourceTypes":{ + "shape":"ResourceTypes", + "documentation":"

The comma-separated list that specifies the resource types that you want AWS Config to return. For example, (\"AWS::EC2::Instance\", \"AWS::IAM::User\").

If a value for resourceTypes is not specified, AWS Config returns all resource types that AWS Config is recording in the region for your account.

If the configuration recorder is turned off, AWS Config returns an empty list of ResourceCount objects. If the configuration recorder is not recording a specific resource type (for example, S3 buckets), that resource type is not returned in the list of ResourceCount objects.

" + }, + "limit":{ + "shape":"Limit", + "documentation":"

The maximum number of ResourceCount objects returned on each page. The default is 100. You cannot specify a limit greater than 100. If you specify 0, AWS Config uses the default.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

" + } + } + }, + "GetDiscoveredResourceCountsResponse":{ + "type":"structure", + "members":{ + "totalDiscoveredResources":{ + "shape":"Long", + "documentation":"

The total number of resources that AWS Config is recording in the region for your account. If you specify resource types in the request, AWS Config returns only the total number of resources for those resource types.

Example

  1. AWS Config is recording three resource types in the US East (Ohio) Region for your account: 25 EC2 instances, 20 IAM users, and 15 S3 buckets, for a total of 60 resources.

  2. You make a call to the GetDiscoveredResourceCounts action and specify the resource type \"AWS::EC2::Instance\" in the request.

  3. AWS Config returns 25 for totalDiscoveredResources.

" + }, + "resourceCounts":{ + "shape":"ResourceCounts", + "documentation":"

The list of ResourceCount objects. Each object is listed in descending order by the number of resources.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The string that you use in a subsequent request to get the next page of results in a paginated response.

" + } + } + }, "GetResourceConfigHistoryRequest":{ "type":"structure", "required":[ @@ -1647,6 +1696,7 @@ }, "documentation":"

" }, + "Long":{"type":"long"}, "MaxNumberOfConfigRulesExceededException":{ "type":"structure", "members":{ @@ -1876,6 +1926,24 @@ "member":{"shape":"Relationship"} }, "RelationshipName":{"type":"string"}, + "ResourceCount":{ + "type":"structure", + "members":{ + "resourceType":{ + "shape":"ResourceType", + "documentation":"

The resource type, for example \"AWS::EC2::Instance\".

" + }, + "count":{ + "shape":"Long", + "documentation":"

The number of resources.

" + } + }, + "documentation":"

An object that contains the resource type and the number of resources.

" + }, + "ResourceCounts":{ + "type":"list", + "member":{"shape":"ResourceCount"} + }, "ResourceCreationTime":{"type":"timestamp"}, "ResourceDeletionTime":{"type":"timestamp"}, "ResourceId":{"type":"string"}, @@ -1961,7 +2029,8 @@ "AWS::Redshift::ClusterSecurityGroup", "AWS::Redshift::ClusterSubnetGroup", "AWS::Redshift::EventSubscription", - "AWS::CloudWatch::Alarm" + "AWS::CloudWatch::Alarm", + "AWS::CloudFormation::Stack" ] }, "ResourceTypeList":{ @@ -2014,7 +2083,7 @@ }, "SourceIdentifier":{ "shape":"StringWithCharLimit256", - "documentation":"

For AWS Config managed rules, a predefined identifier from a list. For example, IAM_PASSWORD_POLICY is a managed rule. To reference a managed rule, see Using AWS Managed Config Rules.

For custom rules, the identifier is the Amazon Resource Name (ARN) of the rule's AWS Lambda function, such as arn:aws:lambda:us-east-1:123456789012:function:custom_rule_name.

" + "documentation":"

For AWS Config managed rules, a predefined identifier from a list. For example, IAM_PASSWORD_POLICY is a managed rule. To reference a managed rule, see Using AWS Managed Config Rules.

For custom rules, the identifier is the Amazon Resource Name (ARN) of the rule's AWS Lambda function, such as arn:aws:lambda:us-east-2:123456789012:function:custom_rule_name.

" }, "SourceDetails":{ "shape":"SourceDetails", diff --git a/botocore/data/dynamodb/2012-08-10/service-2.json b/botocore/data/dynamodb/2012-08-10/service-2.json old mode 100755 new mode 100644 index eeeac573..60a87e48 --- a/botocore/data/dynamodb/2012-08-10/service-2.json +++ b/botocore/data/dynamodb/2012-08-10/service-2.json @@ -189,7 +189,7 @@ {"shape":"ItemCollectionSizeLimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

Creates a new item, or replaces an old item with a new item. If an item that has the same primary key as the new item already exists in the specified table, the new item completely replaces the existing item. You can perform a conditional put operation (add a new item if one with the specified primary key doesn't exist), or replace an existing item if it has certain attribute values.

In addition to putting an item, you can also return the item's attribute values in the same operation, using the ReturnValues parameter.

When you add an item, the primary key attribute(s) are the only required attributes. Attribute values cannot be null. String and Binary type attributes must have lengths greater than zero. Set type attributes cannot be empty. Requests with empty values will be rejected with a ValidationException exception.

To prevent a new item from replacing an existing item, use a conditional expression that contains the attribute_not_exists function with the name of the attribute being used as the partition key for the table. Since every record must contain that attribute, the attribute_not_exists function will only succeed if no matching item exists.

For more information about PutItem, see Working with Items in the Amazon DynamoDB Developer Guide.

" + "documentation":"

Creates a new item, or replaces an old item with a new item. If an item that has the same primary key as the new item already exists in the specified table, the new item completely replaces the existing item. You can perform a conditional put operation (add a new item if one with the specified primary key doesn't exist), or replace an existing item if it has certain attribute values. You can return the item's attribute values in the same operation, using the ReturnValues parameter.

This topic provides general information about the PutItem API.

For information on how to call the PutItem API using the AWS SDK in specific languages, see the following:

When you add an item, the primary key attribute(s) are the only required attributes. Attribute values cannot be null. String and Binary type attributes must have lengths greater than zero. Set type attributes cannot be empty. Requests with empty values will be rejected with a ValidationException exception.

To prevent a new item from replacing an existing item, use a conditional expression that contains the attribute_not_exists function with the name of the attribute being used as the partition key for the table. Since every record must contain that attribute, the attribute_not_exists function will only succeed if no matching item exists.

For more information about PutItem, see Working with Items in the Amazon DynamoDB Developer Guide.

" }, "Query":{ "name":"Query", @@ -204,7 +204,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"

A Query operation uses the primary key of a table or a secondary index to directly access items from that table or index.

Use the KeyConditionExpression parameter to provide a specific value for the partition key. The Query operation will return all of the items from the table or index with that partition key value. You can optionally narrow the scope of the Query operation by specifying a sort key value and a comparison operator in KeyConditionExpression. You can use the ScanIndexForward parameter to get results in forward or reverse order, by sort key.

Queries that do not return results consume the minimum number of read capacity units for that type of read operation.

If the total number of items meeting the query criteria exceeds the result set size limit of 1 MB, the query stops and results are returned to the user with the LastEvaluatedKey element to continue the query in a subsequent operation. Unlike a Scan operation, a Query operation never returns both an empty result set and a LastEvaluatedKey value. LastEvaluatedKey is only provided if you have used the Limit parameter, or if the result set exceeds 1 MB (prior to applying a filter).

You can query a table, a local secondary index, or a global secondary index. For a query on a table or on a local secondary index, you can set the ConsistentRead parameter to true and obtain a strongly consistent result. Global secondary indexes support eventually consistent reads only, so do not specify ConsistentRead when querying a global secondary index.

" + "documentation":"

The Query operation finds items based on primary key values. You can query any table or secondary index that has a composite primary key (a partition key and a sort key).

Use the KeyConditionExpression parameter to provide a specific value for the partition key. The Query operation will return all of the items from the table or index with that partition key value. You can optionally narrow the scope of the Query operation by specifying a sort key value and a comparison operator in KeyConditionExpression. To further refine the Query results, you can optionally provide a FilterExpression. A FilterExpression determines which items within the results should be returned to you. All of the other results are discarded.

A Query operation always returns a result set. If no matching items are found, the result set will be empty. Queries that do not return results consume the minimum number of read capacity units for that type of read operation.

DynamoDB calculates the number of read capacity units consumed based on item size, not on the amount of data that is returned to an application. The number of capacity units consumed will be the same whether you request all of the attributes (the default behavior) or just some of them (using a projection expression). The number will also be the same whether or not you use a FilterExpression.

Query results are always sorted by the sort key value. If the data type of the sort key is Number, the results are returned in numeric order; otherwise, the results are returned in order of UTF-8 bytes. By default, the sort order is ascending. To reverse the order, set the ScanIndexForward parameter to false.

A single Query operation will read up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then apply any filtering to the results using FilterExpression. If LastEvaluatedKey is present in the response, you will need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide.

FilterExpression is applied after a Query finishes, but before the results are returned. A FilterExpression cannot contain partition key or sort key attributes. You need to specify those attributes in the KeyConditionExpression.

A Query operation can return an empty result set and a LastEvaluatedKey if all the items read for the page of results are filtered out.

You can query a table, a local secondary index, or a global secondary index. For a query on a table or on a local secondary index, you can set the ConsistentRead parameter to true and obtain a strongly consistent result. Global secondary indexes support eventually consistent reads only, so do not specify ConsistentRead when querying a global secondary index.

" }, "Scan":{ "name":"Scan", @@ -219,7 +219,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"

The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression operation.

If the total number of scanned items exceeds the maximum data set size limit of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey value to continue the scan in a subsequent operation. The results also include the number of items exceeding the limit. A scan can result in no table data meeting the filter criteria.

By default, Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide.

By default, Scan uses eventually consistent reads when accessing the data in a table; therefore, the result set might not include the changes to data in the table immediately before the operation began. If you need a consistent copy of the data, as of the time that the Scan begins, you can set the ConsistentRead parameter to true.

" + "documentation":"

The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression operation.

If the total number of scanned items exceeds the maximum data set size limit of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey value to continue the scan in a subsequent operation. The results also include the number of items exceeding the limit. A scan can result in no table data meeting the filter criteria.

A single Scan operation will read up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then apply any filtering to the results using FilterExpression. If LastEvaluatedKey is present in the response, you will need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide.

Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide.

Scan uses eventually consistent reads when accessing the data in a table; therefore, the result set might not include the changes to data in the table immediately before the operation began. If you need a consistent copy of the data, as of the time that the Scan begins, you can set the ConsistentRead parameter to true.

" }, "TagResource":{ "name":"TagResource", @@ -298,7 +298,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

Specify the lifetime of individual table items. The database automatically removes the item at the expiration of the item. The UpdateTimeToLive method will enable or disable TTL for the specified table. A successful UpdateTimeToLive call returns the current TimeToLiveSpecification; it may take up to one hour for the change to fully process.

TTL compares the current time in epoch time format to the time stored in the TTL attribute of an item. If the epoch time value stored in the attribute is less than the current time, the item is marked as expired and subsequently deleted.

The epoch time format is the number of seconds elapsed since 12:00:00 AM January 1st, 1970 UTC.

DynamoDB deletes expired items on a best-effort basis to ensure availability of throughput for other data operations.

DynamoDB typically deletes expired items within two days of expiration. The exact duration within which an item gets deleted after expiration is specific to the nature of the workload. Items that have expired and not been deleted will still show up in reads, queries, and scans.

As items are deleted, they are removed from any Local Secondary Index and Global Secondary Index immediately in the same eventually consistent way as a standard delete operation.

For more information, see Time To Live in the Amazon DynamoDB Developer Guide.

" + "documentation":"

The UpdateTimeToLive method will enable or disable TTL for the specified table. A successful UpdateTimeToLive call returns the current TimeToLiveSpecification; it may take up to one hour for the change to fully process. Any additional UpdateTimeToLive calls for the same table during this one hour duration result in a ValidationException.

TTL compares the current time in epoch time format to the time stored in the TTL attribute of an item. If the epoch time value stored in the attribute is less than the current time, the item is marked as expired and subsequently deleted.

The epoch time format is the number of seconds elapsed since 12:00:00 AM January 1st, 1970 UTC.

DynamoDB deletes expired items on a best-effort basis to ensure availability of throughput for other data operations.

DynamoDB typically deletes expired items within two days of expiration. The exact duration within which an item gets deleted after expiration is specific to the nature of the workload. Items that have expired and not been deleted will still show up in reads, queries, and scans.

As items are deleted, they are removed from any Local Secondary Index and Global Secondary Index immediately in the same eventually consistent way as a standard delete operation.

For more information, see Time To Live in the Amazon DynamoDB Developer Guide.

" } }, "shapes":{ @@ -464,7 +464,7 @@ "members":{ "RequestItems":{ "shape":"BatchWriteItemRequestMap", - "documentation":"

A map of one or more table names and, for each table, a list of operations to be performed (DeleteRequest or PutRequest). Each element in the map consists of the following:

" + "documentation":"

A map of one or more table names and, for each table, a list of operations to be performed (DeleteRequest or PutRequest). Each element in the map consists of the following:

" }, "ReturnConsumedCapacity":{"shape":"ReturnConsumedCapacity"}, "ReturnItemCollectionMetrics":{ @@ -707,7 +707,7 @@ }, "Expected":{ "shape":"ExpectedAttributeMap", - "documentation":"

This is a legacy parameter. Use ConditionExpresssion instead. For more information, see Expected in the Amazon DynamoDB Developer Guide.

" + "documentation":"

This is a legacy parameter. Use ConditionExpression instead. For more information, see Expected in the Amazon DynamoDB Developer Guide.

" }, "ConditionalOperator":{ "shape":"ConditionalOperator", @@ -1468,7 +1468,7 @@ }, "Expected":{ "shape":"ExpectedAttributeMap", - "documentation":"

This is a legacy parameter. Use ConditionExpresssion instead. For more information, see Expected in the Amazon DynamoDB Developer Guide.

" + "documentation":"

This is a legacy parameter. Use ConditionExpression instead. For more information, see Expected in the Amazon DynamoDB Developer Guide.

" }, "ReturnValues":{ "shape":"ReturnValue", @@ -2082,7 +2082,7 @@ }, "Expected":{ "shape":"ExpectedAttributeMap", - "documentation":"

This is a legacy parameter. Use ConditionExpresssion instead. For more information, see Expected in the Amazon DynamoDB Developer Guide.

" + "documentation":"

This is a legacy parameter. Use ConditionExpression instead. For more information, see Expected in the Amazon DynamoDB Developer Guide.

" }, "ConditionalOperator":{ "shape":"ConditionalOperator", @@ -2090,7 +2090,7 @@ }, "ReturnValues":{ "shape":"ReturnValue", - "documentation":"

Use ReturnValues if you want to get the item attributes as they appeared either before or after they were updated. For UpdateItem, the valid values are:

There is no additional cost associated with requesting a return value aside from the small network and processing overhead of receiving a larger response. No Read Capacity Units are consumed.

Values returned are strongly consistent

" + "documentation":"

Use ReturnValues if you want to get the item attributes as they appear before or after they are updated. For UpdateItem, the valid values are:

There is no additional cost associated with requesting a return value aside from the small network and processing overhead of receiving a larger response. No read capacity units are consumed.

The values returned are strongly consistent.

" }, "ReturnConsumedCapacity":{"shape":"ReturnConsumedCapacity"}, "ReturnItemCollectionMetrics":{ @@ -2121,7 +2121,7 @@ "members":{ "Attributes":{ "shape":"AttributeMap", - "documentation":"

A map of attribute values as they appeared before the UpdateItem operation. This map only appears if ReturnValues was specified as something other than NONE in the request. Each element represents one attribute.

" + "documentation":"

A map of attribute values as they appear before or after the UpdateItem operation, as determined by the ReturnValues parameter.

The Attributes map is only present if ReturnValues was specified as something other than NONE in the request. Each element represents one attribute.

" }, "ConsumedCapacity":{ "shape":"ConsumedCapacity", diff --git a/botocore/data/ec2/2016-11-15/service-2.json b/botocore/data/ec2/2016-11-15/service-2.json index 86beeb23..98aa7765 100644 --- a/botocore/data/ec2/2016-11-15/service-2.json +++ b/botocore/data/ec2/2016-11-15/service-2.json @@ -39,7 +39,7 @@ }, "input":{"shape":"AllocateAddressRequest"}, "output":{"shape":"AllocateAddressResult"}, - "documentation":"

Acquires an Elastic IP address.

An Elastic IP address is for use either in the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Allocates an Elastic IP address.

An Elastic IP address is for use either in the EC2-Classic platform or in a VPC. By default, you can allocate 5 Elastic IP addresses for EC2-Classic per region and 5 Elastic IP addresses for EC2-VPC per region.

If you release an Elastic IP address for use in a VPC, you might be able to recover it. To recover an Elastic IP address that you released, specify it in the Address parameter. Note that you cannot recover an Elastic IP address that you released after it is allocated to another AWS account.

For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

" }, "AllocateHosts":{ "name":"AllocateHosts", @@ -314,6 +314,16 @@ "output":{"shape":"CreateCustomerGatewayResult"}, "documentation":"

Provides information to AWS about your VPN customer gateway device. The customer gateway is the appliance at your end of the VPN connection. (The device on the AWS side of the VPN connection is the virtual private gateway.) You must provide the Internet-routable IP address of the customer gateway's external interface. The IP address must be static and may be behind a device performing network address translation (NAT).

For devices that use Border Gateway Protocol (BGP), you can also provide the device's BGP Autonomous System Number (ASN). You can use an existing ASN assigned to your network. If you don't have an ASN already, you can use a private ASN (in the 64512 - 65534 range).

Amazon EC2 supports all 2-byte ASN numbers in the range of 1 - 65534, with the exception of 7224, which is reserved in the us-east-1 region, and 9059, which is reserved in the eu-west-1 region.

For more information about VPN customer gateways, see Adding a Hardware Virtual Private Gateway to Your VPC in the Amazon Virtual Private Cloud User Guide.

You cannot create more than one customer gateway with the same VPN type, IP address, and BGP ASN parameter values. If you run an identical request more than one time, the first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent requests do not create new customer gateway resources.

" }, + "CreateDefaultVpc":{ + "name":"CreateDefaultVpc", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDefaultVpcRequest"}, + "output":{"shape":"CreateDefaultVpcResult"}, + "documentation":"

Creates a default VPC with a size /16 IPv4 CIDR block and a default subnet in each Availability Zone. For more information about the components of a default VPC, see Default VPC and Default Subnets in the Amazon Virtual Private Cloud User Guide. You cannot specify the components of the default VPC yourself.

You can create a default VPC if you deleted your previous default VPC. You cannot have more than one default VPC per region.

If your account supports EC2-Classic, you cannot use this action to create a default VPC in a region that supports EC2-Classic. If you want a default VPC in a region that supports EC2-Classic, see \"I really want a default VPC for my existing EC2 account. Is that possible?\" in the Default VPCs FAQ.

" + }, "CreateDhcpOptions":{ "name":"CreateDhcpOptions", "http":{ @@ -847,7 +857,7 @@ "requestUri":"/" }, "input":{"shape":"DeregisterImageRequest"}, - "documentation":"

Deregisters the specified AMI. After you deregister an AMI, it can't be used to launch new instances.

This command does not delete the AMI.

" + "documentation":"

Deregisters the specified AMI. After you deregister an AMI, it can't be used to launch new instances; however, it doesn't affect any instances that you've already launched from the AMI. You'll continue to incur usage costs for those instances until you terminate them.

When you deregister an Amazon EBS-backed AMI, it doesn't affect the snapshot that was created for the root volume of the instance during the AMI creation process. When you deregister an instance store-backed AMI, it doesn't affect the files that you uploaded to Amazon S3 when you created the AMI.

" }, "DescribeAccountAttributes":{ "name":"DescribeAccountAttributes", @@ -939,6 +949,16 @@ "output":{"shape":"DescribeEgressOnlyInternetGatewaysResult"}, "documentation":"

Describes one or more of your egress-only Internet gateways.

" }, + "DescribeElasticGpus":{ + "name":"DescribeElasticGpus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeElasticGpusRequest"}, + "output":{"shape":"DescribeElasticGpusResult"}, + "documentation":"

Describes the Elastic GPUs associated with your instances. For more information about Elastic GPUs, see Amazon EC2 Elastic GPUs.

" + }, "DescribeExportTasks":{ "name":"DescribeExportTasks", "http":{ @@ -2038,7 +2058,7 @@ "requestUri":"/" }, "input":{"shape":"ReleaseAddressRequest"}, - "documentation":"

Releases the specified Elastic IP address.

After releasing an Elastic IP address, it is released to the IP address pool and might be unavailable to you. Be sure to update your DNS records and any servers or devices that communicate with the address. If you attempt to release an Elastic IP address that you already released, you'll get an AuthFailure error if the address is already allocated to another AWS account.

[EC2-Classic, default VPC] Releasing an Elastic IP address automatically disassociates it from any instance that it's associated with. To disassociate an Elastic IP address without releasing it, use DisassociateAddress.

[Nondefault VPC] You must use DisassociateAddress to disassociate the Elastic IP address before you try to release it. Otherwise, Amazon EC2 returns an error (InvalidIPAddress.InUse).

" + "documentation":"

Releases the specified Elastic IP address.

[EC2-Classic, default VPC] Releasing an Elastic IP address automatically disassociates it from any instance that it's associated with. To disassociate an Elastic IP address without releasing it, use DisassociateAddress.

[Nondefault VPC] You must use DisassociateAddress to disassociate the Elastic IP address before you can release it. Otherwise, Amazon EC2 returns an error (InvalidIPAddress.InUse).

After releasing an Elastic IP address, it is released to the IP address pool. Be sure to update your DNS records and any servers or devices that communicate with the address. If you attempt to release an Elastic IP address that you already released, you'll get an AuthFailure error if the address is already allocated to another AWS account.

[EC2-VPC] After you release an Elastic IP address for use in a VPC, you might be able to recover it. For more information, see AllocateAddress.

" }, "ReleaseHosts":{ "name":"ReleaseHosts", @@ -2189,7 +2209,7 @@ "requestUri":"/" }, "input":{"shape":"RevokeSecurityGroupIngressRequest"}, - "documentation":"

Removes one or more ingress rules from a security group. The values that you specify in the revoke request (for example, ports) must match the existing rule's values for the rule to be removed.

Each rule consists of the protocol and the CIDR range or source security group. For the TCP and UDP protocols, you must also specify the destination port or range of ports. For the ICMP protocol, you must also specify the ICMP type and code.

Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.

" + "documentation":"

Removes one or more ingress rules from a security group. The values that you specify in the revoke request (for example, ports) must match the existing rule's values for the rule to be removed.

[EC2-Classic security groups only] If the values you specify do not match the existing rule's values, no error is returned. Use DescribeSecurityGroups to verify that the rule has been removed.

Each rule consists of the protocol and the CIDR range or source security group. For the TCP and UDP protocols, you must also specify the destination port or range of ports. For the ICMP protocol, you must also specify the ICMP type and code.

Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.

" }, "RunInstances":{ "name":"RunInstances", @@ -2495,6 +2515,10 @@ "shape":"DomainType", "documentation":"

Set to vpc to allocate the address for use with instances in a VPC.

Default: The address is for use with instances in EC2-Classic.

" }, + "Address":{ + "shape":"String", + "documentation":"

[EC2-VPC] The Elastic IP address to recover.

" + }, "DryRun":{ "shape":"Boolean", "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", @@ -4106,6 +4130,27 @@ }, "documentation":"

Contains the output of CreateCustomerGateway.

" }, + "CreateDefaultVpcRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + }, + "documentation":"

Contains the parameters for CreateDefaultVpc.

" + }, + "CreateDefaultVpcResult":{ + "type":"structure", + "members":{ + "Vpc":{ + "shape":"Vpc", + "documentation":"

Information about the VPC.

", + "locationName":"vpc" + } + }, + "documentation":"

Contains the output of CreateDefaultVpc.

" + }, "CreateDhcpOptionsRequest":{ "type":"structure", "required":["DhcpConfigurations"], @@ -4554,7 +4599,8 @@ "shape":"Boolean", "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" } - } + }, + "documentation":"

Contains the parameters for CreateNetworkInterfacePermission.

" }, "CreateNetworkInterfacePermissionResult":{ "type":"structure", @@ -4564,7 +4610,8 @@ "documentation":"

Information about the permission for the network interface.

", "locationName":"interfacePermission" } - } + }, + "documentation":"

Contains the output of CreateNetworkInterfacePermission.

" }, "CreateNetworkInterfaceRequest":{ "type":"structure", @@ -5513,7 +5560,8 @@ "shape":"Boolean", "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" } - } + }, + "documentation":"

Contains the parameters for DeleteNetworkInterfacePermission.

" }, "DeleteNetworkInterfacePermissionResult":{ "type":"structure", @@ -5523,7 +5571,8 @@ "documentation":"

Returns true if the request succeeds, otherwise returns an error.

", "locationName":"return" } - } + }, + "documentation":"

Contains the output for DeleteNetworkInterfacePermission.

" }, "DeleteNetworkInterfaceRequest":{ "type":"structure", @@ -6150,6 +6199,53 @@ } } }, + "DescribeElasticGpusRequest":{ + "type":"structure", + "members":{ + "ElasticGpuIds":{ + "shape":"ElasticGpuIdSet", + "documentation":"

One or more Elastic GPU IDs.

", + "locationName":"ElasticGpuId" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

One or more filters.

", + "locationName":"Filter" + }, + "MaxResults":{ + "shape":"Integer", + "documentation":"

The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value. This value can be between 5 and 1000.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token to request the next page of results.

" + } + } + }, + "DescribeElasticGpusResult":{ + "type":"structure", + "members":{ + "ElasticGpuSet":{ + "shape":"ElasticGpuSet", + "documentation":"

Information about the Elastic GPUs.

", + "locationName":"elasticGpuSet" + }, + "MaxResults":{ + "shape":"Integer", + "documentation":"

The maximum number of items to return. If the total number of items available is more than the value specified in MaxResults, then a NextToken is provided in the output that you can use to resume pagination.

", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", + "locationName":"nextToken" + } + } + }, "DescribeExportTasksRequest":{ "type":"structure", "members":{ @@ -6996,7 +7092,8 @@ "shape":"Integer", "documentation":"

The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value. If this parameter is not specified, up to 50 results are returned by default.

" } - } + }, + "documentation":"

Contains the parameters for DescribeNetworkInterfacePermissions.

" }, "DescribeNetworkInterfacePermissionsResult":{ "type":"structure", @@ -7011,7 +7108,8 @@ "documentation":"

The token to use to retrieve the next page of results.

", "locationName":"nextToken" } - } + }, + "documentation":"

Contains the output for DescribeNetworkInterfacePermissions.

" }, "DescribeNetworkInterfacesRequest":{ "type":"structure", @@ -9150,6 +9248,126 @@ "locationName":"item" } }, + "ElasticGpuAssociation":{ + "type":"structure", + "members":{ + "ElasticGpuId":{ + "shape":"String", + "documentation":"

The ID of the Elastic GPU.

", + "locationName":"elasticGpuId" + }, + "ElasticGpuAssociationId":{ + "shape":"String", + "documentation":"

The ID of the association.

", + "locationName":"elasticGpuAssociationId" + }, + "ElasticGpuAssociationState":{ + "shape":"String", + "documentation":"

The state of the association between the instance and the Elastic GPU.

", + "locationName":"elasticGpuAssociationState" + }, + "ElasticGpuAssociationTime":{ + "shape":"String", + "documentation":"

The time the Elastic GPU was associated with the instance.

", + "locationName":"elasticGpuAssociationTime" + } + }, + "documentation":"

Describes the association between an instance and an Elastic GPU.

" + }, + "ElasticGpuAssociationList":{ + "type":"list", + "member":{ + "shape":"ElasticGpuAssociation", + "locationName":"item" + } + }, + "ElasticGpuHealth":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"ElasticGpuStatus", + "documentation":"

The health status.

", + "locationName":"status" + } + }, + "documentation":"

Describes the status of an Elastic GPU.

" + }, + "ElasticGpuIdSet":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "ElasticGpuSet":{ + "type":"list", + "member":{"shape":"ElasticGpus"} + }, + "ElasticGpuSpecification":{ + "type":"structure", + "required":["Type"], + "members":{ + "Type":{ + "shape":"String", + "documentation":"

The type of Elastic GPU.

" + } + }, + "documentation":"

A specification for an Elastic GPU.

" + }, + "ElasticGpuSpecifications":{ + "type":"list", + "member":{ + "shape":"ElasticGpuSpecification", + "locationName":"item" + } + }, + "ElasticGpuState":{ + "type":"string", + "enum":["ATTACHED"] + }, + "ElasticGpuStatus":{ + "type":"string", + "enum":[ + "OK", + "IMPAIRED" + ] + }, + "ElasticGpus":{ + "type":"structure", + "members":{ + "ElasticGpuId":{ + "shape":"String", + "documentation":"

The ID of the Elastic GPU.

", + "locationName":"elasticGpuId" + }, + "AvailabilityZone":{ + "shape":"String", + "documentation":"

The Availability Zone in which the Elastic GPU resides.

", + "locationName":"availabilityZone" + }, + "ElasticGpuType":{ + "shape":"String", + "documentation":"

The type of Elastic GPU.

", + "locationName":"elasticGpuType" + }, + "ElasticGpuHealth":{ + "shape":"ElasticGpuHealth", + "documentation":"

The status of the Elastic GPU.

", + "locationName":"elasticGpuHealth" + }, + "ElasticGpuState":{ + "shape":"ElasticGpuState", + "documentation":"

The state of the Elastic GPU.

", + "locationName":"elasticGpuState" + }, + "InstanceId":{ + "shape":"String", + "documentation":"

The ID of the instance to which the Elastic GPU is attached.

", + "locationName":"instanceId" + } + }, + "documentation":"

Describes an Elastic GPU.

" + }, "EnableVgwRoutePropagationRequest":{ "type":"structure", "required":[ @@ -10148,7 +10366,10 @@ }, "HostReservationSet":{ "type":"list", - "member":{"shape":"HostReservation"} + "member":{ + "shape":"HostReservation", + "locationName":"item" + } }, "HostTenancy":{ "type":"string", @@ -11258,6 +11479,11 @@ "documentation":"

Indicates whether this is a Spot instance or a Scheduled Instance.

", "locationName":"instanceLifecycle" }, + "ElasticGpuAssociations":{ + "shape":"ElasticGpuAssociationList", + "documentation":"

The Elastic GPU associated with the instance.

", + "locationName":"elasticGpuAssociationSet" + }, "NetworkInterfaces":{ "shape":"InstanceNetworkInterfaceList", "documentation":"

[EC2-VPC] One or more network interfaces for the instance.

", @@ -16334,6 +16560,10 @@ "documentation":"

[EC2-VPC] The primary IPv4 address. You must specify a value from the IPv4 address range of the subnet.

Only one private IP address can be designated as primary. You can't specify this option if you've specified the option to designate a private IP address as the primary IP address in a network interface specification. You cannot specify this option if you're launching more than one instance in the request.

", "locationName":"privateIpAddress" }, + "ElasticGpuSpecification":{ + "shape":"ElasticGpuSpecifications", + "documentation":"

An Elastic GPU to associate with the instance.

" + }, "TagSpecifications":{ "shape":"TagSpecificationList", "documentation":"

The tags to apply to the resources during launch. You can tag instances and volumes. The specified tags are applied to all instances or volumes that are created during launch.

", @@ -17407,6 +17637,11 @@ "shape":"Double", "documentation":"

The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms (instances or a performance characteristic such as vCPUs, memory, or I/O).

If the target capacity divided by this value is not a whole number, we round the number of instances to the next whole number. If this value is not specified, the default is 1.

", "locationName":"weightedCapacity" + }, + "TagSpecifications":{ + "shape":"SpotFleetTagSpecificationList", + "documentation":"

The tags to apply during creation.

", + "locationName":"tagSpecificationSet" } }, "documentation":"

Describes the launch specification for one or more Spot instances.

" @@ -17543,6 +17778,29 @@ "locationName":"item" } }, + "SpotFleetTagSpecification":{ + "type":"structure", + "members":{ + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

The type of resource. Currently, the only resource type that is supported is instance.

", + "locationName":"resourceType" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags.

", + "locationName":"tag" + } + }, + "documentation":"

The tags for a Spot fleet resource.

" + }, + "SpotFleetTagSpecificationList":{ + "type":"list", + "member":{ + "shape":"SpotFleetTagSpecification", + "locationName":"item" + } + }, "SpotInstanceRequest":{ "type":"structure", "members":{ diff --git a/botocore/data/efs/2015-02-01/service-2.json b/botocore/data/efs/2015-02-01/service-2.json index 6b13e1ba..23ea7204 100644 --- a/botocore/data/efs/2015-02-01/service-2.json +++ b/botocore/data/efs/2015-02-01/service-2.json @@ -223,6 +223,14 @@ "PerformanceMode":{ "shape":"PerformanceMode", "documentation":"

The PerformanceMode of the file system. We recommend generalPurpose performance mode for most file systems. File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. This can't be changed after the file system has been created.

" + }, + "Encrypted":{ + "shape":"Encrypted", + "documentation":"

A boolean value that, if true, creates an encrypted file system. When creating an encrypted file system, you have the option of specifying a CreateFileSystemRequest$KmsKeyId for an existing AWS Key Management Service (AWS KMS) customer master key (CMK). If you don't specify a CMK, then the default CMK for Amazon EFS, /aws/elasticfilesystem, is used to protect the encrypted file system.

" + }, + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

The id of the AWS KMS CMK that will be used to protect the encrypted file system. This parameter is only required if you want to use a non-default CMK. If this parameter is not specified, the default CMK for Amazon EFS is used. This id can be in one of the following formats:

Note that if the KmsKeyId is specified, the CreateFileSystemRequest$Encrypted parameter must be set to true.

" } } }, @@ -496,6 +504,7 @@ }, "documentation":"

" }, + "Encrypted":{"type":"boolean"}, "ErrorCode":{ "type":"string", "min":1 @@ -564,6 +573,14 @@ "PerformanceMode":{ "shape":"PerformanceMode", "documentation":"

The PerformanceMode of the file system.

" + }, + "Encrypted":{ + "shape":"Encrypted", + "documentation":"

A boolean value that, if true, indicates that the file system is encrypted.

" + }, + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

The id of an AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to protect the encrypted file system.

" } }, "documentation":"

Description of the file system.

" @@ -670,6 +687,11 @@ "error":{"httpStatusCode":409}, "exception":true }, + "KmsKeyId":{ + "type":"string", + "max":2048, + "min":1 + }, "LifeCycleState":{ "type":"string", "enum":[ @@ -885,7 +907,7 @@ "ErrorCode":{"shape":"ErrorCode"}, "Message":{"shape":"ErrorMessage"} }, - "documentation":"

", + "documentation":"

", "error":{"httpStatusCode":400}, "exception":true } diff --git a/botocore/data/elasticbeanstalk/2010-12-01/service-2.json b/botocore/data/elasticbeanstalk/2010-12-01/service-2.json old mode 100755 new mode 100644 index f32feb44..c5d5f325 --- a/botocore/data/elasticbeanstalk/2010-12-01/service-2.json +++ b/botocore/data/elasticbeanstalk/2010-12-01/service-2.json @@ -840,7 +840,7 @@ }, "NextToken":{ "shape":"Token", - "documentation":"

For a paginated request, the token that you can pass in a subsequent request to get the next page.

" + "documentation":"

In a paginated request, the token that you can pass in a subsequent request to get the next response page.

" } }, "documentation":"

Result message wrapping a list of application version descriptions.

" @@ -1196,7 +1196,7 @@ }, "PlatformArn":{ "shape":"PlatformArn", - "documentation":"

The ARN of the custom platform.

" + "documentation":"

The ARN of the platform.

" }, "Options":{ "shape":"ConfigurationOptionDescriptionsList", @@ -1214,7 +1214,7 @@ }, "PlatformArn":{ "shape":"PlatformArn", - "documentation":"

The ARN of the custom platform.

" + "documentation":"

The ARN of the platform.

" }, "ApplicationName":{ "shape":"ApplicationName", @@ -1397,7 +1397,7 @@ }, "EnvironmentName":{ "shape":"EnvironmentName", - "documentation":"

A unique name for the deployment environment. Used in the application URL.

Constraint: Must be from 4 to 40 characters in length. The name can contain only letters, numbers, and hyphens. It cannot start or end with a hyphen. This name must be unique in your account. If the specified name already exists, AWS Elastic Beanstalk returns an InvalidParameterValue error.

Default: If the CNAME parameter is not specified, the environment name becomes part of the CNAME, and therefore part of the visible URL for your application.

" + "documentation":"

A unique name for the deployment environment. Used in the application URL.

Constraint: Must be from 4 to 40 characters in length. The name can contain only letters, numbers, and hyphens. It cannot start or end with a hyphen. This name must be unique within a region in your account. If the specified name already exists in the region, AWS Elastic Beanstalk returns an InvalidParameterValue error.

Default: If the CNAME parameter is not specified, the environment name becomes part of the CNAME, and therefore part of the visible URL for your application.

" }, "GroupName":{ "shape":"GroupName", @@ -1433,7 +1433,7 @@ }, "PlatformArn":{ "shape":"PlatformArn", - "documentation":"

The ARN of the custom platform.

" + "documentation":"

The ARN of the platform.

" }, "OptionSettings":{ "shape":"ConfigurationOptionSettingsList", @@ -1638,7 +1638,7 @@ }, "DeploymentTime":{ "shape":"DeploymentTimestamp", - "documentation":"

For in-progress deployments, the time that the deloyment started.

For completed deployments, the time that the deployment ended.

" + "documentation":"

For in-progress deployments, the time that the deployment started.

For completed deployments, the time that the deployment ended.

" } }, "documentation":"

Information about an application version deployment.

" @@ -1657,11 +1657,11 @@ }, "MaxRecords":{ "shape":"MaxRecords", - "documentation":"

Specify a maximum number of application versions to paginate in the request.

" + "documentation":"

For a paginated request, specify a maximum number of application versions to include in each response.

If no MaxRecords is specified, all available application versions are retrieved in a single response.

" }, "NextToken":{ "shape":"Token", - "documentation":"

Specify a next token to retrieve the next page in a paginated request.

" + "documentation":"

For a paginated request, specify a token from a previous response page to retrieve the next response page. All other parameter values must be identical to the ones specified in the initial request.

If no NextToken is specified, the first page is retrieved.

" } }, "documentation":"

Request to describe application versions.

" @@ -1885,6 +1885,14 @@ "IncludedDeletedBackTo":{ "shape":"IncludeDeletedBackTo", "documentation":"

If specified when IncludeDeleted is set to true, then environments deleted after this date are displayed.

" + }, + "MaxRecords":{ + "shape":"MaxRecords", + "documentation":"

For a paginated request, specify a maximum number of environments to include in each response.

If no MaxRecords is specified, all available environments are retrieved in a single response.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

For a paginated request, specify a token from a previous response page to retrieve the next response page. All other parameter values must be identical to the ones specified in the initial request.

If no NextToken is specified, the first page is retrieved.

" } }, "documentation":"

Request to describe one or more environments.

" @@ -2018,6 +2026,7 @@ "exception":true }, "EndpointURL":{"type":"string"}, + "EnvironmentArn":{"type":"string"}, "EnvironmentDescription":{ "type":"structure", "members":{ @@ -2043,7 +2052,7 @@ }, "PlatformArn":{ "shape":"PlatformArn", - "documentation":"

The ARN of the custom platform.

" + "documentation":"

The ARN of the platform.

" }, "TemplateName":{ "shape":"ConfigurationTemplateName", @@ -2096,6 +2105,10 @@ "EnvironmentLinks":{ "shape":"EnvironmentLinks", "documentation":"

A list of links to other environments in the same group.

" + }, + "EnvironmentArn":{ + "shape":"EnvironmentArn", + "documentation":"

The environment's Amazon Resource Name (ARN), which can be used in other API requests that require an ARN.

" } }, "documentation":"

Describes the properties of an environment.

" @@ -2110,6 +2123,10 @@ "Environments":{ "shape":"EnvironmentDescriptionsList", "documentation":"

Returns an EnvironmentDescription list.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

In a paginated request, the token that you can pass in a subsequent request to get the next response page.

" } }, "documentation":"

Result message containing a list of environment descriptions.

" @@ -2330,7 +2347,7 @@ }, "PlatformArn":{ "shape":"PlatformArn", - "documentation":"

The ARN of the custom platform.

" + "documentation":"

The ARN of the platform.

" }, "RequestId":{ "shape":"RequestId", diff --git a/botocore/data/emr/2009-03-31/service-2.json b/botocore/data/emr/2009-03-31/service-2.json index 9fce8e75..ac8eef9c 100644 --- a/botocore/data/emr/2009-03-31/service-2.json +++ b/botocore/data/emr/2009-03-31/service-2.json @@ -233,7 +233,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Provides information about the cluster instances that Amazon EMR provisions on behalf of a user when it creates the cluster. For example, this operation indicates when the EC2 instances reach the Ready state, when instances become available to Amazon EMR to use for jobs, and the IP addresses for cluster instances, etc.

" + "documentation":"

Provides information for all active EC2 instances and EC2 instances terminated in the last 30 days, up to a maximum of 2,000. EC2 instances in any of the following states are considered active: AWAITING_FULFILLMENT, PROVISIONING, BOOTSTRAPPING, RUNNING.

" }, "ListSecurityConfigurations":{ "name":"ListSecurityConfigurations", @@ -524,7 +524,7 @@ "documentation":"

This option is for advanced users only. This is meta information about third-party applications that third-party vendors use for testing purposes.

" } }, - "documentation":"

An application is any Amazon or third-party software that you can add to the cluster. This structure contains a list of strings that indicates the software to use with the cluster and accepts a user argument list. Amazon EMR accepts and forwards the argument list to the corresponding installation script as bootstrap action argument. For more information, see Using the MapR Distribution for Hadoop. Currently supported values are:

In Amazon EMR releases 4.0 and greater, the only accepted parameter is the application name. To pass arguments to applications, you supply a configuration for each application.

" + "documentation":"

An application is any Amazon or third-party software that you can add to the cluster. This structure contains a list of strings that indicates the software to use with the cluster and accepts a user argument list. Amazon EMR accepts and forwards the argument list to the corresponding installation script as bootstrap action argument. For more information, see Using the MapR Distribution for Hadoop. Currently supported values are:

In Amazon EMR releases 4.x and later, the only accepted parameter is the application name. To pass arguments to applications, you supply a configuration for each application.

" }, "ApplicationList":{ "type":"list", @@ -789,7 +789,7 @@ }, "ReleaseLabel":{ "shape":"String", - "documentation":"

The release label for the Amazon EMR release. For Amazon EMR 3.x and 2.x AMIs, use amiVersion instead instead of ReleaseLabel.

" + "documentation":"

The release label for the Amazon EMR release.

" }, "AutoTerminate":{ "shape":"Boolean", @@ -825,7 +825,7 @@ }, "Configurations":{ "shape":"ConfigurationList", - "documentation":"

Amazon EMR releases 4.x or later.

The list of Configurations supplied to the EMR cluster.

" + "documentation":"

Applies only to Amazon EMR releases 4.x and later. The list of Configurations supplied to the EMR cluster.

" }, "SecurityConfiguration":{ "shape":"XmlString", @@ -838,6 +838,18 @@ "ScaleDownBehavior":{ "shape":"ScaleDownBehavior", "documentation":"

The way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an instance group is resized. TERMINATE_AT_INSTANCE_HOUR indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of when the request to terminate the instance was submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default for clusters created using that version. TERMINATE_AT_TASK_COMPLETION indicates that Amazon EMR blacklists and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes first and blocks instance termination if it could lead to HDFS corruption. TERMINATE_AT_TASK_COMPLETION is available only in Amazon EMR version 4.1.0 and later, and is the default for versions of Amazon EMR earlier than 5.1.0.

" + }, + "CustomAmiId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

Available only in Amazon EMR version 5.7.0 and later. The ID of a custom Amazon EBS-backed Linux AMI if the cluster uses a custom AMI.

" + }, + "EbsRootVolumeSize":{ + "shape":"Integer", + "documentation":"

The size, in GiB, of the EBS root device volume of the Linux AMI that is used for each EC2 instance. Available in Amazon EMR version 4.x and later.

" + }, + "RepoUpgradeOnBoot":{ + "shape":"RepoUpgradeOnBoot", + "documentation":"

Applies only when CustomAmiID is used. Specifies the type of updates that are applied from the Amazon Linux AMI package repositories when an instance boots using the AMI.

" } }, "documentation":"

The detailed description of the cluster.

" @@ -875,6 +887,7 @@ "INTERNAL_ERROR", "VALIDATION_ERROR", "INSTANCE_FAILURE", + "INSTANCE_FLEET_TIMEOUT", "BOOTSTRAP_FAILURE", "USER_REQUEST", "STEP_FAILURE", @@ -1248,7 +1261,7 @@ }, "RequestedEc2SubnetIds":{ "shape":"XmlStringMaxLen256List", - "documentation":"

Applies to clusters configured with the instance fleets option. Specifies the unique identifier of one or more Amazon EC2 subnets in which to launch EC2 cluster instances. Amazon EMR chooses the EC2 subnet with the best performance and cost characteristics from among the list of RequestedEc2SubnetIds and launches all cluster instances within that subnet. If this value is not specified, and the account supports EC2-Classic networks, the cluster launches instances in the EC2-Classic network and uses Requested

" + "documentation":"

Applies to clusters configured with the instance fleets option. Specifies the unique identifier of one or more Amazon EC2 subnets in which to launch EC2 cluster instances. Subnets must exist within the same VPC. Amazon EMR chooses the EC2 subnet with the best fit from among the list of RequestedEc2SubnetIds, and then launches all cluster instances within that Subnet. If this value is not specified, and the account and region support EC2-Classic networks, the cluster launches instances in the EC2-Classic network and uses RequestedEc2AvailabilityZones instead of this setting. If EC2-Classic is not supported, and no Subnet is specified, Amazon EMR chooses the subnet for you. RequestedEc2SubnetIDs and RequestedEc2AvailabilityZones cannot be specified together.

" }, "Ec2AvailabilityZone":{ "shape":"String", @@ -1256,7 +1269,7 @@ }, "RequestedEc2AvailabilityZones":{ "shape":"XmlStringMaxLen256List", - "documentation":"

Applies to clusters configured with the The list of availability zones to choose from. The service will choose the availability zone with the best mix of available capacity and lowest cost to launch the cluster. If you do not specify this value, the cluster is launched in any availability zone that the customer account has access to.

" + "documentation":"

Applies to clusters configured with the instance fleets option. Specifies one or more Availability Zones in which to launch EC2 cluster instances when the EC2-Classic network configuration is supported. Amazon EMR chooses the Availability Zone with the best fit from among the list of RequestedEc2AvailabilityZones, and then launches all cluster instances within that Availability Zone. If you do not specify this value, Amazon EMR chooses the Availability Zone for you. RequestedEc2SubnetIDs and RequestedEc2AvailabilityZones cannot be specified together.

" }, "IamInstanceProfile":{ "shape":"String", @@ -2041,7 +2054,7 @@ }, "WeightedCapacity":{ "shape":"WholeNumber", - "documentation":"

The number of units that a provisioned instance of this type provides toward fulfilling the target capacities defined in InstanceFleetConfig. This value is 1 for a master instance fleet, and must be greater than 0 for core and task instance fleets.

" + "documentation":"

The number of units that a provisioned instance of this type provides toward fulfilling the target capacities defined in InstanceFleetConfig. This value is 1 for a master instance fleet, and must be 1 or greater for core and task instance fleets. Defaults to 1 if not specified.

" }, "BidPrice":{ "shape":"XmlStringMaxLen256", @@ -2049,7 +2062,7 @@ }, "BidPriceAsPercentageOfOnDemandPrice":{ "shape":"NonNegativeDouble", - "documentation":"

The bid price, as a percentage of On-Demand price, for each EC2 Spot instance as defined by InstanceType. Expressed as a number between 0 and 1000 (for example, 20 specifies 20%). If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

" + "documentation":"

The bid price, as a percentage of On-Demand price, for each EC2 Spot instance as defined by InstanceType. Expressed as a number (for example, 20 specifies 20%). If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

" }, "EbsConfiguration":{ "shape":"EbsConfiguration", @@ -2162,7 +2175,7 @@ }, "AmiVersion":{ "shape":"XmlStringMaxLen256", - "documentation":"

The version of the AMI used to initialize Amazon EC2 instances in the job flow. For a list of AMI versions currently supported by Amazon EMR, see AMI Versions Supported in EMR in the Amazon EMR Developer Guide.

" + "documentation":"

Used only for version 2.x and 3.x of Amazon EMR. The version of the AMI used to initialize Amazon EC2 instances in the job flow. For a list of AMI versions supported by Amazon EMR, see AMI Versions Supported in EMR in the Amazon EMR Developer Guide.

" }, "ExecutionStatusDetail":{ "shape":"JobFlowExecutionStatusDetail", @@ -2811,6 +2824,13 @@ }, "documentation":"

This output indicates the result of removing tags from a resource.

" }, + "RepoUpgradeOnBoot":{ + "type":"string", + "enum":[ + "SECURITY", + "NONE" + ] + }, "ResourceId":{"type":"string"}, "RunJobFlowInput":{ "type":"structure", @@ -2833,11 +2853,11 @@ }, "AmiVersion":{ "shape":"XmlStringMaxLen256", - "documentation":"

For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater, use ReleaseLabel.

The version of the Amazon Machine Image (AMI) to use when launching Amazon EC2 instances in the job flow. The following values are valid:

If the AMI supports multiple versions of Hadoop (for example, AMI 1.0 supports both Hadoop 0.18 and 0.20) you can use the JobFlowInstancesConfig HadoopVersion parameter to modify the version of Hadoop from the defaults shown above.

For details about the AMI versions currently supported by Amazon Elastic MapReduce, see AMI Versions Supported in Elastic MapReduce in the Amazon Elastic MapReduce Developer Guide.

Previously, the EMR AMI version API parameter options allowed you to use latest for the latest AMI version rather than specify a numerical value. Some regions no longer support this deprecated option as they only have a newer release label version of EMR, which requires you to specify an EMR release label release (EMR 4.x or later).

" + "documentation":"

For Amazon EMR AMI versions 3.x and 2.x. For Amazon EMR releases 4.0 and later, the Linux AMI is determined by the ReleaseLabel specified or by CustomAmiID. The version of the Amazon Machine Image (AMI) to use when launching Amazon EC2 instances in the job flow. For details about the AMI versions currently supported in EMR version 3.x and 2.x, see AMI Versions Supported in EMR in the Amazon EMR Developer Guide.

If the AMI supports multiple versions of Hadoop (for example, AMI 1.0 supports both Hadoop 0.18 and 0.20), you can use the JobFlowInstancesConfig HadoopVersion parameter to modify the version of Hadoop from the defaults shown above.

Previously, the EMR AMI version API parameter options allowed you to use latest for the latest AMI version rather than specify a numerical value. Some regions no longer support this deprecated option as they only have a newer release label version of EMR, which requires you to specify an EMR release label (EMR 4.x or later).

" }, "ReleaseLabel":{ "shape":"XmlStringMaxLen256", - "documentation":"

Amazon EMR releases 4.x or later.

The release label for the Amazon EMR release. For Amazon EMR 3.x and 2.x AMIs, use amiVersion instead instead of ReleaseLabel.

" + "documentation":"

The release label for the Amazon EMR release. For Amazon EMR 3.x and 2.x AMIs, use AmiVersion instead.

" }, "Instances":{ "shape":"JobFlowInstancesConfig", @@ -2853,19 +2873,19 @@ }, "SupportedProducts":{ "shape":"SupportedProductsList", - "documentation":"

For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater, use Applications.

A list of strings that indicates third-party software to use. For more information, see Use Third Party Applications with Amazon EMR. Currently supported values are:

" + "documentation":"

For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and later, use Applications.

A list of strings that indicates third-party software to use. For more information, see Use Third Party Applications with Amazon EMR. Currently supported values are:

" }, "NewSupportedProducts":{ "shape":"NewSupportedProductsList", - "documentation":"

For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater, use Applications.

A list of strings that indicates third-party software to use with the job flow that accepts a user argument list. EMR accepts and forwards the argument list to the corresponding installation script as bootstrap action arguments. For more information, see \"Launch a Job Flow on the MapR Distribution for Hadoop\" in the Amazon EMR Developer Guide. Supported values are:

" + "documentation":"

For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and later, use Applications.

A list of strings that indicates third-party software to use with the job flow that accepts a user argument list. EMR accepts and forwards the argument list to the corresponding installation script as bootstrap action arguments. For more information, see \"Launch a Job Flow on the MapR Distribution for Hadoop\" in the Amazon EMR Developer Guide. Supported values are:

" }, "Applications":{ "shape":"ApplicationList", - "documentation":"

Amazon EMR releases 4.x or later.

A list of applications for the cluster. Valid values are: \"Hadoop\", \"Hive\", \"Mahout\", \"Pig\", and \"Spark.\" They are case insensitive.

" + "documentation":"

For Amazon EMR releases 4.0 and later. A list of applications for the cluster. Valid values are: \"Hadoop\", \"Hive\", \"Mahout\", \"Pig\", and \"Spark.\" They are case insensitive.

" }, "Configurations":{ "shape":"ConfigurationList", - "documentation":"

Amazon EMR releases 4.x or later.

The list of configurations supplied for the EMR cluster you are creating.

" + "documentation":"

For Amazon EMR releases 4.0 and later. The list of configurations supplied for the EMR cluster you are creating.

" }, "VisibleToAllUsers":{ "shape":"Boolean", @@ -2894,6 +2914,18 @@ "ScaleDownBehavior":{ "shape":"ScaleDownBehavior", "documentation":"

Specifies the way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an instance group is resized. TERMINATE_AT_INSTANCE_HOUR indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of when the request to terminate the instance was submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default for clusters created using that version. TERMINATE_AT_TASK_COMPLETION indicates that Amazon EMR blacklists and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes first and blocks instance termination if it could lead to HDFS corruption. TERMINATE_AT_TASK_COMPLETION is available only in Amazon EMR version 4.1.0 and later, and is the default for versions of Amazon EMR earlier than 5.1.0.

" + }, + "CustomAmiId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

Available only in Amazon EMR version 5.7.0 and later. The ID of a custom Amazon EBS-backed Linux AMI. If specified, Amazon EMR uses this AMI when it launches cluster EC2 instances. For more information about custom AMIs in Amazon EMR, see Using a Custom AMI in the Amazon EMR Management Guide. If omitted, the cluster uses the base Linux AMI for the ReleaseLabel specified. For Amazon EMR versions 2.x and 3.x, use AmiVersion instead.

For information about creating a custom AMI, see Creating an Amazon EBS-Backed Linux AMI in the Amazon Elastic Compute Cloud User Guide for Linux Instances. For information about finding an AMI ID, see Finding a Linux AMI.

" + }, + "EbsRootVolumeSize":{ + "shape":"Integer", + "documentation":"

The size, in GiB, of the EBS root device volume of the Linux AMI that is used for each EC2 instance. Available in Amazon EMR version 4.x and later.

" + }, + "RepoUpgradeOnBoot":{ + "shape":"RepoUpgradeOnBoot", + "documentation":"

Applies only when CustomAmiID is used. Specifies which updates from the Amazon Linux AMI package repositories to apply automatically when the instance boots using the AMI. If omitted, the default is SECURITY, which indicates that only security updates are applied. If NONE is specified, no updates are applied, and all updates must be applied manually.

" } }, "documentation":"

Input to the RunJobFlow operation.
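A hedged boto3 sketch of a RunJobFlow call that exercises the three parameters added above (CustomAmiId, EbsRootVolumeSize, RepoUpgradeOnBoot); the AMI ID, cluster name, sizing, and role names are placeholder assumptions:

import boto3

emr = boto3.client('emr')

response = emr.run_job_flow(
    Name='custom-ami-cluster',        # illustrative name
    ReleaseLabel='emr-5.7.0',         # custom AMIs require EMR 5.7.0 or later
    CustomAmiId='ami-12345678',       # placeholder EBS-backed Linux AMI ID
    EbsRootVolumeSize=20,             # EBS root device volume size, in GiB
    RepoUpgradeOnBoot='SECURITY',     # apply only security updates at boot
    Instances={
        'MasterInstanceType': 'm4.large',
        'SlaveInstanceType': 'm4.large',
        'InstanceCount': 3,
        'KeepJobFlowAliveWhenNoSteps': False,
    },
    JobFlowRole='EMR_EC2_DefaultRole',
    ServiceRole='EMR_DefaultRole',
)
print(response['JobFlowId'])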

" @@ -3109,7 +3141,7 @@ }, "TimeoutAction":{ "shape":"SpotProvisioningTimeoutAction", - "documentation":"

The action to take when TargetSpotCapacity has not been fulfilled when the TimeoutDurationMinutes has expired. Spot instances are not uprovisioned within the Spot provisioining timeout. Valid values are TERMINATE_CLUSTER and SWITCH_TO_ON_DEMAND to fulfill the remaining capacity.

" + "documentation":"

The action to take when TargetSpotCapacity has not been fulfilled when the TimeoutDurationMinutes has expired; that is, when Spot instances could not be provisioned within the Spot provisioning timeout. Valid values are TERMINATE_CLUSTER and SWITCH_TO_ON_DEMAND. SWITCH_TO_ON_DEMAND specifies that if no Spot instances are available, On-Demand Instances should be provisioned to fulfill any remaining Spot capacity.
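For illustration, a LaunchSpecifications fragment (boto3/Python) that falls back to On-Demand capacity when the Spot provisioning timeout expires; the duration is an arbitrary example value:

spot_fallback = {
    'SpotSpecification': {
        'TimeoutDurationMinutes': 20,            # example timeout
        'TimeoutAction': 'SWITCH_TO_ON_DEMAND',  # or 'TERMINATE_CLUSTER'
    }
}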

" }, "BlockDurationMinutes":{ "shape":"WholeNumber", diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 204f5b1d..cdcabfd3 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -163,6 +163,7 @@ "batch" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, "eu-west-1" : { }, @@ -240,6 +241,14 @@ "us-west-2" : { } } }, + "cloudhsmv2" : { + "endpoints" : { + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, "cloudsearch" : { "endpoints" : { "ap-northeast-1" : { }, @@ -326,6 +335,8 @@ "codepipeline" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, @@ -341,9 +352,14 @@ }, "codestar" : { "endpoints" : { + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -352,6 +368,7 @@ "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, + "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, "eu-west-1" : { }, @@ -366,6 +383,7 @@ "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, + "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, "eu-west-1" : { }, @@ -380,6 +398,7 @@ "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, + "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, "eu-west-1" : { }, @@ -495,6 +514,7 @@ "eu-central-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-2" : { } @@ -618,6 +638,7 @@ "elasticfilesystem" : { "endpoints" : { "ap-southeast-2" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -775,6 +796,11 @@ "us-west-2" : { } } }, + "glue" : { + "endpoints" : { + "us-east-1" : { } + } + }, "greengrass" : { "defaults" : { "protocols" : [ "https" ] @@ -976,6 +1002,11 @@ "us-west-2" : { } } }, + "mgh" : { + "endpoints" : { + "us-west-2" : { } + } + }, "mobileanalytics" : { "endpoints" : { "us-east-1" : { } @@ -1250,7 +1281,6 @@ "ap-northeast-2" : { }, "ap-south-1" : { }, "ap-southeast-2" : { }, - "ca-central-1" : { }, "eu-central-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, @@ -1265,6 +1295,7 @@ "eu-central-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -1340,6 +1371,7 @@ "ap-southeast-2" : { }, "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-2" : { } @@ -1507,6 +1539,7 @@ "ap-northeast-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -1566,6 +1599,18 @@ } }, "services" : { + "application-autoscaling" : { + "defaults" : { + "credentialScope" : { + "service" : "application-autoscaling" + }, + "hostname" : "autoscaling.{region}.amazonaws.com", + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "cn-north-1" : { } + } + }, "autoscaling" : { "defaults" : { "protocols" : [ "http", "https" ] @@ -1676,6 +1721,16 @@ "isRegionalized" : false, "partitionEndpoint" : "aws-cn-global" }, + "iot" : { + "defaults" : { + "credentialScope" : { + "service" : "execute-api" + } + }, + "endpoints" : { + "cn-north-1" : { } + } + }, "kinesis" : { "endpoints" : { "cn-north-1" : { } @@ -1783,6 +1838,16 @@ } }, 
"services" : { + "acm" : { + "endpoints" : { + "us-gov-west-1" : { } + } + }, + "apigateway" : { + "endpoints" : { + "us-gov-west-1" : { } + } + }, "autoscaling" : { "endpoints" : { "us-gov-west-1" : { @@ -1983,4 +2048,4 @@ } } ], "version" : 3 -} +} \ No newline at end of file diff --git a/botocore/data/firehose/2015-08-04/paginators-1.json b/botocore/data/firehose/2015-08-04/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/firehose/2015-08-04/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/firehose/2015-08-04/service-2.json b/botocore/data/firehose/2015-08-04/service-2.json index 8ec8c84a..0a539165 100644 --- a/botocore/data/firehose/2015-08-04/service-2.json +++ b/botocore/data/firehose/2015-08-04/service-2.json @@ -25,7 +25,7 @@ {"shape":"LimitExceededException"}, {"shape":"ResourceInUseException"} ], - "documentation":"

Creates a delivery stream.

By default, you can create up to 20 delivery streams per region.

This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream.

A delivery stream is configured with a single destination: Amazon S3, Amazon Elasticsearch Service, or Amazon Redshift. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, or RedshiftDestinationConfiguration.

When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. Note that BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly; for example, record boundaries are such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.

A few notes about Amazon Redshift as a destination:

Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Firehose principal to assume the role, and the role should have permissions that allows the service to deliver the data. For more information, see Amazon S3 Bucket Access in the Amazon Kinesis Firehose Developer Guide.

" + "documentation":"

Creates a delivery stream.

By default, you can create up to 20 delivery streams per region.

This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream.

A Kinesis Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream ARN and role ARN in the KinesisStreamSourceConfiguration parameter.

A delivery stream is configured with a single destination: Amazon S3, Amazon ES, or Amazon Redshift. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, or RedshiftDestinationConfiguration.

When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Kinesis Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. Note that BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly; for example, record boundaries are such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.

A few notes about Amazon Redshift as a destination:

Kinesis Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Amazon S3 Bucket Access in the Amazon Kinesis Firehose Developer Guide.
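As a sketch of the Kinesis-stream-as-source path described above, a boto3 call with placeholder names and ARNs (the stream, roles, and bucket are assumptions):

import boto3

firehose = boto3.client('firehose')

firehose.create_delivery_stream(
    DeliveryStreamName='example-stream',
    DeliveryStreamType='KinesisStreamAsSource',
    KinesisStreamSourceConfiguration={
        'KinesisStreamARN': 'arn:aws:kinesis:us-east-1:111122223333:stream/source',
        'RoleARN': 'arn:aws:iam::111122223333:role/firehose-read-kinesis',
    },
    # Exactly one destination configuration may be supplied.
    S3DestinationConfiguration={
        'RoleARN': 'arn:aws:iam::111122223333:role/firehose-to-s3',
        'BucketARN': 'arn:aws:s3:::example-bucket',
    },
)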

" }, "DeleteDeliveryStream":{ "name":"DeleteDeliveryStream", @@ -54,6 +54,21 @@ ], "documentation":"

Describes the specified delivery stream and gets the status. For example, after your delivery stream is created, call DescribeDeliveryStream to see if the delivery stream is ACTIVE and therefore ready for data to be sent to it.
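A small polling sketch (boto3) for the ACTIVE check described here; the stream name and sleep interval are arbitrary:

import time
import boto3

firehose = boto3.client('firehose')

while True:
    desc = firehose.describe_delivery_stream(DeliveryStreamName='example-stream')
    status = desc['DeliveryStreamDescription']['DeliveryStreamStatus']
    if status == 'ACTIVE':
        break            # the stream is ready to accept data
    time.sleep(15)       # still CREATING; check again shortly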

" }, + "GetKinesisStream":{ + "name":"GetKinesisStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetKinesisStreamInput"}, + "output":{"shape":"GetKinesisStreamOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"InvalidStreamTypeException"} + ], + "internalonly":true + }, "ListDeliveryStreams":{ "name":"ListDeliveryStreams", "http":{ @@ -77,7 +92,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Writes a single data record into an Amazon Kinesis Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers.

By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. Note that if you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Firehose Limits.

You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data, for example, a segment from a log file, geographic location data, web site clickstream data, etc.

Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application(s) to parse individual data items when reading the data from the destination.

The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.

If the PutRecord operation throws a ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

Data records sent to Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

" + "documentation":"

Writes a single data record into an Amazon Kinesis Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers.

By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. Note that if you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Firehose Limits.

You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data, for example, a segment from a log file, geographic location data, website clickstream data, and so on.

Kinesis Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.

If the PutRecord operation throws a ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

Data records sent to Kinesis Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.
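A minimal producer sketch for the behavior described above, appending a newline delimiter so the consumer can split records at the destination (the stream name and payload are placeholders):

import boto3

firehose = boto3.client('firehose')

result = firehose.put_record(
    DeliveryStreamName='example-stream',
    Record={'Data': b'{"event": "click", "page": "/home"}\n'},  # \n as delimiter
)
print(result['RecordId'])  # unique per record; usable for auditing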

" }, "PutRecordBatch":{ "name":"PutRecordBatch", @@ -92,7 +107,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers.

By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. Note that if you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits, see Amazon Kinesis Firehose Limits.

Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before 64-bit encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.

You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data, for example, a segment from a log file, geographic location data, web site clickstream data, and so on.

Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application(s) to parse individual data items when reading the data from the destination.

The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Each entry in the RequestResponses array provides additional information about the processed record, and directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Firehose attempts to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records.

A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailable or InternalFailure. ErrorMessage provides more detailed information about the error.

If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination.

If PutRecordBatch throws ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

Data records sent to Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

" + "documentation":"

Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers.

By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits, see Amazon Kinesis Firehose Limits.

Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.

You must specify the name of the delivery stream and the data record when using PutRecordBatch. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on.

Kinesis Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Firehose attempts to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records.

A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailable or InternalFailure. ErrorMessage provides more detailed information about the error.

If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination.

If PutRecordBatch throws ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

Data records sent to Kinesis Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.
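A sketch of the FailedPutCount handling recommended above (boto3; the stream name and records are illustrative). RequestResponses is ordered like the request, so failed entries are matched by position and resent; a production producer would also back off between retries:

import boto3

firehose = boto3.client('firehose')

records = [{'Data': ('record %d\n' % i).encode()} for i in range(10)]

while records:
    response = firehose.put_record_batch(
        DeliveryStreamName='example-stream', Records=records)
    if response['FailedPutCount'] == 0:
        break
    # Keep only the entries that failed, identified by ErrorCode.
    records = [rec for rec, res in zip(records, response['RequestResponses'])
               if 'ErrorCode' in res]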

" }, "UpdateDestination":{ "name":"UpdateDestination", @@ -108,7 +123,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

Updates the specified destination of the specified delivery stream.

You can use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes.

Note that switching between Amazon ES and other services is not supported. For an Amazon ES destination, you can only update to another Amazon ES destination.

If the destination type is the same, Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified then the existing EncryptionConfiguration is maintained on the destination.

If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Firehose does not merge any parameters. In this case, all parameters must be specified.

Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. You should use the new version ID to set CurrentDeliveryStreamVersionId in the next call.

" + "documentation":"

Updates the specified destination of the specified delivery stream.

You can use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes.

Note that switching between Amazon ES and other services is not supported. For an Amazon ES destination, you can only update to another Amazon ES destination.

If the destination type is the same, Kinesis Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the destination.

If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Kinesis Firehose does not merge any parameters. In this case, all parameters must be specified.

Kinesis Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next call.
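A sketch of the version-checked update flow just described, reading VersionId and DestinationId from DescribeDeliveryStream first (the stream name and prefix are assumptions):

import boto3

firehose = boto3.client('firehose')

desc = firehose.describe_delivery_stream(
    DeliveryStreamName='example-stream')['DeliveryStreamDescription']

firehose.update_destination(
    DeliveryStreamName='example-stream',
    CurrentDeliveryStreamVersionId=desc['VersionId'],
    DestinationId=desc['Destinations'][0]['DestinationId'],
    # For same-type updates, unspecified parameters keep their existing values.
    ExtendedS3DestinationUpdate={'Prefix': 'new-prefix/'},
)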

" } }, "shapes":{ @@ -118,6 +133,7 @@ "min":1, "pattern":"arn:.*" }, + "AccessKeyId":{"type":"string"}, "BooleanObject":{"type":"boolean"}, "BucketARN":{ "type":"string", @@ -137,7 +153,7 @@ "documentation":"

Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.

" } }, - "documentation":"

Describes hints for the buffering to perform before delivering data to the destination. Please note that these options are treated as hints, and therefore Firehose may choose to use different values when it is optimal.

" + "documentation":"

Describes hints for the buffering to perform before delivering data to the destination. Please note that these options are treated as hints, and therefore Kinesis Firehose may choose to use different values when it is optimal.

" }, "CloudWatchLoggingOptions":{ "type":"structure", @@ -155,7 +171,7 @@ "documentation":"

The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.

" } }, - "documentation":"

Describes the CloudWatch logging options for your delivery stream.

" + "documentation":"

Describes the Amazon CloudWatch logging options for your delivery stream.

" }, "ClusterJDBCURL":{ "type":"string", @@ -196,7 +212,7 @@ }, "CopyOptions":{ "shape":"CopyOptions", - "documentation":"

Optional parameters to use with the Amazon Redshift COPY command. For more information, see the \"Optional Parameters\" section of Amazon Redshift COPY command. Some possible examples that would apply to Firehose are as follows:

delimiter '\\t' lzop; - fields are delimited with \"\\t\" (TAB character) and compressed using lzop.

delimiter '| - fields are delimited with \"|\" (this is the default delimiter).

delimiter '|' escape - the delimiter should be escaped.

fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6' - fields are fixed width in the source, with each width specified after every column in the table.

JSON 's3://mybucket/jsonpaths.txt' - data is in JSON format, and the path specified is the format of the data.

For more examples, see Amazon Redshift COPY command examples.

" + "documentation":"

Optional parameters to use with the Amazon Redshift COPY command. For more information, see the \"Optional Parameters\" section of Amazon Redshift COPY command. Some possible examples that would apply to Kinesis Firehose are as follows:

delimiter '\\t' lzop; - fields are delimited with \"\\t\" (TAB character) and compressed using lzop.

delimiter '|' - fields are delimited with \"|\" (this is the default delimiter).

delimiter '|' escape - the delimiter should be escaped.

fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6' - fields are fixed width in the source, with each width specified after every column in the table.

JSON 's3://mybucket/jsonpaths.txt' - data is in JSON format, and the path specified is the format of the data.

For more examples, see Amazon Redshift COPY command examples.

" } }, "documentation":"

Describes a COPY command for Amazon Redshift.

" @@ -208,7 +224,15 @@ "members":{ "DeliveryStreamName":{ "shape":"DeliveryStreamName", - "documentation":"

The name of the delivery stream. This name must be unique per AWS account in the same region. You can have multiple delivery streams with the same name if they are in different accounts or different regions.

" + "documentation":"

The name of the delivery stream. This name must be unique per AWS account in the same region. If the delivery streams are in different accounts or different regions, you can have multiple delivery streams with the same name.

" + }, + "DeliveryStreamType":{ + "shape":"DeliveryStreamType", + "documentation":"

The delivery stream type. This parameter can be one of the following values: DirectPut, for provider applications that write directly to the delivery stream, or KinesisStreamAsSource, for a delivery stream that uses a Kinesis stream as its source.

" + }, + "KinesisStreamSourceConfiguration":{ + "shape":"KinesisStreamSourceConfiguration", + "documentation":"

When a Kinesis stream is used as the source for the delivery stream, a KinesisStreamSourceConfiguration containing the Kinesis stream ARN and the role ARN for the source stream.

" }, "S3DestinationConfiguration":{ "shape":"S3DestinationConfiguration", @@ -263,6 +287,7 @@ "members":{ } }, + "DeliveryStartTimestamp":{"type":"timestamp"}, "DeliveryStreamARN":{ "type":"string", "max":512, @@ -275,6 +300,7 @@ "DeliveryStreamName", "DeliveryStreamARN", "DeliveryStreamStatus", + "DeliveryStreamType", "VersionId", "Destinations", "HasMoreDestinations" @@ -292,6 +318,10 @@ "shape":"DeliveryStreamStatus", "documentation":"

The status of the delivery stream.

" }, + "DeliveryStreamType":{ + "shape":"DeliveryStreamType", + "documentation":"

The delivery stream type. This can be one of the following values: DirectPut or KinesisStreamAsSource.

" + }, "VersionId":{ "shape":"DeliveryStreamVersionId", "documentation":"

Each time the destination is updated for a delivery stream, the version ID is changed, and the current version ID is required when updating the destination. This is so that the service knows it is applying the changes to the correct version of the delivery stream.

" @@ -304,6 +334,10 @@ "shape":"Timestamp", "documentation":"

The date and time that the delivery stream was last updated.

" }, + "Source":{ + "shape":"SourceDescription", + "documentation":"

If the DeliveryStreamType parameter is KinesisStreamAsSource, a SourceDescription object describing the source Kinesis stream.

" + }, "Destinations":{ "shape":"DestinationDescriptionList", "documentation":"

The destinations.

" @@ -333,6 +367,13 @@ "ACTIVE" ] }, + "DeliveryStreamType":{ + "type":"string", + "enum":[ + "DirectPut", + "KinesisStreamAsSource" + ] + }, "DeliveryStreamVersionId":{ "type":"string", "max":50, @@ -353,7 +394,7 @@ }, "ExclusiveStartDestinationId":{ "shape":"DestinationId", - "documentation":"

The ID of the destination to start returning the destination information. Currently Firehose supports one destination per delivery stream.

" + "documentation":"

The ID of the destination to start returning the destination information. Currently, Kinesis Firehose supports one destination per delivery stream.

" } } }, @@ -444,7 +485,7 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Amazon S3 Bucket Access.

" + "documentation":"

The ARN of the IAM role to be assumed by Kinesis Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Amazon S3 Bucket Access.

" }, "DomainARN":{ "shape":"ElasticsearchDomainARN", @@ -460,7 +501,7 @@ }, "IndexRotationPeriod":{ "shape":"ElasticsearchIndexRotationPeriod", - "documentation":"

The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. For more information, see Index Rotation for Amazon Elasticsearch Service Destination. The default value is OneDay.

" + "documentation":"

The Elasticsearch index rotation period. Index rotation appends a time stamp to the IndexName to facilitate the expiration of old data. For more information, see Index Rotation for Amazon Elasticsearch Service Destination. The default value is OneDay.

" }, "BufferingHints":{ "shape":"ElasticsearchBufferingHints", @@ -468,15 +509,15 @@ }, "RetryOptions":{ "shape":"ElasticsearchRetryOptions", - "documentation":"

The retry behavior in the event that Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).

" + "documentation":"

The retry behavior in case Kinesis Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).

" }, "S3BackupMode":{ "shape":"ElasticsearchS3BackupMode", - "documentation":"

Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly, Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with elasticsearch-failed/ appended to the key prefix. When set to AllDocuments, Firehose delivers all incoming records to Amazon S3, and also writes failed documents with elasticsearch-failed/ appended to the prefix. For more information, see Amazon S3 Backup for Amazon Elasticsearch Service Destination. Default value is FailedDocumentsOnly.

" + "documentation":"

Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly, Kinesis Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with elasticsearch-failed/ appended to the key prefix. When set to AllDocuments, Kinesis Firehose delivers all incoming records to Amazon S3, and also writes failed documents with elasticsearch-failed/ appended to the prefix. For more information, see Amazon S3 Backup for Amazon Elasticsearch Service Destination. Default value is FailedDocumentsOnly.

" }, "S3Configuration":{ "shape":"S3DestinationConfiguration", - "documentation":"

The configuration for the intermediate Amazon S3 location from which Amazon ES obtains data.

" + "documentation":"

The configuration for the backup Amazon S3 location.

" }, "ProcessingConfiguration":{ "shape":"ProcessingConfiguration", @@ -544,7 +585,7 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Amazon S3 Bucket Access.

" + "documentation":"

The ARN of the IAM role to be assumed by Kinesis Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Amazon S3 Bucket Access.

" }, "DomainARN":{ "shape":"ElasticsearchDomainARN", @@ -560,7 +601,7 @@ }, "IndexRotationPeriod":{ "shape":"ElasticsearchIndexRotationPeriod", - "documentation":"

The Elasticsearch index rotation period. Index rotation appends a timestamp to IndexName to facilitate the expiration of old data. For more information, see Index Rotation for Amazon Elasticsearch Service Destination. Default value is OneDay.

" + "documentation":"

The Elasticsearch index rotation period. Index rotation appends a time stamp to IndexName to facilitate the expiration of old data. For more information, see Index Rotation for Amazon Elasticsearch Service Destination. Default value is OneDay.

" }, "BufferingHints":{ "shape":"ElasticsearchBufferingHints", @@ -568,7 +609,7 @@ }, "RetryOptions":{ "shape":"ElasticsearchRetryOptions", - "documentation":"

The retry behavior in the event that Firehose is unable to deliver documents to Amazon ES. Default value is 300 (5 minutes).

" + "documentation":"

The retry behavior in case Kinesis Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).

" }, "S3Update":{ "shape":"S3DestinationUpdate", @@ -616,10 +657,10 @@ "members":{ "DurationInSeconds":{ "shape":"ElasticsearchRetryDurationInSeconds", - "documentation":"

After an initial failure to deliver to Amazon ES, the total amount of time during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.

" + "documentation":"

After an initial failure to deliver to Amazon ES, the total amount of time during which Kinesis Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.

" } }, - "documentation":"

Configures retry behavior in the event that Firehose is unable to deliver documents to Amazon ES.

" + "documentation":"

Configures retry behavior in case Kinesis Firehose is unable to deliver documents to Amazon ES.

" }, "ElasticsearchS3BackupMode":{ "type":"string", @@ -638,7 +679,7 @@ "members":{ "NoEncryptionConfig":{ "shape":"NoEncryptionConfig", - "documentation":"

Specifically override existing encryption information to ensure no encryption is used.

" + "documentation":"

Specifically override existing encryption information to ensure that no encryption is used.

" }, "KMSEncryptionConfig":{ "shape":"KMSEncryptionConfig", @@ -666,7 +707,7 @@ }, "Prefix":{ "shape":"Prefix", - "documentation":"

The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Firehose Developer Guide.

" + "documentation":"

The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Firehose Developer Guide.

" }, "BufferingHints":{ "shape":"BufferingHints", @@ -719,7 +760,7 @@ }, "Prefix":{ "shape":"Prefix", - "documentation":"

The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Firehose Developer Guide.

" + "documentation":"

The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Firehose Developer Guide.

" }, "BufferingHints":{ "shape":"BufferingHints", @@ -765,7 +806,7 @@ }, "Prefix":{ "shape":"Prefix", - "documentation":"

The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Firehose Developer Guide.

" + "documentation":"

The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Firehose Developer Guide.

" }, "BufferingHints":{ "shape":"BufferingHints", @@ -798,6 +839,21 @@ }, "documentation":"

Describes an update for a destination in Amazon S3.

" }, + "FirehoseSource":{"type":"string"}, + "GetKinesisStreamInput":{ + "type":"structure", + "required":["DeliveryStreamARN"], + "members":{ + "DeliveryStreamARN":{"shape":"DeliveryStreamARN"} + } + }, + "GetKinesisStreamOutput":{ + "type":"structure", + "members":{ + "KinesisStreamARN":{"shape":"KinesisStreamARN"}, + "CredentialsForReadingKinesisStream":{"shape":"SessionCredentials"} + } + }, "IntervalInSeconds":{ "type":"integer", "max":900, @@ -811,7 +867,15 @@ "documentation":"

A message that provides information about the error.

" } }, - "documentation":"

The specified input parameter has an value that is not valid.

", + "documentation":"

The specified input parameter has a value that is not valid.

", + "exception":true + }, + "InvalidStreamTypeException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"}, + "source":{"shape":"FirehoseSource"} + }, "exception":true }, "KMSEncryptionConfig":{ @@ -825,6 +889,48 @@ }, "documentation":"

Describes an encryption key for a destination in Amazon S3.

" }, + "KinesisStreamARN":{ + "type":"string", + "max":512, + "min":1, + "pattern":"arn:.*" + }, + "KinesisStreamSourceConfiguration":{ + "type":"structure", + "required":[ + "KinesisStreamARN", + "RoleARN" + ], + "members":{ + "KinesisStreamARN":{ + "shape":"KinesisStreamARN", + "documentation":"

The ARN of the source Kinesis stream.

" + }, + "RoleARN":{ + "shape":"RoleARN", + "documentation":"

The ARN of the role that provides access to the source Kinesis stream.

" + } + }, + "documentation":"

The stream and role ARNs for a Kinesis stream used as the source for a delivery stream.

" + }, + "KinesisStreamSourceDescription":{ + "type":"structure", + "members":{ + "KinesisStreamARN":{ + "shape":"KinesisStreamARN", + "documentation":"

The ARN of the source Kinesis stream.

" + }, + "RoleARN":{ + "shape":"RoleARN", + "documentation":"

The ARN of the role used by the source Kinesis stream.

" + }, + "DeliveryStartTimestamp":{ + "shape":"DeliveryStartTimestamp", + "documentation":"

Kinesis Firehose starts retrieving records from the Kinesis stream starting with this time stamp.

" + } + }, + "documentation":"

Details about a Kinesis stream used as the source for a Kinesis Firehose delivery stream.

" + }, "LimitExceededException":{ "type":"structure", "members":{ @@ -843,6 +949,10 @@ "shape":"ListDeliveryStreamsInputLimit", "documentation":"

The maximum number of delivery streams to list.

" }, + "DeliveryStreamType":{ + "shape":"DeliveryStreamType", + "documentation":"

The delivery stream type. This can be one of the following values: DirectPut or KinesisStreamAsSource.

This parameter is optional. If this parameter is omitted, delivery streams of all types are returned.
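A paging sketch for this filter (boto3): it lists only streams that read from a Kinesis stream, following HasMoreDeliveryStreams and restarting each page from the last name returned:

import boto3

firehose = boto3.client('firehose')

names, last = [], None
while True:
    kwargs = {'DeliveryStreamType': 'KinesisStreamAsSource', 'Limit': 10}
    if last:
        kwargs['ExclusiveStartDeliveryStreamName'] = last
    page = firehose.list_delivery_streams(**kwargs)
    names.extend(page['DeliveryStreamNames'])
    if not page['HasMoreDeliveryStreams']:
        break
    last = names[-1]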

" + }, "ExclusiveStartDeliveryStreamName":{ "shape":"DeliveryStreamName", "documentation":"

The name of the delivery stream to start the list with.

" @@ -1097,7 +1207,7 @@ }, "RetryOptions":{ "shape":"RedshiftRetryOptions", - "documentation":"

The retry behavior in the event that Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

" + "documentation":"

The retry behavior in case Kinesis Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

" }, "S3Configuration":{ "shape":"S3DestinationConfiguration", @@ -1150,7 +1260,7 @@ }, "RetryOptions":{ "shape":"RedshiftRetryOptions", - "documentation":"

The retry behavior in the event that Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

" + "documentation":"

The retry behavior in case Kinesis Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

" }, "S3DestinationDescription":{ "shape":"S3DestinationDescription", @@ -1200,7 +1310,7 @@ }, "RetryOptions":{ "shape":"RedshiftRetryOptions", - "documentation":"

The retry behavior in the event that Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

" + "documentation":"

The retry behavior in case Kinesis Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

" }, "S3Update":{ "shape":"S3DestinationUpdate", @@ -1235,10 +1345,10 @@ "members":{ "DurationInSeconds":{ "shape":"RedshiftRetryDurationInSeconds", - "documentation":"

The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.

" + "documentation":"

The length of time during which Kinesis Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Kinesis Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.

" } }, - "documentation":"

Configures retry behavior in the event that Firehose is unable to deliver documents to Amazon Redshift.

" + "documentation":"

Configures retry behavior in case Kinesis Firehose is unable to deliver documents to Amazon Redshift.

" }, "RedshiftS3BackupMode":{ "type":"string", @@ -1299,7 +1409,7 @@ }, "Prefix":{ "shape":"Prefix", - "documentation":"

The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Firehose Developer Guide.

" + "documentation":"

The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Firehose Developer Guide.

" }, "BufferingHints":{ "shape":"BufferingHints", @@ -1340,7 +1450,7 @@ }, "Prefix":{ "shape":"Prefix", - "documentation":"

The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Firehose Developer Guide.

" + "documentation":"

The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Firehose Developer Guide.

" }, "BufferingHints":{ "shape":"BufferingHints", @@ -1374,7 +1484,7 @@ }, "Prefix":{ "shape":"Prefix", - "documentation":"

The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Firehose Developer Guide.

" + "documentation":"

The \"YYYY/MM/DD/HH\" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. If the prefix ends with a slash, it appears as a folder in the S3 bucket. For more information, see Amazon S3 Object Name Format in the Amazon Kinesis Firehose Developer Guide.

" }, "BufferingHints":{ "shape":"BufferingHints", @@ -1395,6 +1505,7 @@ }, "documentation":"

Describes an update for a destination in Amazon S3.

" }, + "SecretAccessKey":{"type":"string"}, "ServiceUnavailableException":{ "type":"structure", "members":{ @@ -1407,11 +1518,37 @@ "exception":true, "fault":true }, + "SessionCredentials":{ + "type":"structure", + "required":[ + "AccessKeyId", + "SecretAccessKey", + "SessionToken", + "Expiration" + ], + "members":{ + "AccessKeyId":{"shape":"AccessKeyId"}, + "SecretAccessKey":{"shape":"SecretAccessKey"}, + "SessionToken":{"shape":"SessionToken"}, + "Expiration":{"shape":"Timestamp"} + } + }, + "SessionToken":{"type":"string"}, "SizeInMBs":{ "type":"integer", "max":128, "min":1 }, + "SourceDescription":{ + "type":"structure", + "members":{ + "KinesisStreamSourceDescription":{ + "shape":"KinesisStreamSourceDescription", + "documentation":"

The KinesisStreamSourceDescription value for the source Kinesis stream.

" + } + }, + "documentation":"

Details about a Kinesis stream used as the source for a Kinesis Firehose delivery stream.

" + }, "Timestamp":{"type":"timestamp"}, "UpdateDestinationInput":{ "type":"structure", @@ -1427,7 +1564,7 @@ }, "CurrentDeliveryStreamVersionId":{ "shape":"DeliveryStreamVersionId", - "documentation":"

Obtain this value from the VersionId result of DeliveryStreamDescription. This value is required, and helps the service to perform conditional operations. For example, if there is a interleaving update and this value is null, then the update destination fails. After the update is successful, the VersionId value is updated. The service then performs a merge of the old configuration with the new configuration.

" + "documentation":"

Obtain this value from the VersionId result of DeliveryStreamDescription. This value is required, and helps the service to perform conditional operations. For example, if there is an interleaving update and this value is null, then the update destination fails. After the update is successful, the VersionId value is updated. The service then performs a merge of the old configuration with the new configuration.

" }, "DestinationId":{ "shape":"DestinationId", @@ -1463,5 +1600,5 @@ "sensitive":true } }, - "documentation":"Amazon Kinesis Firehose API Reference

Amazon Kinesis Firehose is a fully-managed service that delivers real-time streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon Elasticsearch Service (Amazon ES), and Amazon Redshift.

" + "documentation":"Amazon Kinesis Firehose API Reference

Amazon Kinesis Firehose is a fully managed service that delivers real-time streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon Elasticsearch Service (Amazon ES), and Amazon Redshift.

" } diff --git a/botocore/data/gamelift/2015-10-01/service-2.json b/botocore/data/gamelift/2015-10-01/service-2.json index d583c42a..86c7f1d1 100644 --- a/botocore/data/gamelift/2015-10-01/service-2.json +++ b/botocore/data/gamelift/2015-10-01/service-2.json @@ -11,6 +11,22 @@ "uid":"gamelift-2015-10-01" }, "operations":{ + "AcceptMatch":{ + "name":"AcceptMatch", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AcceptMatchInput"}, + "output":{"shape":"AcceptMatchOutput"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"UnsupportedRegionException"} + ], + "documentation":"

Registers a player's acceptance or rejection of a proposed FlexMatch match. A matchmaking configuration may require player acceptance; if so, then matches built with that configuration cannot be completed unless all players accept the proposed match within a specified time limit.

When FlexMatch builds a match, all the matchmaking tickets involved in the proposed match are placed into status REQUIRES_ACCEPTANCE. This is a trigger for your game to get acceptance from all players in the ticket. Acceptances are only valid for tickets when they are in this status; all other acceptances result in an error.

To register acceptance, specify the ticket ID, a response, and one or more players. Once all players have registered acceptance, the matchmaking tickets advance to status PLACING, where a new game session is created for the match.

If any player rejects the match, or if acceptances are not received before a specified timeout, the proposed match is dropped. The matchmaking tickets are then handled in one of two ways: For tickets where all players accepted the match, the ticket status is returned to SEARCHING to find a new match. For tickets where one or more players failed to accept the match, the ticket status is set to FAILED, and processing is terminated. A new matchmaking request for these players can be submitted as needed.

Matchmaking-related operations include:
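As a minimal botocore sketch of this call (the region, ticket ID, and player IDs are placeholder assumptions; the parameter names follow the AcceptMatchInput shape defined later in this file):

    import botocore.session

    gamelift = botocore.session.get_session().create_client(
        'gamelift', region_name='us-west-2')

    # Valid only while the ticket is in REQUIRES_ACCEPTANCE.
    gamelift.accept_match(
        TicketId='ticket-1234',              # placeholder ticket ID
        PlayerIds=['player-1', 'player-2'],
        AcceptanceType='ACCEPT')             # or 'REJECT'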

" + }, "CreateAlias":{ "name":"CreateAlias", "http":{ @@ -60,7 +76,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Creates a new fleet to run your game servers. A fleet is a set of Amazon Elastic Compute Cloud (Amazon EC2) instances, each of which can run multiple server processes to host game sessions. You configure a fleet to create instances with certain hardware specifications (see Amazon EC2 Instance Types for more information), and deploy a specified game build to each instance. A newly created fleet passes through several statuses; once it reaches the ACTIVE status, it can begin hosting game sessions.

To create a new fleet, you must specify the following: (1) fleet name, (2) build ID of an uploaded game build, (3) an EC2 instance type, and (4) a run-time configuration that describes which server processes to run on each instance in the fleet. (Although the run-time configuration is not a required parameter, the fleet cannot be successfully activated without it.)

You can also configure the new fleet with the following settings:

If you use Amazon CloudWatch for metrics, you can add the new fleet to a metric group. This allows you to view aggregated metrics for a set of fleets. Once you specify a metric group, the new fleet's metrics are included in the metric group's data.

If the CreateFleet call is successful, Amazon GameLift performs the following tasks:

Fleet-related operations include:

" + "documentation":"

Creates a new fleet to run your game servers. A fleet is a set of Amazon Elastic Compute Cloud (Amazon EC2) instances, each of which can run multiple server processes to host game sessions. You configure a fleet to create instances with certain hardware specifications (see Amazon EC2 Instance Types for more information), and deploy a specified game build to each instance. A newly created fleet passes through several statuses; once it reaches the ACTIVE status, it can begin hosting game sessions.

To create a new fleet, you must specify the following: (1) fleet name, (2) build ID of an uploaded game build, (3) an EC2 instance type, and (4) a run-time configuration that describes which server processes to run on each instance in the fleet. (Although the run-time configuration is not a required parameter, the fleet cannot be successfully activated without it.)

You can also configure the new fleet with the following settings:

If you use Amazon CloudWatch for metrics, you can add the new fleet to a metric group. This allows you to view aggregated metrics for a set of fleets. Once you specify a metric group, the new fleet's metrics are included in the metric group's data.

If the CreateFleet call is successful, Amazon GameLift performs the following tasks:

Fleet-related operations include:

" }, "CreateGameSession":{ "name":"CreateGameSession", @@ -82,7 +98,7 @@ {"shape":"LimitExceededException"}, {"shape":"IdempotentParameterMismatchException"} ], - "documentation":"

Creates a multiplayer game session for players. This action creates a game session record and assigns an available server process in the specified fleet to host the game session. A fleet must have an ACTIVE status before a game session can be created in it.

To create a game session, specify either fleet ID or alias ID and indicate a maximum number of players to allow in the game session. You can also provide a name and game-specific properties for this game session. If successful, a GameSession object is returned containing game session properties, including a game session ID with the custom string you provided.

Idempotency tokens. You can add a token that uniquely identifies game session requests. This is useful for ensuring that game session requests are idempotent. Multiple requests with the same idempotency token are processed only once; subsequent requests return the original result. All response values are the same with the exception of game session status, which may change.

Resource creation limits. If you are creating a game session on a fleet with a resource creation limit policy in force, then you must specify a creator ID. Without this ID, Amazon GameLift has no way to evaluate the policy for this new game session request.

By default, newly created game sessions allow new players to join. Use UpdateGameSession to change the game session's player session creation policy.

Available in Amazon GameLift Local.

Game-session-related operations include:

" + "documentation":"

Creates a multiplayer game session for players. This action creates a game session record and assigns an available server process in the specified fleet to host the game session. A fleet must have an ACTIVE status before a game session can be created in it.

To create a game session, specify either fleet ID or alias ID and indicate a maximum number of players to allow in the game session. You can also provide a name and game-specific properties for this game session. If successful, a GameSession object is returned containing the game session properties and other settings you specified.

Idempotency tokens. You can add a token that uniquely identifies game session requests. This is useful for ensuring that game session requests are idempotent. Multiple requests with the same idempotency token are processed only once; subsequent requests return the original result. All response values are the same with the exception of game session status, which may change.

Resource creation limits. If you are creating a game session on a fleet with a resource creation limit policy in force, then you must specify a creator ID. Without this ID, Amazon GameLift has no way to evaluate the policy for this new game session request.

Player acceptance policy. By default, newly created game sessions are open to new players. You can restrict new player access by using UpdateGameSession to change the game session's player session creation policy.

Game session logs. Logs are retained for all active game sessions for 14 days. To access the logs, call GetGameSessionLogUrl to download the log files.

Available in Amazon GameLift Local.

Game-session-related operations include:
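A minimal botocore sketch, assuming a fleet in ACTIVE status; the fleet ID, session name, token, and game property are placeholder assumptions:

    import botocore.session

    gamelift = botocore.session.get_session().create_client(
        'gamelift', region_name='us-west-2')

    resp = gamelift.create_game_session(
        FleetId='fleet-1234',               # placeholder; an alias ID also works
        MaximumPlayerSessionCount=10,
        Name='my-session',
        IdempotencyToken='request-0001',    # repeated calls return the same session
        GameProperties=[{'Key': 'mode', 'Value': 'ranked'}])
    print(resp['GameSession']['GameSessionId'])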

" }, "CreateGameSessionQueue":{ "name":"CreateGameSessionQueue", @@ -100,6 +116,38 @@ ], "documentation":"

Establishes a new queue for processing requests to place new game sessions. A queue identifies where new game sessions can be hosted -- by specifying a list of destinations (fleets or aliases) -- and how long requests can wait in the queue before timing out. You can set up a queue to try to place game sessions on fleets in multiple regions. To add placement requests to a queue, call StartGameSessionPlacement and reference the queue name.

Destination order. When processing a request for a game session, Amazon GameLift tries each destination in order until it finds one with available resources to host the new game session. A queue's default order is determined by how destinations are listed. The default order is overridden when a game session placement request provides player latency information. Player latency information enables Amazon GameLift to prioritize destinations where players report the lowest average latency, as a result placing the new game session where the majority of players will have the best possible gameplay experience.

Player latency policies. For placement requests containing player latency information, use player latency policies to protect individual players from very high latencies. With a latency cap, even when a destination can deliver a low latency for most players, the game is not placed where any individual player is reporting latency higher than a policy's maximum. A queue can have multiple latency policies, which are enforced consecutively starting with the policy with the lowest latency cap. Use multiple policies to gradually relax latency controls; for example, you might set a policy with a low latency cap for the first 60 seconds, a second policy with a higher cap for the next 60 seconds, etc.

To create a new queue, provide a name, timeout value, a list of destinations and, if desired, a set of latency policies. If successful, a new queue object is returned.

Queue-related operations include:
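A sketch of the gradual-relaxation example above; the queue name, fleet ARN, and latency values are placeholder assumptions:

    import botocore.session

    gamelift = botocore.session.get_session().create_client(
        'gamelift', region_name='us-west-2')

    gamelift.create_game_session_queue(
        Name='my-queue',
        TimeoutInSeconds=300,
        Destinations=[
            {'DestinationArn': 'arn:aws:gamelift:us-west-2::fleet/fleet-1234'}],
        PlayerLatencyPolicies=[
            # 100 ms cap for the first 60 seconds of a placement request,
            {'MaximumIndividualPlayerLatencyMilliseconds': 100,
             'PolicyDurationSeconds': 60},
            # then a relaxed 200 ms cap until the request times out.
            {'MaximumIndividualPlayerLatencyMilliseconds': 200}])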

" }, + "CreateMatchmakingConfiguration":{ + "name":"CreateMatchmakingConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateMatchmakingConfigurationInput"}, + "output":{"shape":"CreateMatchmakingConfigurationOutput"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"NotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"UnsupportedRegionException"} + ], + "documentation":"

Defines a new matchmaking configuration for use with FlexMatch. A matchmaking configuration sets out guidelines for matching players and getting the matches into games. You can set up multiple matchmaking configurations to handle the scenarios needed for your game. Each matchmaking request (StartMatchmaking) specifies a configuration for the match and provides player attributes to support the configuration being used.

To create a matchmaking configuration, at a minimum you must specify the following: configuration name; a rule set that governs how to evaluate players and find acceptable matches; a game session queue to use when placing a new game session for the match; and the maximum time allowed for a matchmaking attempt.

Player acceptance -- In each configuration, you have the option to require that all players accept participation in a proposed match. To enable this feature, set AcceptanceRequired to true and specify a time limit for player acceptance. Players have the option to accept or reject a proposed match, and a match does not move ahead to game session placement unless all matched players accept.

Matchmaking status notification -- There are two ways to track the progress of matchmaking tickets: (1) polling ticket status with DescribeMatchmaking; or (2) receiving notifications with Amazon Simple Notification Service (SNS). To use notifications, you first need to set up an SNS topic to receive the notifications, and provide the topic ARN in the matchmaking configuration (see Setting up Notifications for Matchmaking). Since notifications promise only \"best effort\" delivery, we recommend calling DescribeMatchmaking if no notifications are received within 30 seconds.

Operations related to match configurations and rule sets include:
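A minimal sketch using the required parameters from the CreateMatchmakingConfigurationInput shape later in this file, plus player acceptance; the names and queue ARN are placeholder assumptions:

    import botocore.session

    gamelift = botocore.session.get_session().create_client(
        'gamelift', region_name='us-west-2')

    gamelift.create_matchmaking_configuration(
        Name='ranked-2v2',                  # placeholder configuration name
        GameSessionQueueArns=[
            'arn:aws:gamelift:us-west-2:111122223333:gamesessionqueue/my-queue'],
        RequestTimeoutSeconds=120,          # give up matchmaking after 2 minutes
        AcceptanceRequired=True,
        AcceptanceTimeoutSeconds=30,        # players get 30 seconds to accept
        RuleSetName='ranked-2v2-rules')     # must exist in the same region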

" + }, + "CreateMatchmakingRuleSet":{ + "name":"CreateMatchmakingRuleSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateMatchmakingRuleSetInput"}, + "output":{"shape":"CreateMatchmakingRuleSetOutput"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServiceException"}, + {"shape":"UnsupportedRegionException"} + ], + "documentation":"

Creates a new rule set for FlexMatch matchmaking. A rule set describes the type of match to create, such as the number and size of teams, and sets the parameters for acceptable player matches, such as minimum skill level or character type. Rule sets are used in matchmaking configurations, which define how matchmaking requests are handled. Each MatchmakingConfiguration uses one rule set; you can set up multiple rule sets to handle the scenarios that suit your game (such as for different game modes), and create a separate matchmaking configuration for each rule set. See additional information on rule set content in the MatchmakingRuleSet structure. For help creating rule sets, including useful examples, see the topic Adding FlexMatch to Your Game.

Once created, matchmaking rule sets cannot be changed or deleted, so we recommend checking the rule set syntax using ValidateMatchmakingRuleSet before creating the rule set.

To create a matchmaking rule set, provide the set of rules and a unique name. Rule sets must be defined in the same region as the matchmaking configuration they will be used with. Rule sets cannot be edited or deleted. If you need to change a rule set, create a new one with the necessary edits and then update matchmaking configurations to use the new rule set.

Operations related to match configurations and rule sets include:

" + }, "CreatePlayerSession":{ "name":"CreatePlayerSession", "http":{ @@ -200,6 +248,22 @@ ], "documentation":"

Deletes a game session queue. This action means that any StartGameSessionPlacement requests that reference this queue will fail. To delete a queue, specify the queue name.

Queue-related operations include:

" }, + "DeleteMatchmakingConfiguration":{ + "name":"DeleteMatchmakingConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteMatchmakingConfigurationInput"}, + "output":{"shape":"DeleteMatchmakingConfigurationOutput"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"UnsupportedRegionException"} + ], + "documentation":"

Permanently removes a FlexMatch matchmaking configuration. To delete, specify the configuration name. A matchmaking configuration cannot be deleted if it is being used in any active matchmaking tickets.

Operations related to match configurations and rule sets include:

" + }, "DeleteScalingPolicy":{ "name":"DeleteScalingPolicy", "http":{ @@ -424,6 +488,52 @@ ], "documentation":"

Retrieves information about a fleet's instances, including instance IDs. Use this action to get details on all instances in the fleet or get details on one specific instance.

To get a specific instance, specify fleet ID and instance ID. To get all instances in a fleet, specify a fleet ID only. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, an Instance object is returned for each result.

" }, + "DescribeMatchmaking":{ + "name":"DescribeMatchmaking", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMatchmakingInput"}, + "output":{"shape":"DescribeMatchmakingOutput"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServiceException"}, + {"shape":"UnsupportedRegionException"} + ], + "documentation":"

Retrieves a set of one or more matchmaking tickets. Use this operation to retrieve ticket information, including status and, once a successful match is made, the connection information for the resulting new game session.

You can use this operation to track the progress of matchmaking requests (through polling) as an alternative to using event notifications. See more details on tracking matchmaking requests through polling or notifications in StartMatchmaking.

You can request data for one ticket ID or a list of ticket IDs. If the request is successful, a ticket object is returned for each requested ID. When specifying a list of ticket IDs, objects are returned only for tickets that currently exist.

Matchmaking-related operations include:
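A minimal polling sketch; the ticket ID is a placeholder, and the terminal status values are an assumption based on the states named elsewhere in this file:

    import time

    import botocore.session

    gamelift = botocore.session.get_session().create_client(
        'gamelift', region_name='us-west-2')

    while True:
        resp = gamelift.describe_matchmaking(TicketIds=['ticket-1234'])
        ticket = resp['TicketList'][0]
        if ticket['Status'] in ('COMPLETED', 'FAILED', 'CANCELLED', 'TIMED_OUT'):
            break
        time.sleep(10)   # notifications are preferred; poll conservatively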

" + }, + "DescribeMatchmakingConfigurations":{ + "name":"DescribeMatchmakingConfigurations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMatchmakingConfigurationsInput"}, + "output":{"shape":"DescribeMatchmakingConfigurationsOutput"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServiceException"}, + {"shape":"UnsupportedRegionException"} + ], + "documentation":"

Retrieves the details of FlexMatch matchmaking configurations. With this operation, you have the following options: (1) retrieve all existing configurations, (2) provide the names of one or more configurations to retrieve, or (3) retrieve all configurations that use a specified rule set name. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a configuration is returned for each requested name. When specifying a list of names, only configurations that currently exist are returned.

Operations related to match configurations and rule sets include:

" + }, + "DescribeMatchmakingRuleSets":{ + "name":"DescribeMatchmakingRuleSets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMatchmakingRuleSetsInput"}, + "output":{"shape":"DescribeMatchmakingRuleSetsOutput"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServiceException"}, + {"shape":"NotFoundException"}, + {"shape":"UnsupportedRegionException"} + ], + "documentation":"

Retrieves the details for FlexMatch matchmaking rule sets. You can request all existing rule sets for the region, or provide a list of one or more rule set names. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a rule set is returned for each requested name.

Operations related to match configurations and rule sets include:

" + }, "DescribePlayerSessions":{ "name":"DescribePlayerSessions", "http":{ @@ -486,7 +596,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Retrieves the location of stored game session logs for a specified game session. When a game session is terminated, Amazon GameLift automatically stores the logs in Amazon S3. Use this URL to download the logs.

See the AWS Service Limits page for maximum log file sizes. Log files that exceed this limit are not saved.

Game-session-related operations include:

" + "documentation":"

Retrieves the location of stored game session logs for a specified game session. When a game session is terminated, Amazon GameLift automatically stores the logs in Amazon S3 and retains them for 14 days. Use this URL to download the logs.

See the AWS Service Limits page for maximum log file sizes. Log files that exceed this limit are not saved.

Game-session-related operations include:

" }, "GetInstanceAccess":{ "name":"GetInstanceAccess", @@ -630,7 +740,23 @@ {"shape":"NotFoundException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Places a request for a new game session in a queue (see CreateGameSessionQueue). When processing a placement request, Amazon GameLift searches for available resources on the queue's destinations, scanning each until it finds resources or the placement request times out.

A game session placement request can also request player sessions. When a new game session is successfully created, Amazon GameLift creates a player session for each player included in the request.

When placing a game session, by default Amazon GameLift tries each fleet in the order they are listed in the queue configuration. Ideally, a queue's destinations are listed in preference order.

Alternatively, when requesting a game session with players, you can also provide latency data for each player in relevant regions. Latency data indicates the performance lag a player experiences when connected to a fleet in the region. Amazon GameLift uses latency data to reorder the list of destinations to place the game session in a region with minimal lag. If latency data is provided for multiple players, Amazon GameLift calculates each region's average lag for all players and reorders to get the best game play across all players.

To place a new game session request, specify the following:

If successful, a new game session placement is created.

To track the status of a placement request, call DescribeGameSessionPlacement and check the request's status. If the status is Fulfilled, a new game session has been created and a game session ARN and region are referenced. If the placement request times out, you can resubmit the request or retry it with a different queue.

Game-session-related operations include:

" + "documentation":"

Places a request for a new game session in a queue (see CreateGameSessionQueue). When processing a placement request, Amazon GameLift searches for available resources on the queue's destinations, scanning each until it finds resources or the placement request times out.

A game session placement request can also request player sessions. When a new game session is successfully created, Amazon GameLift creates a player session for each player included in the request.

When placing a game session, by default Amazon GameLift tries each fleet in the order they are listed in the queue configuration. Ideally, a queue's destinations are listed in preference order.

Alternatively, when requesting a game session with players, you can also provide latency data for each player in relevant regions. Latency data indicates the performance lag a player experiences when connected to a fleet in the region. Amazon GameLift uses latency data to reorder the list of destinations to place the game session in a region with minimal lag. If latency data is provided for multiple players, Amazon GameLift calculates each region's average lag for all players and reorders to get the best game play across all players.

To place a new game session request, specify the following:

If successful, a new game session placement is created.

To track the status of a placement request, call DescribeGameSessionPlacement and check the request's status. If the status is FULFILLED, a new game session has been created and a game session ARN and region are referenced. If the placement request times out, you can resubmit the request or retry it with a different queue.

Game-session-related operations include:
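A minimal sketch of placing a session through a queue and checking the result; the placement ID, queue name, and latency values are placeholder assumptions:

    import botocore.session

    gamelift = botocore.session.get_session().create_client(
        'gamelift', region_name='us-west-2')

    gamelift.start_game_session_placement(
        PlacementId='placement-0001',        # caller-supplied unique ID
        GameSessionQueueName='my-queue',
        MaximumPlayerSessionCount=10,
        PlayerLatencies=[
            {'PlayerId': 'player-1',
             'RegionIdentifier': 'us-west-2',
             'LatencyInMilliseconds': 40.0}])

    placement = gamelift.describe_game_session_placement(
        PlacementId='placement-0001')['GameSessionPlacement']
    if placement['Status'] == 'FULFILLED':
        print(placement['GameSessionArn'])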

" + }, + "StartMatchmaking":{ + "name":"StartMatchmaking", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartMatchmakingInput"}, + "output":{"shape":"StartMatchmakingOutput"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"UnsupportedRegionException"} + ], + "documentation":"

Uses FlexMatch to create a game match for a group of players based on custom matchmaking rules, and starts a new game for the matched players. Each matchmaking request specifies the type of match to build (team configuration, rules for an acceptable match, etc.). The request also specifies the players to find a match for and where to host the new game session for optimal performance. A matchmaking request might start with a single player or a group of players who want to play together. FlexMatch finds additional players as needed to fill the match. Match type, rules, and the queue used to place a new game session are defined in a MatchmakingConfiguration. For complete information on setting up and using FlexMatch, see the topic Adding FlexMatch to Your Game.

To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, and include the players to be matched. You must also include a set of player attributes relevant for the matchmaking configuration. If successful, a matchmaking ticket is returned with status set to QUEUED. Track the status of the ticket to respond as needed and acquire game session connection information for successfully completed matches.

Tracking ticket status -- A couple of options are available for tracking the status of matchmaking requests:

Processing a matchmaking request -- FlexMatch handles a matchmaking request as follows:

  1. Your client code submits a StartMatchmaking request for one or more players and tracks the status of the request ticket.

  2. FlexMatch uses this ticket and others in process to build an acceptable match. When a potential match is identified, all tickets in the proposed match are advanced to the next status.

  3. If the match requires player acceptance (set in the matchmaking configuration), the tickets move into status REQUIRES_ACCEPTANCE. This status triggers your client code to solicit acceptance from all players in every ticket involved in the match, and then call AcceptMatch for each player. If any player rejects or fails to accept the match before a specified timeout, the proposed match is dropped (see AcceptMatch for more details).

  4. Once a match is proposed and accepted, the matchmaking tickets move into status PLACING. FlexMatch locates resources for a new game session using the game session queue (set in the matchmaking configuration) and creates the game session based on the match data.

  5. When the match is successfully placed, the matchmaking tickets move into COMPLETED status. Connection information (including game session endpoint and player session) is added to the matchmaking tickets. Matched players can use the connection information to join the game.

Matchmaking-related operations include:
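A minimal sketch of step 1 above; the ticket ID, configuration name, and attribute values are placeholder assumptions, and the Players parameter shape follows the public GameLift API since it is not shown in this excerpt:

    import botocore.session

    gamelift = botocore.session.get_session().create_client(
        'gamelift', region_name='us-west-2')

    resp = gamelift.start_matchmaking(
        TicketId='ticket-1234',             # caller-supplied unique ID
        ConfigurationName='ranked-2v2',
        Players=[
            {'PlayerId': 'player-1',
             'PlayerAttributes': {'skill': {'N': 42.0}}}])
    print(resp['MatchmakingTicket']['Status'])   # 'QUEUED' on success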

" }, "StopGameSessionPlacement":{ "name":"StopGameSessionPlacement", @@ -646,7 +772,23 @@ {"shape":"NotFoundException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Cancels a game session placement that is in Pending status. To stop a placement, provide the placement ID values. If successful, the placement is moved to Cancelled status.

Game-session-related operations include:

" + "documentation":"

Cancels a game session placement that is in PENDING status. To stop a placement, provide the placement ID values. If successful, the placement is moved to CANCELLED status.

Game-session-related operations include:

" + }, + "StopMatchmaking":{ + "name":"StopMatchmaking", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopMatchmakingInput"}, + "output":{"shape":"StopMatchmakingOutput"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"UnsupportedRegionException"} + ], + "documentation":"

Cancels a matchmaking ticket that is currently being processed. To stop the matchmaking operation, specify the ticket ID. If successful, work on the ticket is stopped, and the ticket status is changed to CANCELLED.

Matchmaking-related operations include:

" }, "UpdateAlias":{ "name":"UpdateAlias", @@ -771,6 +913,22 @@ ], "documentation":"

Updates settings for a game session queue, which determines how new game session requests in the queue are processed. To update settings, specify the queue name to be updated and provide the new settings. When updating destinations, provide a complete list of destinations.

Queue-related operations include:

" }, + "UpdateMatchmakingConfiguration":{ + "name":"UpdateMatchmakingConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateMatchmakingConfigurationInput"}, + "output":{"shape":"UpdateMatchmakingConfigurationOutput"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"UnsupportedRegionException"} + ], + "documentation":"

Updates settings for a FlexMatch matchmaking configuration. To update settings, specify the configuration name to be updated and provide the new settings.

Operations related to match configurations and rule sets include:

" + }, "UpdateRuntimeConfiguration":{ "name":"UpdateRuntimeConfiguration", "http":{ @@ -787,9 +945,59 @@ {"shape":"InvalidFleetStatusException"} ], "documentation":"

Updates the current run-time configuration for the specified fleet, which tells Amazon GameLift how to launch server processes on instances in the fleet. You can update a fleet's run-time configuration at any time after the fleet is created; it does not need to be in an ACTIVE status.

To update run-time configuration, specify the fleet ID and provide a RuntimeConfiguration object with the updated collection of server process configurations.

Each instance in an Amazon GameLift fleet checks regularly for an updated run-time configuration and changes how it launches server processes to comply with the latest version. Existing server processes are not affected by the update; they continue to run until they end, while Amazon GameLift simply adds new server processes to fit the current run-time configuration. As a result, the run-time configuration changes are applied gradually as existing processes shut down and new processes are launched in Amazon GameLift's normal process recycling activity.

Fleet-related operations include:
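A minimal sketch of pushing a new run-time configuration; the fleet ID, launch path, and process count are placeholder assumptions:

    import botocore.session

    gamelift = botocore.session.get_session().create_client(
        'gamelift', region_name='us-west-2')

    gamelift.update_runtime_configuration(
        FleetId='fleet-1234',               # placeholder fleet ID
        RuntimeConfiguration={
            'ServerProcesses': [
                {'LaunchPath': '/local/game/server',  # placeholder path
                 'ConcurrentExecutions': 10}]})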

" + }, + "ValidateMatchmakingRuleSet":{ + "name":"ValidateMatchmakingRuleSet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ValidateMatchmakingRuleSetInput"}, + "output":{"shape":"ValidateMatchmakingRuleSetOutput"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"UnsupportedRegionException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Validates the syntax of a matchmaking rule or rule set. This operation checks that the rule set uses syntactically correct JSON and that it conforms to allowed property expressions. To validate syntax, provide a rule set string.

Operations related to match configurations and rule sets include:
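A minimal sketch of the validation call; the rule set body is illustrative only (see Adding FlexMatch to Your Game for the actual schema), and the Valid response field is an assumption:

    import json

    import botocore.session

    gamelift = botocore.session.get_session().create_client(
        'gamelift', region_name='us-west-2')

    rule_set = {   # illustrative body only; not a verified schema
        'name': 'ranked-2v2-rules',
        'ruleLanguageVersion': '1.0',
        'teams': [{'name': 'duo', 'minPlayers': 2, 'maxPlayers': 2}]}

    resp = gamelift.validate_matchmaking_rule_set(
        RuleSetBody=json.dumps(rule_set))
    print(resp.get('Valid'))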

" } }, "shapes":{ + "AcceptMatchInput":{ + "type":"structure", + "required":[ + "TicketId", + "PlayerIds", + "AcceptanceType" + ], + "members":{ + "TicketId":{ + "shape":"MatchmakingIdStringModel", + "documentation":"

Unique identifier for a matchmaking ticket. The ticket must be in status REQUIRES_ACCEPTANCE; otherwise this request will fail.

" + }, + "PlayerIds":{ + "shape":"MatchmakingPlayerIdList", + "documentation":"

Unique identifiers for the players delivering the response. This parameter can include one or multiple player IDs.

" + }, + "AcceptanceType":{ + "shape":"AcceptanceType", + "documentation":"

Player response to the proposed match.

" + } + }, + "documentation":"

Represents the input for a request action.

" + }, + "AcceptMatchOutput":{ + "type":"structure", + "members":{ + } + }, + "AcceptanceType":{ + "type":"string", + "enum":[ + "ACCEPT", + "REJECT" + ] + }, "Alias":{ "type":"structure", "members":{ @@ -838,25 +1046,48 @@ "min":1, "pattern":"[a-zA-Z0-9:/-]+" }, + "AttributeValue":{ + "type":"structure", + "members":{ + "S":{ + "shape":"NonZeroAndMaxString", + "documentation":"

For single string values. Maximum string length is 100 characters.

" + }, + "N":{ + "shape":"DoubleObject", + "documentation":"

For number values, expressed as double.

" + }, + "SL":{ + "shape":"StringList", + "documentation":"

For a list of up to 10 strings. Maximum length for each string is 100 characters. Duplicate values are not recognized; all occurrences of a repeated value after the first are ignored.

" + }, + "SDM":{ + "shape":"StringDoubleMap", + "documentation":"

For a map of up to 10 type:value pairs. Maximum length for each string value is 100 characters.

" + } + }, + "documentation":"

Values for use in Player attribute type:value pairs. This object lets you specify an attribute value using any of the valid data types: string, number, string array or data map. Each AttributeValue object can use only one of the available properties.
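For example, a PlayerAttributes map exercising each of the four value types (the attribute names and values are placeholder assumptions):

    # One AttributeValue property per attribute: S, N, SL, or SDM.
    player_attributes = {
        'gameMode': {'S': 'deathmatch'},                      # single string
        'skill': {'N': 42.0},                                 # number (double)
        'maps': {'SL': ['desert', 'forest']},                 # up to 10 strings
        'heroStats': {'SDM': {'attack': 7.0, 'speed': 5.0}}}  # up to 10 pairs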

" + }, "AwsCredentials":{ "type":"structure", "members":{ "AccessKeyId":{ "shape":"NonEmptyString", - "documentation":"

Access key for an AWS account.

" + "documentation":"

Temporary key allowing access to the Amazon GameLift S3 account.

" }, "SecretAccessKey":{ "shape":"NonEmptyString", - "documentation":"

Secret key for an AWS account.

" + "documentation":"

Temporary secret key allowing access to the Amazon GameLift S3 account.

" }, "SessionToken":{ "shape":"NonEmptyString", - "documentation":"

Token specific to a build ID.

" + "documentation":"

Token used to associate a specific build ID with the files uploaded using these credentials.

" } }, - "documentation":"

AWS access credentials sometimes used for uploading game build files to Amazon GameLift. They are valid for a limited time. If they expire before you upload your game build, get a new set by calling RequestUploadCredentials.

", + "documentation":"

Temporary access credentials used for uploading game build files to Amazon GameLift. They are valid for a limited time. If they expire before you upload your game build, get a new set by calling RequestUploadCredentials.

", "sensitive":true }, + "Boolean":{"type":"boolean"}, "Build":{ "type":"structure", "members":{ @@ -1087,7 +1318,7 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

Set of developer-defined properties for a game session. These properties are passed to the server process hosting the game session.

" + "documentation":"

Set of developer-defined properties for a game session, formatted as a set of type:value pairs. These properties are included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session).

" }, "CreatorId":{ "shape":"NonZeroAndMaxString", @@ -1099,7 +1330,11 @@ }, "IdempotencyToken":{ "shape":"IdStringModel", - "documentation":"

Custom string that uniquely identifies a request for a new game session. Maximum token length is 48 characters. If provided, this string is included in the new game session's ID. (A game session ID has the following format: arn:aws:gamelift:<region>::gamesession/<fleet ID>/<custom ID string or idempotency token>.)

" + "documentation":"

Custom string that uniquely identifies a request for a new game session. Maximum token length is 48 characters. If provided, this string is included in the new game session's ID. (A game session ID has the following format: arn:aws:gamelift:<region>::gamesession/<fleet ID>/<custom ID string or idempotency token>.) Idempotency tokens remain in use for 30 days after a game session has ended; game session objects are retained for this time period and then deleted.

" + }, + "GameSessionData":{ + "shape":"GameSessionData", + "documentation":"

Set of developer-defined game session properties, formatted as a single string value. This data is included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session).

" } }, "documentation":"

Represents the input for a request action.

" @@ -1120,11 +1355,11 @@ "members":{ "Name":{ "shape":"GameSessionQueueName", - "documentation":"

Descriptive label that is associated with queue. Queue names must be unique within each region.

" + "documentation":"

Descriptive label that is associated with game session queue. Queue names must be unique within each region.

" }, "TimeoutInSeconds":{ "shape":"WholeNumber", - "documentation":"

Maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT status.

" + "documentation":"

Maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT status.

" }, "PlayerLatencyPolicies":{ "shape":"PlayerLatencyPolicyList", @@ -1147,6 +1382,106 @@ }, "documentation":"

Represents the returned data in response to a request action.

" }, + "CreateMatchmakingConfigurationInput":{ + "type":"structure", + "required":[ + "Name", + "GameSessionQueueArns", + "RequestTimeoutSeconds", + "AcceptanceRequired", + "RuleSetName" + ], + "members":{ + "Name":{ + "shape":"MatchmakingIdStringModel", + "documentation":"

Unique identifier for a matchmaking configuration. This name is used to identify the configuration associated with a matchmaking request or ticket.

" + }, + "Description":{ + "shape":"NonZeroAndMaxString", + "documentation":"

Meaningful description of the matchmaking configuration.

" + }, + "GameSessionQueueArns":{ + "shape":"QueueArnsList", + "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session queue and uniquely identifies it. Format is arn:aws:gamelift:<region>:<account>:gamesessionqueue/<queue name>. These queues are used when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any region.

" + }, + "RequestTimeoutSeconds":{ + "shape":"MatchmakingRequestTimeoutInteger", + "documentation":"

Maximum duration, in seconds, that a matchmaking ticket can remain in process before timing out. Requests that time out can be resubmitted as needed.

" + }, + "AcceptanceTimeoutSeconds":{ + "shape":"MatchmakingAcceptanceTimeoutInteger", + "documentation":"

Length of time (in seconds) to wait for players to accept a proposed match. If any player rejects the match or fails to accept before the timeout, the ticket continues to look for an acceptable match.

" + }, + "AcceptanceRequired":{ + "shape":"Boolean", + "documentation":"

Flag that determines whether or not a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE.

" + }, + "RuleSetName":{ + "shape":"MatchmakingIdStringModel", + "documentation":"

Unique identifier for a matchmaking rule set to use with this configuration. A matchmaking configuration can only use rule sets that are defined in the same region.

" + }, + "NotificationTarget":{ + "shape":"SnsArnStringModel", + "documentation":"

SNS topic ARN that is set up to receive matchmaking notifications.

" + }, + "AdditionalPlayerCount":{ + "shape":"WholeNumber", + "documentation":"

Number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single 12-person team, and the additional player count is set to 2, only 10 players are selected for the match.

" + }, + "CustomEventData":{ + "shape":"CustomEventData", + "documentation":"

Information to attach to all events related to the matchmaking configuration.

" + }, + "GameProperties":{ + "shape":"GamePropertyList", + "documentation":"

Set of developer-defined properties for a game session, formatted as a set of type:value pairs. These properties are included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + }, + "GameSessionData":{ + "shape":"GameSessionData", + "documentation":"

Set of developer-defined game session properties, formatted as a single string value. This data is included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + } + }, + "documentation":"

Represents the input for a request action.

" + }, + "CreateMatchmakingConfigurationOutput":{ + "type":"structure", + "members":{ + "Configuration":{ + "shape":"MatchmakingConfiguration", + "documentation":"

Object that describes the newly created matchmaking configuration.

" + } + }, + "documentation":"

Represents the returned data in response to a request action.

" + }, + "CreateMatchmakingRuleSetInput":{ + "type":"structure", + "required":[ + "Name", + "RuleSetBody" + ], + "members":{ + "Name":{ + "shape":"MatchmakingIdStringModel", + "documentation":"

Unique identifier for a matchmaking rule set. This name is used to identify the rule set associated with a matchmaking configuration.

" + }, + "RuleSetBody":{ + "shape":"RuleSetBody", + "documentation":"

Collection of matchmaking rules, formatted as a JSON string. (Note that comments are not allowed in JSON, but most elements support a description field.)

" + } + }, + "documentation":"

Represents the input for a request action.

" + }, + "CreateMatchmakingRuleSetOutput":{ + "type":"structure", + "required":["RuleSet"], + "members":{ + "RuleSet":{ + "shape":"MatchmakingRuleSet", + "documentation":"

Object that describes the newly created matchmaking rule set.

" + } + }, + "documentation":"

Represents the returned data in response to a request action.

" + }, "CreatePlayerSessionInput":{ "type":"structure", "required":[ @@ -1211,6 +1546,11 @@ }, "documentation":"

Represents the returned data in response to a request action.

" }, + "CustomEventData":{ + "type":"string", + "max":256, + "min":1 + }, "DeleteAliasInput":{ "type":"structure", "required":["AliasId"], @@ -1250,7 +1590,7 @@ "members":{ "Name":{ "shape":"GameSessionQueueName", - "documentation":"

Descriptive label that is associated with queue. Queue names must be unique within each region.

" + "documentation":"

Descriptive label that is associated with game session queue. Queue names must be unique within each region.

" } }, "documentation":"

Represents the input for a request action.

" @@ -1260,6 +1600,22 @@ "members":{ } }, + "DeleteMatchmakingConfigurationInput":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"MatchmakingIdStringModel", + "documentation":"

Unique identifier for a matchmaking configuration.

" + } + }, + "documentation":"

Represents the input for a request action.

" + }, + "DeleteMatchmakingConfigurationOutput":{ + "type":"structure", + "members":{ + } + }, "DeleteScalingPolicyInput":{ "type":"structure", "required":[ @@ -1353,7 +1709,7 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To specify the start of the result set, do not specify a value. This parameter is ignored when the request specifies one or a list of fleet IDs.

" + "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value. This parameter is ignored when the request specifies one or a list of fleet IDs.

" } }, "documentation":"

Represents the input for a request action.

" @@ -1385,7 +1741,7 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To specify the start of the result set, do not specify a value. This parameter is ignored when the request specifies one or a list of fleet IDs.

" + "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value. This parameter is ignored when the request specifies one or a list of fleet IDs.

" } }, "documentation":"

Represents the input for a request action.

" @@ -1426,7 +1782,7 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To specify the start of the result set, do not specify a value.

" + "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value.

" } }, "documentation":"

Represents the input for a request action.

" @@ -1479,7 +1835,7 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To specify the start of the result set, do not specify a value. This parameter is ignored when the request specifies one or a list of fleet IDs.

" + "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value. This parameter is ignored when the request specifies one or a list of fleet IDs.

" } }, "documentation":"

Represents the input for a request action.

" @@ -1523,7 +1879,7 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To specify the start of the result set, do not specify a value.

" + "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value.

" } }, "documentation":"

Represents the input for a request action.

" @@ -1576,7 +1932,7 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To specify the start of the result set, do not specify a value.

" + "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value.

" } }, "documentation":"

Represents the input for a request action.

" @@ -1620,7 +1976,7 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To specify the start of the result set, do not specify a value.

" + "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value.

" } }, "documentation":"

Represents the input for a request action.

" @@ -1657,7 +2013,7 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To specify the start of the result set, do not specify a value.

" + "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value.

" } }, "documentation":"

Represents the input for a request action.

" @@ -1676,6 +2032,96 @@ }, "documentation":"

Represents the returned data in response to a request action.

" }, + "DescribeMatchmakingConfigurationsInput":{ + "type":"structure", + "members":{ + "Names":{ + "shape":"MatchmakingIdList", + "documentation":"

Unique identifier for the matchmaking configuration(s) to retrieve. To request all existing configurations, leave this parameter empty.

" + }, + "RuleSetName":{ + "shape":"MatchmakingIdStringModel", + "documentation":"

Unique identifier for a matchmaking rule set. Use this parameter to retrieve all matchmaking configurations that use this rule set.

" + }, + "Limit":{ + "shape":"PositiveInteger", + "documentation":"

Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages. This parameter is limited to 10.

" + }, + "NextToken":{ + "shape":"NonZeroAndMaxString", + "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value.

" + } + }, + "documentation":"

Represents the input for a request action.

" + }, + "DescribeMatchmakingConfigurationsOutput":{ + "type":"structure", + "members":{ + "Configurations":{ + "shape":"MatchmakingConfigurationList", + "documentation":"

Collection of requested matchmaking configuration objects.

" + }, + "NextToken":{ + "shape":"NonZeroAndMaxString", + "documentation":"

Token that indicates where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.

" + } + }, + "documentation":"

Represents the returned data in response to a request action.

" + }, + "DescribeMatchmakingInput":{ + "type":"structure", + "required":["TicketIds"], + "members":{ + "TicketIds":{ + "shape":"MatchmakingIdList", + "documentation":"

Unique identifier for a matchmaking ticket. To request all existing tickets, leave this parameter empty.

" + } + }, + "documentation":"

Represents the input for a request action.

" + }, + "DescribeMatchmakingOutput":{ + "type":"structure", + "members":{ + "TicketList":{ + "shape":"MatchmakingTicketList", + "documentation":"

Collection of existing matchmaking ticket objects matching the request.

" + } + }, + "documentation":"

Represents the returned data in response to a request action.

" + }, + "DescribeMatchmakingRuleSetsInput":{ + "type":"structure", + "members":{ + "Names":{ + "shape":"MatchmakingRuleSetNameList", + "documentation":"

Unique identifier for a matchmaking rule set. This name is used to identify the rule set associated with a matchmaking configuration.

" + }, + "Limit":{ + "shape":"RuleSetLimit", + "documentation":"

Maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" + }, + "NextToken":{ + "shape":"NonZeroAndMaxString", + "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value.

" + } + }, + "documentation":"

Represents the input for a request action.

" + }, + "DescribeMatchmakingRuleSetsOutput":{ + "type":"structure", + "required":["RuleSets"], + "members":{ + "RuleSets":{ + "shape":"MatchmakingRuleSetList", + "documentation":"

Collection of requested matchmaking rule set objects.

" + }, + "NextToken":{ + "shape":"NonZeroAndMaxString", + "documentation":"

Token that indicates where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.

" + } + }, + "documentation":"

Represents the returned data in response to a request action.

" + }, "DescribePlayerSessionsInput":{ "type":"structure", "members":{ @@ -1701,7 +2147,7 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To specify the start of the result set, do not specify a value. If a player session ID is specified, this parameter is ignored.

" + "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value. If a player session ID is specified, this parameter is ignored.

" } }, "documentation":"

Represents the input for a request action.

" @@ -1759,7 +2205,7 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To specify the start of the result set, do not specify a value.

" + "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value.

" } }, "documentation":"

Represents the input for a request action.

" @@ -1797,6 +2243,7 @@ "member":{"shape":"DesiredPlayerSession"} }, "Double":{"type":"double"}, + "DoubleObject":{"type":"double"}, "EC2InstanceCounts":{ "type":"structure", "members":{ @@ -1899,7 +2346,7 @@ }, "EventCode":{ "shape":"EventCode", - "documentation":"

Type of event being logged. The following events are currently in use:

" + "documentation":"

Type of event being logged. The following events are currently in use:

" }, "Message":{ "shape":"NonEmptyString", @@ -1911,7 +2358,7 @@ }, "PreSignedLogUrl":{ "shape":"NonZeroAndMaxString", - "documentation":"

Location of stored logs with additional detail related to the event, useful for debugging issues. The URL is valid for 15 minutes. Fleet creation logs can also be accessed through the Amazon GameLift console.

" + "documentation":"

Location of stored logs with additional detail that is related to the event. This is useful for debugging issues. The URL is valid for 15 minutes. You can also access fleet creation logs through the Amazon GameLift console.

" } }, "documentation":"

Log entry describing an event that involves Amazon GameLift resources (such as a fleet). In addition to tracking activity, event codes and messages can provide additional information for troubleshooting and debugging problems.

" @@ -2118,14 +2565,14 @@ "members":{ "Key":{ "shape":"GamePropertyKey", - "documentation":"

TBD

" + "documentation":"

Game property identifier.

" }, "Value":{ "shape":"GamePropertyValue", - "documentation":"

TBD

" + "documentation":"

Game property value.

" } }, - "documentation":"

Set of key-value pairs containing information a server process requires to set up a game session. This object allows you to pass in any set of data needed for your game. For more information, see the Amazon GameLift Developer Guide.

" + "documentation":"

Set of key-value pairs that contain information about a game session. When included in a game session request, these properties communicate details to be used when setting up the new game session, such as to specify a game mode, level, or map. Game properties are passed to the game server process when initiating a new game session; the server process uses the properties as appropriate. For more information, see the Amazon GameLift Developer Guide.

" }, "GamePropertyKey":{ "type":"string", @@ -2177,7 +2624,7 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

Set of developer-defined properties for a game session. These properties are passed to the server process hosting the game session.

" + "documentation":"

Set of developer-defined properties for a game session, formatted as a set of type:value pairs. These properties are included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session).

" }, "IpAddress":{ "shape":"IpAddress", @@ -2194,15 +2641,46 @@ "CreatorId":{ "shape":"NonZeroAndMaxString", "documentation":"

Unique identifier for a player. This ID is used to enforce a resource protection policy (if one exists), that limits the number of game sessions a player can create.

" + }, + "GameSessionData":{ + "shape":"GameSessionData", + "documentation":"

Set of developer-defined game session properties, formatted as a single string value. This data is included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session).

" } }, - "documentation":"

Properties describing a game session.

Game-session-related operations include:

" + "documentation":"

Properties describing a game session.

A game session in ACTIVE status can host players. When a game session ends, its status is set to TERMINATED.

Once the session ends, the game session object is retained for 30 days. This means you can reuse idempotency token values after this time. Game session logs are retained for 14 days.

Game-session-related operations include:

" }, "GameSessionActivationTimeoutSeconds":{ "type":"integer", "max":600, "min":1 }, + "GameSessionConnectionInfo":{ + "type":"structure", + "members":{ + "GameSessionArn":{ + "shape":"ArnStringModel", + "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session and uniquely identifies it.

" + }, + "IpAddress":{ + "shape":"StringModel", + "documentation":"

IP address of the game session. To connect to an Amazon GameLift game server, an app needs both the IP address and port number.

" + }, + "Port":{ + "shape":"PositiveInteger", + "documentation":"

Port number for the game session. To connect to an Amazon GameLift game server, an app needs both the IP address and port number.

" + }, + "MatchedPlayerSessions":{ + "shape":"MatchedPlayerSessionList", + "documentation":"

Collection of player session IDs, one for each player ID that was included in the original matchmaking request.

" + } + }, + "documentation":"

Connection information for the new game session that is created with matchmaking (with StartMatchmaking). Once a match is set, the FlexMatch engine places the match and creates a new game session for it. This information, including the game session endpoint and player sessions for each player in the original matchmaking request, is added to the MatchmakingTicket, which can be retrieved by calling DescribeMatchmaking.
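
As an illustration of how a game client might consume this object, here is a hedged botocore sketch (not part of the patch); the ticket ID is hypothetical and error handling is omitted.

    import botocore.session

    client = botocore.session.get_session().create_client(
        'gamelift', region_name='us-east-1')

    # 'ticket-example' stands in for a ticket created earlier with StartMatchmaking.
    ticket = client.describe_matchmaking(
        TicketIds=['ticket-example'])['TicketList'][0]
    # Per the documentation, connection info is present once the ticket completes.
    if ticket['Status'] == 'COMPLETED':
        info = ticket['GameSessionConnectionInfo']
        print(info['IpAddress'], info['Port'])  # endpoint for the match
        for session in info['MatchedPlayerSessions']:
            # Each player claims a slot with player ID + player session ID.
            print(session['PlayerId'], session['PlayerSessionId'])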

" + }, + "GameSessionData":{ + "type":"string", + "max":4096, + "min":1 + }, "GameSessionDetail":{ "type":"structure", "members":{ @@ -2242,7 +2720,7 @@ }, "GameSessionQueueName":{ "shape":"GameSessionQueueName", - "documentation":"

Descriptive label that is associated with queue. Queue names must be unique within each region.

" + "documentation":"

Descriptive label that is associated with a game session queue. Queue names must be unique within each region.

" }, "Status":{ "shape":"GameSessionPlacementState", @@ -2250,7 +2728,7 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

Set of developer-defined properties for a game session. These properties are passed to the server process hosting the game session.

" + "documentation":"

Set of developer-defined properties for a game session, formatted as a set of type:value pairs. These properties are included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session).

" }, "MaximumPlayerSessionCount":{ "shape":"WholeNumber", @@ -2262,19 +2740,19 @@ }, "GameSessionId":{ "shape":"NonZeroAndMaxString", - "documentation":"

Unique identifier for the game session. This value is set once the new game session is placed (placement status is Fulfilled).

" + "documentation":"

Unique identifier for the game session. This value is set once the new game session is placed (placement status is FULFILLED).

" }, "GameSessionArn":{ "shape":"NonZeroAndMaxString", - "documentation":"

Identifier for the game session created by this placement request. This value is set once the new game session is placed (placement status is Fulfilled). This identifier is unique across all regions. You can use this value as a GameSessionId value as needed.

" + "documentation":"

Identifier for the game session created by this placement request. This value is set once the new game session is placed (placement status is FULFILLED). This identifier is unique across all regions. You can use this value as a GameSessionId value as needed.

" }, "GameSessionRegion":{ "shape":"NonZeroAndMaxString", - "documentation":"

Name of the region where the game session created by this placement request is running. This value is set once the new game session is placed (placement status is Fulfilled).

" + "documentation":"

Name of the region where the game session created by this placement request is running. This value is set once the new game session is placed (placement status is FULFILLED).

" }, "PlayerLatencies":{ "shape":"PlayerLatencyList", - "documentation":"

Set of values, expressed in milliseconds, indicating the amount of latency that players are experiencing when connected to AWS regions.

" + "documentation":"

Set of values, expressed in milliseconds, indicating the amount of latency that a player experiences when connected to AWS regions.

" }, "StartTime":{ "shape":"Timestamp", @@ -2286,15 +2764,19 @@ }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

IP address of the game session. To connect to a Amazon GameLift game server, an app needs both the IP address and port number. This value is set once the new game session is placed (placement status is Fulfilled).

" + "documentation":"

IP address of the game session. To connect to an Amazon GameLift game server, an app needs both the IP address and port number. This value is set once the new game session is placed (placement status is FULFILLED).

" }, "Port":{ "shape":"PortNumber", - "documentation":"

Port number for the game session. To connect to a Amazon GameLift game server, an app needs both the IP address and port number. This value is set once the new game session is placed (placement status is Fulfilled).

" + "documentation":"

Port number for the game session. To connect to an Amazon GameLift game server, an app needs both the IP address and port number. This value is set once the new game session is placed (placement status is FULFILLED).

" }, "PlacedPlayerSessions":{ "shape":"PlacedPlayerSessionList", - "documentation":"

Collection of information on player sessions created in response to the game session placement request. These player sessions are created only once a new game session is successfully placed (placement status is Fulfilled). This information includes the player ID (as provided in the placement request) and the corresponding player session ID. Retrieve full player sessions by calling DescribePlayerSessions with the player session ID.

" + "documentation":"

Collection of information on player sessions created in response to the game session placement request. These player sessions are created only once a new game session is successfully placed (placement status is FULFILLED). This information includes the player ID (as provided in the placement request) and the corresponding player session ID. Retrieve full player sessions by calling DescribePlayerSessions with the player session ID.

" + }, + "GameSessionData":{ + "shape":"GameSessionData", + "documentation":"

Set of developer-defined game session properties, formatted as a single string value. This data is included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session).

" } }, "documentation":"

Object that describes a StartGameSessionPlacement request. This object includes the full details of the original request plus the current status and start/end time stamps.
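
To make the placement life cycle concrete, here is a hedged botocore sketch (not part of the patch): it starts a placement and polls until the status leaves PENDING; the queue name, placement ID, and latency values are placeholders.

    import time
    import botocore.session

    client = botocore.session.get_session().create_client(
        'gamelift', region_name='us-east-1')

    # Latency hints help the queue pick a region close to the players.
    client.start_game_session_placement(
        PlacementId='placement-0001',            # placeholder, caller-supplied
        GameSessionQueueName='example-queue',    # placeholder
        MaximumPlayerSessionCount=8,
        PlayerLatencies=[{'PlayerId': 'player-1',
                          'RegionIdentifier': 'us-east-1',
                          'LatencyInMilliseconds': 40.0}],
    )

    # Poll until the placement resolves; FULFILLED carries the game session
    # ID, IP address, and port described above.
    while True:
        placement = client.describe_game_session_placement(
            PlacementId='placement-0001')['GameSessionPlacement']
        if placement['Status'] != 'PENDING':
            break
        time.sleep(2)
    print(placement['Status'], placement.get('IpAddress'), placement.get('Port'))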

Game session placement-related operations include:

" @@ -2313,7 +2795,7 @@ "members":{ "Name":{ "shape":"GameSessionQueueName", - "documentation":"

Descriptive label that is associated with queue. Queue names must be unique within each region.

" + "documentation":"

Descriptive label that is associated with game session queue. Queue names must be unique within each region.

" }, "GameSessionQueueArn":{ "shape":"ArnStringModel", @@ -2321,7 +2803,7 @@ }, "TimeoutInSeconds":{ "shape":"WholeNumber", - "documentation":"

Maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT status.

" + "documentation":"

Maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT status.

" }, "PlayerLatencyPolicies":{ "shape":"PlayerLatencyPolicyList", @@ -2332,7 +2814,7 @@ "documentation":"

List of fleets that can be used to fulfill game session placement requests in the queue. Fleets are identified by either a fleet ARN or a fleet alias ARN. Destinations are listed in default preference order.

" } }, - "documentation":"

Configuration of a queue that is used to process game session placement requests. The queue configuration identifies several game features:

Queue-related operations include:

Queue-related operations include the following:

" + "documentation":"

Configuration of a queue that is used to process game session placement requests. The queue configuration identifies several game features:

Queue-related operations include:

" }, "GameSessionQueueDestination":{ "type":"structure", @@ -2467,7 +2949,7 @@ "documentation":"

Time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" } }, - "documentation":"

Properties that describe an instance of a virtual computing resource that hosts one or more game servers. A fleet contains zero or more instances.

" + "documentation":"

Properties that describe an instance of a virtual computing resource that hosts one or more game servers. A fleet may contain zero or more instances.

" }, "InstanceAccess":{ "type":"structure", @@ -2601,6 +3083,11 @@ "UDP" ] }, + "LatencyMap":{ + "type":"map", + "key":{"shape":"NonEmptyString"}, + "value":{"shape":"PositiveInteger"} + }, "LimitExceededException":{ "type":"structure", "members":{ @@ -2626,7 +3113,7 @@ }, "NextToken":{ "shape":"NonEmptyString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To specify the start of the result set, do not specify a value.

" + "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value.

" } }, "documentation":"

Represents the input for a request action.
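
The NextToken contract described above is the same across these request shapes. A small hedged sketch of the manual pagination loop (queue listing is just one example; the limit value is arbitrary):

    import botocore.session

    client = botocore.session.get_session().create_client(
        'gamelift', region_name='us-east-1')

    # First call omits NextToken to start at the beginning of the result set;
    # later calls pass back each returned token until none is returned.
    queues, token = [], None
    while True:
        kwargs = {'Limit': 10}
        if token:
            kwargs['NextToken'] = token
        page = client.describe_game_session_queues(**kwargs)
        queues.extend(page['GameSessionQueues'])
        token = page.get('NextToken')
        if token is None:
            break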

" @@ -2658,7 +3145,7 @@ }, "NextToken":{ "shape":"NonEmptyString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To specify the start of the result set, do not specify a value.

" + "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value.

" } }, "documentation":"

Represents the input for a request action.

" @@ -2690,7 +3177,7 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To specify the start of the result set, do not specify a value.

" + "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value.

" } }, "documentation":"

Represents the input for a request action.

" @@ -2709,6 +3196,194 @@ }, "documentation":"

Represents the returned data in response to a request action.

" }, + "MatchedPlayerSession":{ + "type":"structure", + "members":{ + "PlayerId":{ + "shape":"NonZeroAndMaxString", + "documentation":"

Unique identifier for a player.

" + }, + "PlayerSessionId":{ + "shape":"PlayerSessionId", + "documentation":"

Unique identifier for a player session.

" + } + }, + "documentation":"

New player session created as a result of a successful FlexMatch match. A successful match automatically creates new player sessions for every player ID in the original matchmaking request.

When players connect to the match's game session, they must include both player ID and player session ID in order to claim their assigned player slot.

" + }, + "MatchedPlayerSessionList":{ + "type":"list", + "member":{"shape":"MatchedPlayerSession"} + }, + "MatchmakingAcceptanceTimeoutInteger":{ + "type":"integer", + "max":600, + "min":1 + }, + "MatchmakingConfiguration":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"MatchmakingIdStringModel", + "documentation":"

Unique identifier for a matchmaking configuration. This name is used to identify the configuration associated with a matchmaking request or ticket.

" + }, + "Description":{ + "shape":"NonZeroAndMaxString", + "documentation":"

Descriptive label that is associated with a matchmaking configuration.

" + }, + "GameSessionQueueArns":{ + "shape":"QueueArnsList", + "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session queue and uniquely identifies it. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. These queues are used when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any region.

" + }, + "RequestTimeoutSeconds":{ + "shape":"MatchmakingRequestTimeoutInteger", + "documentation":"

Maximum duration, in seconds, that a matchmaking ticket can remain in process before timing out. Requests that time out can be resubmitted as needed.

" + }, + "AcceptanceTimeoutSeconds":{ + "shape":"MatchmakingAcceptanceTimeoutInteger", + "documentation":"

Length of time (in seconds) to wait for players to accept a proposed match. If any player rejects the match or fails to accept before the timeout, the ticket continues to look for an acceptable match.

" + }, + "AcceptanceRequired":{ + "shape":"Boolean", + "documentation":"

Flag that determines whether or not a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE.

" + }, + "RuleSetName":{ + "shape":"MatchmakingIdStringModel", + "documentation":"

Unique identifier for a matchmaking rule set to use with this configuration. A matchmaking configuration can only use rule sets that are defined in the same region.

" + }, + "NotificationTarget":{ + "shape":"SnsArnStringModel", + "documentation":"

SNS topic ARN that is set up to receive matchmaking notifications.

" + }, + "AdditionalPlayerCount":{ + "shape":"WholeNumber", + "documentation":"

Number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single 12-person team, and the additional player count is set to 2, only 10 players are selected for the match.

" + }, + "CustomEventData":{ + "shape":"CustomEventData", + "documentation":"

Information to attach to all events related to the matchmaking configuration.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

Time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + }, + "GameProperties":{ + "shape":"GamePropertyList", + "documentation":"

Set of developer-defined properties for a game session, formatted as a set of type:value pairs. These properties are included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + }, + "GameSessionData":{ + "shape":"GameSessionData", + "documentation":"

Set of developer-defined game session properties, formatted as a single string value. This data is included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + } + }, + "documentation":"

Guidelines for use with FlexMatch to match players into games. All matchmaking requests must specify a matchmaking configuration.

" + }, + "MatchmakingConfigurationList":{ + "type":"list", + "member":{"shape":"MatchmakingConfiguration"} + }, + "MatchmakingConfigurationStatus":{ + "type":"string", + "enum":[ + "CANCELED", + "COMPLETE", + "FAILED", + "PLACING", + "QUEUED", + "REQUIRES_ACCEPTANCE", + "SEARCHING", + "TIMED_OUT" + ] + }, + "MatchmakingIdList":{ + "type":"list", + "member":{"shape":"MatchmakingIdStringModel"} + }, + "MatchmakingIdStringModel":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9-\\.]+" + }, + "MatchmakingPlayerIdList":{ + "type":"list", + "member":{"shape":"PlayerIdStringModel"} + }, + "MatchmakingRequestTimeoutInteger":{ + "type":"integer", + "max":43200, + "min":1 + }, + "MatchmakingRuleSet":{ + "type":"structure", + "required":["RuleSetBody"], + "members":{ + "RuleSetName":{ + "shape":"MatchmakingIdStringModel", + "documentation":"

Unique identifier for a matchmaking rule set.

" + }, + "RuleSetBody":{ + "shape":"RuleSetBody", + "documentation":"

Collection of matchmaking rules, formatted as a JSON string. (Note that comments are not allowed in JSON, but most elements support a description field.)

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

Time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + } + }, + "documentation":"

Set of rule statements, used with FlexMatch, that determine how to build a certain kind of player match. Each rule set describes a type of group to be created and defines the parameters for acceptable player matches. Rule sets are used in MatchmakingConfiguration objects.

A rule set may define the following elements for a match. For detailed information and examples showing how to construct a rule set, see Create Matchmaking Rules for Your Game.
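
A hedged sketch of round-tripping a rule set body through ValidateMatchmakingRuleSet follows; the rule set shown is deliberately minimal, and its field names (name, ruleLanguageVersion, teams) are assumptions to be checked against the FlexMatch rules documentation.

    import json
    import botocore.session

    client = botocore.session.get_session().create_client(
        'gamelift', region_name='us-east-1')

    # Assumed minimal rule set: one team of exactly two players.
    rule_set = {
        'name': 'two_player_match',
        'ruleLanguageVersion': '1.0',
        'teams': [{'name': 'players', 'minPlayers': 2, 'maxPlayers': 2}],
    }
    result = client.validate_matchmaking_rule_set(
        RuleSetBody=json.dumps(rule_set))
    print(result['Valid'])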

" + }, + "MatchmakingRuleSetList":{ + "type":"list", + "member":{"shape":"MatchmakingRuleSet"} + }, + "MatchmakingRuleSetNameList":{ + "type":"list", + "member":{"shape":"MatchmakingIdStringModel"}, + "max":10, + "min":1 + }, + "MatchmakingTicket":{ + "type":"structure", + "members":{ + "TicketId":{ + "shape":"MatchmakingIdStringModel", + "documentation":"

Unique identifier for a matchmaking ticket.

" + }, + "ConfigurationName":{ + "shape":"MatchmakingIdStringModel", + "documentation":"

Name of the MatchmakingConfiguration that is used with this ticket. Matchmaking configurations determine how players are grouped into a match and how a new game session is created for the match.

" + }, + "Status":{ + "shape":"MatchmakingConfigurationStatus", + "documentation":"

Current status of the matchmaking request.

" + }, + "StatusReason":{ + "shape":"StringModel", + "documentation":"

Code to explain the current status. For example, a status reason may indicate when a ticket has returned to SEARCHING status after a proposed match fails to receive player acceptances.

" + }, + "StatusMessage":{ + "shape":"StringModel", + "documentation":"

Additional information about the current status.

" + }, + "StartTime":{ + "shape":"Timestamp", + "documentation":"

Time stamp indicating when this matchmaking request was received. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + }, + "Players":{ + "shape":"PlayerList", + "documentation":"

A set of Player objects, each representing a player to find matches for. Players are identified by a unique player ID and may include latency data for use during matchmaking. If the ticket is in status COMPLETED, the Player objects include the team the players were assigned to in the resulting match.

" + }, + "GameSessionConnectionInfo":{ + "shape":"GameSessionConnectionInfo", + "documentation":"

Identifier and connection information of the game session created for the match. This information is added to the ticket only after the matchmaking request has been successfully completed.

" + } + }, + "documentation":"

Ticket generated to track the progress of a matchmaking request. Each ticket is uniquely identified by a ticket ID, supplied by the requester, when creating a matchmaking request with StartMatchmaking. Tickets can be retrieved by calling DescribeMatchmaking with the ticket ID.

" + }, + "MatchmakingTicketList":{ + "type":"list", + "member":{"shape":"MatchmakingTicket"} + }, "MaxConcurrentGameSessionActivations":{ "type":"integer", "max":2147483647, @@ -2792,6 +3467,33 @@ "type":"list", "member":{"shape":"PlacedPlayerSession"} }, + "Player":{ + "type":"structure", + "members":{ + "PlayerId":{ + "shape":"PlayerIdStringModel", + "documentation":"

Unique identifier for a player.

" + }, + "PlayerAttributes":{ + "shape":"PlayerAttributeMap", + "documentation":"

Collection of name:value pairs containing player information for use in matchmaking. Player attribute names need to match playerAttributes names in the rule set being used. Example: \"PlayerAttributes\": {\"skill\": {\"N\": \"23\"}, \"gameMode\": {\"S\": \"deathmatch\"}}.

" + }, + "Team":{ + "shape":"NonZeroAndMaxString", + "documentation":"

Name of the team that the player is assigned to in a match. Team names are defined in a matchmaking rule set.

" + }, + "LatencyInMs":{ + "shape":"LatencyMap", + "documentation":"

Set of values, expressed in milliseconds, indicating the amount of latency that a player experiences when connected to AWS regions. If this property is present, FlexMatch considers placing the match only in regions that are included in the object map. If not present (that is, null), FlexMatch ignores latency issues and may place the match in any region in the queue.

If this property contains an empty map, FlexMatch assumes that no regions are available to the player. In this scenario, the ticket is not matchable and always times out unless canceled.

" + } + }, + "documentation":"

Object used in matchmaking to represent a player. When starting a matchmaking request, a player has a player ID and may have latency data. Team information is added after a match has been successfully completed.
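
Put together, a Player entry looks like the following sketch, which echoes the PlayerAttributes example above; the IDs and regions are placeholders.

    # Attribute names must match playerAttributes names in the active rule set.
    player = {
        'PlayerId': 'player-1',                  # placeholder
        'PlayerAttributes': {
            'skill': {'N': 23},                  # numeric attribute
            'gameMode': {'S': 'deathmatch'},     # string attribute
        },
        # Region -> observed latency in ms; an empty map would make the
        # ticket unmatchable, as noted above.
        'LatencyInMs': {'us-east-1': 50, 'us-west-2': 110},
    }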

" + }, + "PlayerAttributeMap":{ + "type":"map", + "key":{"shape":"NonZeroAndMaxString"}, + "value":{"shape":"AttributeValue"} + }, "PlayerData":{ "type":"string", "max":2048, @@ -2808,6 +3510,12 @@ "max":25, "min":1 }, + "PlayerIdStringModel":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9-\\.]+" + }, "PlayerLatency":{ "type":"structure", "members":{ @@ -2848,6 +3556,10 @@ "type":"list", "member":{"shape":"PlayerLatencyPolicy"} }, + "PlayerList":{ + "type":"list", + "member":{"shape":"Player"} + }, "PlayerSession":{ "type":"structure", "members":{ @@ -2892,7 +3604,7 @@ "documentation":"

Developer-defined information related to a player. Amazon GameLift does not use this data, so it can be formatted as needed for use in the game.

" } }, - "documentation":"

Properties describing a player session. A player session represents either a player reservation for a game session or actual player activity in a game session. A player session object (including player data) is automatically passed to a game session when the player connects to the game session and is validated.

Player-session-related operations include:

" + "documentation":"

Properties describing a player session. Player session objects are created either by creating a player session for a specific game session, or as part of a game session placement. A player session represents either a player reservation for a game session (status RESERVED) or actual player activity in a game session (status ACTIVE). A player session object (including player data) is automatically passed to a game session when the player connects to the game session and is validated.

When a player disconnects, the player session status changes to COMPLETED. Once the session ends, the player session object is retained for 30 days and then removed.

Player-session-related operations include:

" }, "PlayerSessionCreationPolicy":{ "type":"string", @@ -2996,6 +3708,10 @@ }, "documentation":"

Represents the returned data in response to a request action.

" }, + "QueueArnsList":{ + "type":"list", + "member":{"shape":"ArnStringModel"} + }, "RequestUploadCredentialsInput":{ "type":"structure", "required":["BuildId"], @@ -3081,6 +3797,16 @@ "TERMINAL" ] }, + "RuleSetBody":{ + "type":"string", + "max":65535, + "min":1 + }, + "RuleSetLimit":{ + "type":"integer", + "max":10, + "min":1 + }, "RuntimeConfiguration":{ "type":"structure", "members":{ @@ -3097,7 +3823,7 @@ "documentation":"

Maximum amount of time (in seconds) that a game session can remain in status ACTIVATING. If the game session is not active before the timeout, activation is terminated and the game session status is changed to TERMINATED.

" } }, - "documentation":"

A collection of server process configurations that describe what processes to run on each instance in a fleet. All fleets must have a runtime configuration. Each instance in the fleet launches the server processes specified in the run-time configuration and launches new ones as existing processes end. Each instance regularly checks for an updated run-time configuration and follows the new instructions.

The run-time configuration enables the instances in a fleet to run multiple processes simultaneously. Potential scenarios are as follows: (1) Run multiple processes of a single game server executable to maximize usage of your hosting resources. (2) Run one or more processes of different build executables, such as your game server executable and a related program, or two or more different versions of a game server. (3) Run multiple processes of a single game server but with different launch parameters, for example to run one process on each instance in debug mode.

A Amazon GameLift instance is limited to 50 processes running simultaneously. A run-time configuration must specify fewer than this limit. To calculate the total number of processes specified in a run-time configuration, add the values of the ConcurrentExecutions parameter for each ServerProcess object in the run-time configuration.

Fleet-related operations include:

" + "documentation":"

A collection of server process configurations that describe what processes to run on each instance in a fleet. All fleets must have a run-time configuration. Each instance in the fleet launches the server processes specified in the run-time configuration and launches new ones as existing processes end. Each instance regularly checks for an updated run-time configuration and follows the new instructions.

The run-time configuration enables the instances in a fleet to run multiple processes simultaneously. Potential scenarios are as follows: (1) Run multiple processes of a single game server executable to maximize usage of your hosting resources. (2) Run one or more processes of different build executables, such as your game server executable and a related program, or two or more different versions of a game server. (3) Run multiple processes of a single game server but with different launch parameters, for example to run one process on each instance in debug mode.

An Amazon GameLift instance is limited to 50 processes running simultaneously. A run-time configuration must specify fewer than this limit. To calculate the total number of processes specified in a run-time configuration, add the values of the ConcurrentExecutions parameter for each ServerProcess object in the run-time configuration.
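
The ConcurrentExecutions arithmetic is easy to sanity-check in code. A hedged botocore sketch (launch path and fleet ID are placeholders):

    import botocore.session

    client = botocore.session.get_session().create_client(
        'gamelift', region_name='us-east-1')

    runtime_configuration = {
        'ServerProcesses': [
            # Two concurrent copies of the server, plus one debug-mode copy.
            {'LaunchPath': 'C:\\game\\GameServer.exe',
             'ConcurrentExecutions': 2},
            {'LaunchPath': 'C:\\game\\GameServer.exe',
             'Parameters': '-debug',
             'ConcurrentExecutions': 1},
        ],
    }
    # Total process count is the sum of ConcurrentExecutions (here 3),
    # which must stay within the 50-process instance limit noted above.
    total = sum(p['ConcurrentExecutions']
                for p in runtime_configuration['ServerProcesses'])
    assert total <= 50

    client.update_runtime_configuration(
        FleetId='fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912',  # placeholder
        RuntimeConfiguration=runtime_configuration,
    )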

Fleet-related operations include:

" }, "S3Location":{ "type":"structure", @@ -3208,7 +3934,7 @@ }, "NextToken":{ "shape":"NonZeroAndMaxString", - "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To specify the start of the result set, do not specify a value.

" + "documentation":"

Token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value.

" } }, "documentation":"

Represents the input for a request action.

" @@ -3255,6 +3981,12 @@ "max":50, "min":1 }, + "SnsArnStringModel":{ + "type":"string", + "max":300, + "min":1, + "pattern":"[a-zA-Z0-9:_/-]+" + }, "StartGameSessionPlacementInput":{ "type":"structure", "required":[ @@ -3273,7 +4005,7 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

Set of developer-defined properties for a game session. These properties are passed to the server process hosting the game session.

" + "documentation":"

Set of developer-defined properties for a game session, formatted as a set of type:value pairs. These properties are included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session).

" }, "MaximumPlayerSessionCount":{ "shape":"WholeNumber", @@ -3285,11 +4017,15 @@ }, "PlayerLatencies":{ "shape":"PlayerLatencyList", - "documentation":"

Set of values, expressed in milliseconds, indicating the amount of latency that players are experiencing when connected to AWS regions. This information is used to try to place the new game session where it can offer the best possible gameplay experience for the players.

" + "documentation":"

Set of values, expressed in milliseconds, indicating the amount of latency that a player experiences when connected to AWS regions. This information is used to try to place the new game session where it can offer the best possible gameplay experience for the players.

" }, "DesiredPlayerSessions":{ "shape":"DesiredPlayerSessionList", "documentation":"

Set of information on each player to create a player session for.

" + }, + "GameSessionData":{ + "shape":"GameSessionData", + "documentation":"

Set of developer-defined game session properties, formatted as a single string value. This data is included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session).

" } }, "documentation":"

Represents the input for a request action.

" @@ -3304,6 +4040,38 @@ }, "documentation":"

Represents the returned data in response to a request action.

" }, + "StartMatchmakingInput":{ + "type":"structure", + "required":[ + "ConfigurationName", + "Players" + ], + "members":{ + "TicketId":{ + "shape":"MatchmakingIdStringModel", + "documentation":"

Unique identifier for a matchmaking ticket. Use this identifier to track the matchmaking ticket status and retrieve match results.

" + }, + "ConfigurationName":{ + "shape":"MatchmakingIdStringModel", + "documentation":"

Name of the matchmaking configuration to use for this request. Matchmaking configurations must exist in the same region as this request.

" + }, + "Players":{ + "shape":"PlayerList", + "documentation":"

Information on each player to be matched. This information must include a player ID, and may contain player attributes and latency data to be used in the matchmaking process. After a successful match, Player objects contain the name of the team the player is assigned to.

" + } + }, + "documentation":"

Represents the input for a request action.
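
A hedged end-to-end sketch of this request: the configuration name, ticket ID, and player are placeholders, and the in-progress statuses polled here are taken from the MatchmakingConfigurationStatus values above.

    import time
    import botocore.session

    client = botocore.session.get_session().create_client(
        'gamelift', region_name='us-east-1')

    ticket = client.start_matchmaking(
        TicketId='ticket-example',            # optional, caller-supplied
        ConfigurationName='example-config',   # must exist in this region
        Players=[{'PlayerId': 'player-1'}],
    )['MatchmakingTicket']

    # Poll DescribeMatchmaking until the ticket leaves the in-progress states.
    while ticket['Status'] in ('QUEUED', 'SEARCHING',
                               'REQUIRES_ACCEPTANCE', 'PLACING'):
        time.sleep(5)
        ticket = client.describe_matchmaking(
            TicketIds=[ticket['TicketId']])['TicketList'][0]
    print(ticket['Status'], ticket.get('StatusReason'))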

" + }, + "StartMatchmakingOutput":{ + "type":"structure", + "members":{ + "MatchmakingTicket":{ + "shape":"MatchmakingTicket", + "documentation":"

Ticket representing the matchmaking request. This object includes the information provided in the request, the ticket status, and match results generated during the matchmaking process.

" + } + }, + "documentation":"

Represents the returned data in response to a request action.

" + }, "StopGameSessionPlacementInput":{ "type":"structure", "required":["PlacementId"], @@ -3320,15 +4088,37 @@ "members":{ "GameSessionPlacement":{ "shape":"GameSessionPlacement", - "documentation":"

Object that describes the canceled game session placement, with Cancelled status and an end time stamp.

" + "documentation":"

Object that describes the canceled game session placement, with CANCELLED status and an end time stamp.

" } }, "documentation":"

Represents the returned data in response to a request action.

" }, + "StopMatchmakingInput":{ + "type":"structure", + "required":["TicketId"], + "members":{ + "TicketId":{ + "shape":"MatchmakingIdStringModel", + "documentation":"

Unique identifier for a matchmaking ticket.

" + } + }, + "documentation":"

Represents the input for a request action.

" + }, + "StopMatchmakingOutput":{ + "type":"structure", + "members":{ + } + }, + "StringDoubleMap":{ + "type":"map", + "key":{"shape":"NonZeroAndMaxString"}, + "value":{"shape":"DoubleObject"} + }, "StringList":{ "type":"list", "member":{"shape":"NonZeroAndMaxString"} }, + "StringModel":{"type":"string"}, "TerminalRoutingStrategyException":{ "type":"structure", "members":{ @@ -3346,6 +4136,14 @@ "documentation":"

The client failed authentication. Clients should not retry such requests.

", "exception":true }, + "UnsupportedRegionException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"} + }, + "documentation":"

The requested operation is not supported in the region specified.

", + "exception":true + }, "UpdateAliasInput":{ "type":"structure", "required":["AliasId"], @@ -3554,11 +4352,11 @@ "members":{ "Name":{ "shape":"GameSessionQueueName", - "documentation":"

Descriptive label that is associated with queue. Queue names must be unique within each region.

" + "documentation":"

Descriptive label that is associated with a game session queue. Queue names must be unique within each region.

" }, "TimeoutInSeconds":{ "shape":"WholeNumber", - "documentation":"

Maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT status.

" + "documentation":"

Maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT status.

" }, "PlayerLatencyPolicies":{ "shape":"PlayerLatencyPolicyList", @@ -3581,6 +4379,71 @@ }, "documentation":"

Represents the returned data in response to a request action.

" }, + "UpdateMatchmakingConfigurationInput":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"MatchmakingIdStringModel", + "documentation":"

Unique identifier for a matchmaking configuration to update.

" + }, + "Description":{ + "shape":"NonZeroAndMaxString", + "documentation":"

Descriptive label that is associated with a matchmaking configuration.

" + }, + "GameSessionQueueArns":{ + "shape":"QueueArnsList", + "documentation":"

Amazon Resource Name (ARN) that is assigned to a game session queue and uniquely identifies it. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. These queues are used when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any region.

" + }, + "RequestTimeoutSeconds":{ + "shape":"MatchmakingRequestTimeoutInteger", + "documentation":"

Maximum duration, in seconds, that a matchmaking ticket can remain in process before timing out. Requests that time out can be resubmitted as needed.

" + }, + "AcceptanceTimeoutSeconds":{ + "shape":"MatchmakingAcceptanceTimeoutInteger", + "documentation":"

Length of time (in seconds) to wait for players to accept a proposed match. If any player rejects the match or fails to accept before the timeout, the ticket continues to look for an acceptable match.

" + }, + "AcceptanceRequired":{ + "shape":"Boolean", + "documentation":"

Flag that determines whether or not a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE.

" + }, + "RuleSetName":{ + "shape":"MatchmakingIdStringModel", + "documentation":"

Unique identifier for a matchmaking rule set to use with this configuration. A matchmaking configuration can only use rule sets that are defined in the same region.

" + }, + "NotificationTarget":{ + "shape":"SnsArnStringModel", + "documentation":"

SNS topic ARN that is set up to receive matchmaking notifications. See Setting up Notifications for Matchmaking for more information.

" + }, + "AdditionalPlayerCount":{ + "shape":"WholeNumber", + "documentation":"

Number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single 12-person team, and the additional player count is set to 2, only 10 players are selected for the match.

" + }, + "CustomEventData":{ + "shape":"CustomEventData", + "documentation":"

Information to attach to all events related to the matchmaking configuration.

" + }, + "GameProperties":{ + "shape":"GamePropertyList", + "documentation":"

Set of developer-defined properties for a game session, formatted as a set of type:value pairs. These properties are included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + }, + "GameSessionData":{ + "shape":"GameSessionData", + "documentation":"

Set of developer-defined game session properties, formatted as a single string value. This data is included in the GameSession object, which is passed to the game server with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + } + }, + "documentation":"

Represents the input for a request action.

" + }, + "UpdateMatchmakingConfigurationOutput":{ + "type":"structure", + "members":{ + "Configuration":{ + "shape":"MatchmakingConfiguration", + "documentation":"

Object that describes the updated matchmaking configuration.

" + } + }, + "documentation":"

Represents the returned data in response to a request action.

" + }, "UpdateRuntimeConfigurationInput":{ "type":"structure", "required":[ @@ -3609,10 +4472,31 @@ }, "documentation":"

Represents the returned data in response to a request action.

" }, + "ValidateMatchmakingRuleSetInput":{ + "type":"structure", + "required":["RuleSetBody"], + "members":{ + "RuleSetBody":{ + "shape":"RuleSetBody", + "documentation":"

Collection of matchmaking rules to validate, formatted as a JSON string.

" + } + }, + "documentation":"

Represents the input for a request action.

" + }, + "ValidateMatchmakingRuleSetOutput":{ + "type":"structure", + "members":{ + "Valid":{ + "shape":"Boolean", + "documentation":"

Response indicating whether or not the rule set is valid.

" + } + }, + "documentation":"

Represents the returned data in response to a request action.

" + }, "WholeNumber":{ "type":"integer", "min":0 } }, - "documentation":"Amazon GameLift Service

Amazon GameLift is a managed service for developers who need a scalable, dedicated server solution for their multiplayer games. Amazon GameLift provides tools for the following tasks: (1) acquire computing resources and deploy game servers, (2) scale game server capacity to meet player demand, (3) host game sessions and manage player access, and (4) track in-depth metrics on player usage and server performance.

The Amazon GameLift service API includes two important function sets:

This reference guide describes the low-level service API for Amazon GameLift. You can use the API functionality with these tools:

MORE RESOURCES

API SUMMARY

This list offers a functional overview of the Amazon GameLift service API.

Managing Games and Players

These actions allow you to start new game sessions, find existing game sessions, track status and other game session information, and enable access for players to join game sessions.

Setting Up and Managing Game Servers

When setting up Amazon GameLift resources for your game, you first create a game build and upload it to Amazon GameLift. You can then use these actions to configure and manage a fleet of resources to run your game servers, scale capacity to meet player demand, access performance and utilization metrics, and more.

" + "documentation":"Amazon GameLift Service

Amazon GameLift is a managed service for developers who need a scalable, dedicated server solution for their multiplayer games. Amazon GameLift provides tools for the following tasks: (1) acquire computing resources and deploy game servers, (2) scale game server capacity to meet player demand, (3) host game sessions and manage player access, and (4) track in-depth metrics on player usage and server performance.

The Amazon GameLift service API includes two important function sets:

This reference guide describes the low-level service API for Amazon GameLift. You can use the API functionality with these tools:

MORE RESOURCES

API SUMMARY

This list offers a functional overview of the Amazon GameLift service API.

Managing Games and Players

Use these actions to start new game sessions, find existing game sessions, track game session status and other information, and enable player access to game sessions.

Setting Up and Managing Game Servers

When setting up Amazon GameLift resources for your game, you first create a game build and upload it to Amazon GameLift. You can then use these actions to configure and manage a fleet of resources to run your game servers, scale capacity to meet player demand, access performance and utilization metrics, and more.

" } diff --git a/botocore/data/glue/2017-03-31/paginators-1.json b/botocore/data/glue/2017-03-31/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/glue/2017-03-31/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/glue/2017-03-31/service-2.json b/botocore/data/glue/2017-03-31/service-2.json new file mode 100644 index 00000000..8326489d --- /dev/null +++ b/botocore/data/glue/2017-03-31/service-2.json @@ -0,0 +1,5463 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2017-03-31", + "endpointPrefix":"glue", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"AWS Glue", + "signatureVersion":"v4", + "targetPrefix":"AWSGlue", + "uid":"glue-2017-03-31" + }, + "operations":{ + "BatchCreatePartition":{ + "name":"BatchCreatePartition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchCreatePartitionRequest"}, + "output":{"shape":"BatchCreatePartitionResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"ResourceNumberLimitExceededException"}, + {"shape":"InternalServiceException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Creates one or more partitions in a batch operation.

" + }, + "BatchDeleteConnection":{ + "name":"BatchDeleteConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchDeleteConnectionRequest"}, + "output":{"shape":"BatchDeleteConnectionResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Deletes a list of connection definitions from the Data Catalog.

" + }, + "BatchDeletePartition":{ + "name":"BatchDeletePartition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchDeletePartitionRequest"}, + "output":{"shape":"BatchDeletePartitionResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Deletes one or more partitions in a batch operation.

" + }, + "BatchDeleteTable":{ + "name":"BatchDeleteTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchDeleteTableRequest"}, + "output":{"shape":"BatchDeleteTableResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Deletes multiple tables at once.

" + }, + "BatchGetPartition":{ + "name":"BatchGetPartition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetPartitionRequest"}, + "output":{"shape":"BatchGetPartitionResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Retrieves partitions in a batch request.

" + }, + "CreateClassifier":{ + "name":"CreateClassifier", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateClassifierRequest"}, + "output":{"shape":"CreateClassifierResponse"}, + "errors":[ + {"shape":"AlreadyExistsException"}, + {"shape":"InvalidInputException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Creates a Classifier in the user's account.

" + }, + "CreateConnection":{ + "name":"CreateConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateConnectionRequest"}, + "output":{"shape":"CreateConnectionResponse"}, + "errors":[ + {"shape":"AlreadyExistsException"}, + {"shape":"InvalidInputException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Creates a connection definition in the Data Catalog.

" + }, + "CreateCrawler":{ + "name":"CreateCrawler", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCrawlerRequest"}, + "output":{"shape":"CreateCrawlerResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"ResourceNumberLimitExceededException"} + ], + "documentation":"

Creates a new Crawler with specified targets, role, configuration, and optional schedule. At least one crawl target must be specified, in either the s3Targets or the jdbcTargets field.
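
A hedged botocore sketch of the call (role ARN, database name, and S3 path are placeholders); note the single S3 target satisfying the requirement above:

    import botocore.session

    client = botocore.session.get_session().create_client(
        'glue', region_name='us-east-1')

    client.create_crawler(
        Name='example-crawler',
        Role='arn:aws:iam::123456789012:role/ExampleGlueRole',  # placeholder
        DatabaseName='example_db',
        # At least one crawl target is required; here a single S3 path.
        Targets={'S3Targets': [{'Path': 's3://example-bucket/data/'}]},
        Schedule='cron(0 12 * * ? *)',  # optional: run daily at 12:00 UTC
    )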

" + }, + "CreateDatabase":{ + "name":"CreateDatabase", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDatabaseRequest"}, + "output":{"shape":"CreateDatabaseResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"ResourceNumberLimitExceededException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Creates a new database in a Data Catalog.

" + }, + "CreateDevEndpoint":{ + "name":"CreateDevEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDevEndpointRequest"}, + "output":{"shape":"CreateDevEndpointResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"IdempotentParameterMismatchException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"InvalidInputException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNumberLimitExceededException"} + ], + "documentation":"

Creates a new DevEndpoint.

" + }, + "CreateJob":{ + "name":"CreateJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateJobRequest"}, + "output":{"shape":"CreateJobResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"IdempotentParameterMismatchException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"ResourceNumberLimitExceededException"} + ], + "documentation":"

Creates a new job.

" + }, + "CreatePartition":{ + "name":"CreatePartition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePartitionRequest"}, + "output":{"shape":"CreatePartitionResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"ResourceNumberLimitExceededException"}, + {"shape":"InternalServiceException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Creates a new partition.

" + }, + "CreateScript":{ + "name":"CreateScript", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateScriptRequest"}, + "output":{"shape":"CreateScriptResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Transforms a directed acyclic graph (DAG) into a Python script.
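
A hedged sketch of the DAG-to-script round trip; the two-node DAG below is illustrative only (the node types and argument names are assumptions; in practice the DAG would usually come from GetDataflowGraph):

    import botocore.session

    client = botocore.session.get_session().create_client(
        'glue', region_name='us-east-1')

    # Minimal illustrative DAG: one source node feeding one sink node.
    response = client.create_script(
        DagNodes=[
            {'Id': 'source', 'NodeType': 'DataSource',
             'Args': [{'Name': 'database', 'Value': '"example_db"'},
                      {'Name': 'table_name', 'Value': '"example_table"'}]},
            {'Id': 'sink', 'NodeType': 'DataSink',
             'Args': [{'Name': 'database', 'Value': '"example_db"'},
                      {'Name': 'table_name', 'Value': '"example_out"'}]},
        ],
        DagEdges=[{'Source': 'source', 'Target': 'sink'}],
    )
    print(response['PythonScript'])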

" + }, + "CreateTable":{ + "name":"CreateTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTableRequest"}, + "output":{"shape":"CreateTableResponse"}, + "errors":[ + {"shape":"AlreadyExistsException"}, + {"shape":"InvalidInputException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"ResourceNumberLimitExceededException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Creates a new table definition in the Data Catalog.

" + }, + "CreateTrigger":{ + "name":"CreateTrigger", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTriggerRequest"}, + "output":{"shape":"CreateTriggerResponse"}, + "errors":[ + {"shape":"AlreadyExistsException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"ResourceNumberLimitExceededException"} + ], + "documentation":"

Creates a new trigger.

" + }, + "CreateUserDefinedFunction":{ + "name":"CreateUserDefinedFunction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateUserDefinedFunctionRequest"}, + "output":{"shape":"CreateUserDefinedFunctionResponse"}, + "errors":[ + {"shape":"AlreadyExistsException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Creates a new function definition in the Data Catalog.

" + }, + "DeleteClassifier":{ + "name":"DeleteClassifier", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteClassifierRequest"}, + "output":{"shape":"DeleteClassifierResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Removes a Classifier from the metadata store.

" + }, + "DeleteConnection":{ + "name":"DeleteConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteConnectionRequest"}, + "output":{"shape":"DeleteConnectionResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Deletes a connection from the Data Catalog.

" + }, + "DeleteCrawler":{ + "name":"DeleteCrawler", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCrawlerRequest"}, + "output":{"shape":"DeleteCrawlerResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"CrawlerRunningException"}, + {"shape":"SchedulerTransitioningException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Removes a specified Crawler from the metadata store, unless the Crawler state is RUNNING.

" + }, + "DeleteDatabase":{ + "name":"DeleteDatabase", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDatabaseRequest"}, + "output":{"shape":"DeleteDatabaseResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Removes a specified Database from a Data Catalog.

" + }, + "DeleteDevEndpoint":{ + "name":"DeleteDevEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDevEndpointRequest"}, + "output":{"shape":"DeleteDevEndpointResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Deletes a specified DevEndpoint.

" + }, + "DeleteJob":{ + "name":"DeleteJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteJobRequest"}, + "output":{"shape":"DeleteJobResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Deletes a specified job.

" + }, + "DeletePartition":{ + "name":"DeletePartition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePartitionRequest"}, + "output":{"shape":"DeletePartitionResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Deletes a specified partition.

" + }, + "DeleteTable":{ + "name":"DeleteTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTableRequest"}, + "output":{"shape":"DeleteTableResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Removes a table definition from the Data Catalog.

" + }, + "DeleteTrigger":{ + "name":"DeleteTrigger", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTriggerRequest"}, + "output":{"shape":"DeleteTriggerResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Deletes a specified trigger.

" + }, + "DeleteUserDefinedFunction":{ + "name":"DeleteUserDefinedFunction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteUserDefinedFunctionRequest"}, + "output":{"shape":"DeleteUserDefinedFunctionResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Deletes an existing function definition from the Data Catalog.

" + }, + "GetCatalogImportStatus":{ + "name":"GetCatalogImportStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetCatalogImportStatusRequest"}, + "output":{"shape":"GetCatalogImportStatusResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Retrieves the status of a migration operation.

" + }, + "GetClassifier":{ + "name":"GetClassifier", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetClassifierRequest"}, + "output":{"shape":"GetClassifierResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Retrieves a Classifier by name.

" + }, + "GetClassifiers":{ + "name":"GetClassifiers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetClassifiersRequest"}, + "output":{"shape":"GetClassifiersResponse"}, + "errors":[ + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Lists all Classifier objects in the metadata store.

" + }, + "GetConnection":{ + "name":"GetConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetConnectionRequest"}, + "output":{"shape":"GetConnectionResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Retrieves a connection definition from the Data Catalog.

" + }, + "GetConnections":{ + "name":"GetConnections", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetConnectionsRequest"}, + "output":{"shape":"GetConnectionsResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Retrieves a list of connection definitions from the Data Catalog.

" + }, + "GetCrawler":{ + "name":"GetCrawler", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetCrawlerRequest"}, + "output":{"shape":"GetCrawlerResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Retrieves metadata for a specified Crawler.

" + }, + "GetCrawlerMetrics":{ + "name":"GetCrawlerMetrics", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetCrawlerMetricsRequest"}, + "output":{"shape":"GetCrawlerMetricsResponse"}, + "errors":[ + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Retrieves metrics about specified crawlers.

" + }, + "GetCrawlers":{ + "name":"GetCrawlers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetCrawlersRequest"}, + "output":{"shape":"GetCrawlersResponse"}, + "errors":[ + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Retrieves metadata for all Crawlers defined in the customer account.

" + }, + "GetDatabase":{ + "name":"GetDatabase", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDatabaseRequest"}, + "output":{"shape":"GetDatabaseResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Retrieves the definition of a specified database.

" + }, + "GetDatabases":{ + "name":"GetDatabases", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDatabasesRequest"}, + "output":{"shape":"GetDatabasesResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Retrieves all Databases defined in a given Data Catalog.

" + }, + "GetDataflowGraph":{ + "name":"GetDataflowGraph", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDataflowGraphRequest"}, + "output":{"shape":"GetDataflowGraphResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Transforms a Python script into a directed acyclic graph (DAG).
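A minimal botocore sketch of this call, assuming configured AWS credentials; the script text is a hypothetical local copy of a Glue ETL script:

import botocore.session

glue = botocore.session.get_session().create_client('glue', region_name='us-east-1')

# Parse an ETL script into its DAG representation.
script = open('job.py').read()  # hypothetical script file
graph = glue.get_dataflow_graph(PythonScript=script)
print(graph['DagNodes'], graph['DagEdges'])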

" + }, + "GetDevEndpoint":{ + "name":"GetDevEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDevEndpointRequest"}, + "output":{"shape":"GetDevEndpointResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Retrieves information about a specified DevEndpoint.

" + }, + "GetDevEndpoints":{ + "name":"GetDevEndpoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDevEndpointsRequest"}, + "output":{"shape":"GetDevEndpointsResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Retrieves all the DevEndpoints in this AWS account.

" + }, + "GetJob":{ + "name":"GetJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetJobRequest"}, + "output":{"shape":"GetJobResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Retrieves an existing job definition.

" + }, + "GetJobRun":{ + "name":"GetJobRun", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetJobRunRequest"}, + "output":{"shape":"GetJobRunResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Retrieves the metadata for a given job run.

" + }, + "GetJobRuns":{ + "name":"GetJobRuns", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetJobRunsRequest"}, + "output":{"shape":"GetJobRunsResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Retrieves metadata for all runs of a given job.

" + }, + "GetJobs":{ + "name":"GetJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetJobsRequest"}, + "output":{"shape":"GetJobsResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Retrieves all current jobs.

" + }, + "GetMapping":{ + "name":"GetMapping", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetMappingRequest"}, + "output":{"shape":"GetMappingResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Creates mappings.

" + }, + "GetPartition":{ + "name":"GetPartition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPartitionRequest"}, + "output":{"shape":"GetPartitionResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Retrieves information about a specified partition.

" + }, + "GetPartitions":{ + "name":"GetPartitions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPartitionsRequest"}, + "output":{"shape":"GetPartitionsResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Retrieves information about the partitions in a table.

" + }, + "GetPlan":{ + "name":"GetPlan", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPlanRequest"}, + "output":{"shape":"GetPlanResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Gets a Python script to perform a specified mapping.

" + }, + "GetTable":{ + "name":"GetTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTableRequest"}, + "output":{"shape":"GetTableResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Retrieves the Table definition in a Data Catalog for a specified table.

" + }, + "GetTableVersions":{ + "name":"GetTableVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTableVersionsRequest"}, + "output":{"shape":"GetTableVersionsResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Retrieves a list of strings that identify available versions of a specified table.

" + }, + "GetTables":{ + "name":"GetTables", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTablesRequest"}, + "output":{"shape":"GetTablesResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Retrieves the definitions of some or all of the tables in a given Database.
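A minimal usage sketch with botocore; the database name and filter expression are hypothetical, and Expression may be omitted to list every table:

import botocore.session

glue = botocore.session.get_session().create_client('glue', region_name='us-east-1')
resp = glue.get_tables(DatabaseName='my_database', Expression='sales_*')
for table in resp['TableList']:
    print(table['Name'])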

" + }, + "GetTrigger":{ + "name":"GetTrigger", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTriggerRequest"}, + "output":{"shape":"GetTriggerResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Retrieves the definition of a trigger.

" + }, + "GetTriggers":{ + "name":"GetTriggers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTriggersRequest"}, + "output":{"shape":"GetTriggersResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Gets all the triggers associated with a job.

" + }, + "GetUserDefinedFunction":{ + "name":"GetUserDefinedFunction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetUserDefinedFunctionRequest"}, + "output":{"shape":"GetUserDefinedFunctionResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Retrieves a specified function definition from the Data Catalog.

" + }, + "GetUserDefinedFunctions":{ + "name":"GetUserDefinedFunctions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetUserDefinedFunctionsRequest"}, + "output":{"shape":"GetUserDefinedFunctionsResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Retrieves multiple function definitions from the Data Catalog.

" + }, + "ImportCatalogToGlue":{ + "name":"ImportCatalogToGlue", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportCatalogToGlueRequest"}, + "output":{"shape":"ImportCatalogToGlueResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Imports an existing Athena Data Catalog to AWS Glue.

" + }, + "ResetJobBookmark":{ + "name":"ResetJobBookmark", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResetJobBookmarkRequest"}, + "output":{"shape":"ResetJobBookmarkResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Resets a bookmark entry.

" + }, + "StartCrawler":{ + "name":"StartCrawler", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartCrawlerRequest"}, + "output":{"shape":"StartCrawlerResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"CrawlerRunningException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Starts a crawl using the specified Crawler, regardless of what is scheduled. If the Crawler is already running, does nothing.
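A hedged botocore sketch (the crawler name is hypothetical); since the errors listed above include CrawlerRunningException, a defensive caller can treat that error code as benign:

import botocore.session
from botocore.exceptions import ClientError

glue = botocore.session.get_session().create_client('glue', region_name='us-east-1')
try:
    glue.start_crawler(Name='my-crawler')
except ClientError as e:
    # A crawl already in progress surfaces as CrawlerRunningException.
    if e.response['Error']['Code'] != 'CrawlerRunningException':
        raise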

" + }, + "StartCrawlerSchedule":{ + "name":"StartCrawlerSchedule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartCrawlerScheduleRequest"}, + "output":{"shape":"StartCrawlerScheduleResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"SchedulerRunningException"}, + {"shape":"SchedulerTransitioningException"}, + {"shape":"NoScheduleException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Changes the schedule state of the specified crawler to SCHEDULED, unless the crawler is already running or the schedule state is already SCHEDULED.

" + }, + "StartJobRun":{ + "name":"StartJobRun", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartJobRunRequest"}, + "output":{"shape":"StartJobRunResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"ResourceNumberLimitExceededException"}, + {"shape":"ConcurrentRunsExceededException"} + ], + "documentation":"

Runs a job.
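A minimal sketch, assuming a job named 'my-etl-job' already exists; the Arguments map (its keys are hypothetical) is passed through to the run:

import botocore.session

glue = botocore.session.get_session().create_client('glue', region_name='us-east-1')
run = glue.start_job_run(JobName='my-etl-job', Arguments={'--source': 's3://my-bucket/in/'})
print(run['JobRunId'])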

" + }, + "StartTrigger":{ + "name":"StartTrigger", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartTriggerRequest"}, + "output":{"shape":"StartTriggerResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"ResourceNumberLimitExceededException"}, + {"shape":"ConcurrentRunsExceededException"} + ], + "documentation":"

Starts an existing trigger.

" + }, + "StopCrawler":{ + "name":"StopCrawler", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopCrawlerRequest"}, + "output":{"shape":"StopCrawlerResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"CrawlerNotRunningException"}, + {"shape":"CrawlerStoppingException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

If the specified Crawler is running, stops the crawl.

" + }, + "StopCrawlerSchedule":{ + "name":"StopCrawlerSchedule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopCrawlerScheduleRequest"}, + "output":{"shape":"StopCrawlerScheduleResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"SchedulerNotRunningException"}, + {"shape":"SchedulerTransitioningException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Sets the schedule state of the specified crawler to NOT_SCHEDULED, but does not stop the crawler if it is already running.

" + }, + "StopTrigger":{ + "name":"StopTrigger", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopTriggerRequest"}, + "output":{"shape":"StopTriggerResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Stops a specified trigger.

" + }, + "UpdateClassifier":{ + "name":"UpdateClassifier", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateClassifierRequest"}, + "output":{"shape":"UpdateClassifierResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"VersionMismatchException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Modifies an existing Classifier.

" + }, + "UpdateConnection":{ + "name":"UpdateConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateConnectionRequest"}, + "output":{"shape":"UpdateConnectionResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Updates a connection definition in the Data Catalog.

" + }, + "UpdateCrawler":{ + "name":"UpdateCrawler", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateCrawlerRequest"}, + "output":{"shape":"UpdateCrawlerResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"VersionMismatchException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"CrawlerRunningException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Updates a Crawler. If a Crawler is running, you must stop it using StopCrawler before updating it.

" + }, + "UpdateCrawlerSchedule":{ + "name":"UpdateCrawlerSchedule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateCrawlerScheduleRequest"}, + "output":{"shape":"UpdateCrawlerScheduleResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"VersionMismatchException"}, + {"shape":"SchedulerTransitioningException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Updates the schedule of a crawler using a Cron expression.
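A minimal sketch; the crawler name is hypothetical and the expression follows the cron(...) form described under CreateCrawler below:

import botocore.session

glue = botocore.session.get_session().create_client('glue', region_name='us-east-1')
# Move the crawler to a daily 02:30 UTC schedule.
glue.update_crawler_schedule(CrawlerName='my-crawler', Schedule='cron(30 2 * * ? *)')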

" + }, + "UpdateDatabase":{ + "name":"UpdateDatabase", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDatabaseRequest"}, + "output":{"shape":"UpdateDatabaseResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Updates an existing database definition in a Data Catalog.

" + }, + "UpdateDevEndpoint":{ + "name":"UpdateDevEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDevEndpointRequest"}, + "output":{"shape":"UpdateDevEndpointResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"InvalidInputException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Updates a specified DevEndpoint.

" + }, + "UpdateJob":{ + "name":"UpdateJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateJobRequest"}, + "output":{"shape":"UpdateJobResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Updates an existing job definition.

" + }, + "UpdatePartition":{ + "name":"UpdatePartition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdatePartitionRequest"}, + "output":{"shape":"UpdatePartitionResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Updates a partition.

" + }, + "UpdateTable":{ + "name":"UpdateTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateTableRequest"}, + "output":{"shape":"UpdateTableResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

Updates a metadata table in the Data Catalog.

" + }, + "UpdateTrigger":{ + "name":"UpdateTrigger", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateTriggerRequest"}, + "output":{"shape":"UpdateTriggerResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Updates a trigger definition.

" + }, + "UpdateUserDefinedFunction":{ + "name":"UpdateUserDefinedFunction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateUserDefinedFunctionRequest"}, + "output":{"shape":"UpdateUserDefinedFunctionResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Updates an existing function definition in the Data Catalog.

" + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

A message describing the problem.

" + } + }, + "documentation":"

Access to a resource was denied.

", + "exception":true + }, + "Action":{ + "type":"structure", + "members":{ + "JobName":{"shape":"NameString"}, + "Arguments":{"shape":"GenericMap"} + } + }, + "ActionList":{ + "type":"list", + "member":{"shape":"Action"} + }, + "AlreadyExistsException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

A message describing the problem.

" + } + }, + "documentation":"

A resource to be created or added already exists.

", + "exception":true + }, + "AttemptCount":{"type":"integer"}, + "BatchCreatePartitionRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName", + "PartitionInputList" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the catalog in which the partition is to be created. Currently, this should be the AWS account ID.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the metadata database in which the partition is to be created.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the metadata table in which the partition is to be created.

" + }, + "PartitionInputList":{ + "shape":"PartitionInputList", + "documentation":"

A list of PartitionInput structures that define the partitions to be created.

" + } + } + }, + "BatchCreatePartitionResponse":{ + "type":"structure", + "members":{ + "Errors":{ + "shape":"PartitionErrors", + "documentation":"

Errors encountered when trying to create the requested partitions.

" + } + } + }, + "BatchDeleteConnectionRequest":{ + "type":"structure", + "required":["ConnectionNameList"], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog in which the connections reside. If none is supplied, the AWS account ID is used by default.

" + }, + "ConnectionNameList":{ + "shape":"DeleteConnectionNameList", + "documentation":"

A list of names of the connections to delete.

" + } + } + }, + "BatchDeleteConnectionResponse":{ + "type":"structure", + "members":{ + "Succeeded":{ + "shape":"NameStringList", + "documentation":"

A list of names of the connection definitions that were successfully deleted.

" + }, + "Errors":{ + "shape":"ErrorByName", + "documentation":"

A map of the names of connections that were not successfully deleted to error details.
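A sketch of inspecting this partial-failure response (the connection names are hypothetical):

import botocore.session

glue = botocore.session.get_session().create_client('glue', region_name='us-east-1')
resp = glue.batch_delete_connection(ConnectionNameList=['conn-a', 'conn-b'])
print('deleted:', resp.get('Succeeded', []))
for name, err in resp.get('Errors', {}).items():
    print(name, err.get('ErrorCode'), err.get('ErrorMessage'))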

" + } + } + }, + "BatchDeletePartitionRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName", + "PartitionsToDelete" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog where the partition to be deleted resides. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the catalog database in which the table in question resides.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the table where the partitions to be deleted are located.

" + }, + "PartitionsToDelete":{ + "shape":"BatchDeletePartitionValueList", + "documentation":"

A list of PartitionInput structures that define the partitions to be deleted.

" + } + } + }, + "BatchDeletePartitionResponse":{ + "type":"structure", + "members":{ + "Errors":{ + "shape":"PartitionErrors", + "documentation":"

Errors encountered when trying to delete the requested partitions.

" + } + } + }, + "BatchDeletePartitionValueList":{ + "type":"list", + "member":{"shape":"PartitionValueList"}, + "max":25, + "min":0 + }, + "BatchDeleteTableNameList":{ + "type":"list", + "member":{"shape":"NameString"}, + "max":100, + "min":0 + }, + "BatchDeleteTableRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TablesToDelete" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog where the table resides. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the catalog database where the tables to delete reside.

" + }, + "TablesToDelete":{ + "shape":"BatchDeleteTableNameList", + "documentation":"

A list of the tables to delete.

" + } + } + }, + "BatchDeleteTableResponse":{ + "type":"structure", + "members":{ + "Errors":{ + "shape":"TableErrors", + "documentation":"

A list of errors encountered in attempting to delete the specified tables.

" + } + } + }, + "BatchGetPartitionRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName", + "PartitionsToGet" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog where the partitions in question reside. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the catalog database where the partitions reside.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the partitions' table.

" + }, + "PartitionsToGet":{ + "shape":"BatchGetPartitionValueList", + "documentation":"

A list of partition values identifying the partitions to retrieve.
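A minimal sketch; each PartitionsToGet entry is a PartitionValueList giving the ordered key values of one partition (all names below are hypothetical):

import botocore.session

glue = botocore.session.get_session().create_client('glue', region_name='us-east-1')
resp = glue.batch_get_partition(
    DatabaseName='my_database',
    TableName='events',
    PartitionsToGet=[{'Values': ['2017', '08']}, {'Values': ['2017', '09']}],
)
print(len(resp['Partitions']), 'found;', len(resp['UnprocessedKeys']), 'unprocessed')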

" + } + } + }, + "BatchGetPartitionResponse":{ + "type":"structure", + "members":{ + "Partitions":{ + "shape":"PartitionList", + "documentation":"

A list of the requested partitions.

" + }, + "UnprocessedKeys":{ + "shape":"BatchGetPartitionValueList", + "documentation":"

A list of the partition values in the request for which partitions were not returned.

" + } + } + }, + "BatchGetPartitionValueList":{ + "type":"list", + "member":{"shape":"PartitionValueList"}, + "max":1000, + "min":0 + }, + "Boolean":{"type":"boolean"}, + "BooleanValue":{"type":"boolean"}, + "BoundedPartitionValueList":{ + "type":"list", + "member":{"shape":"ValueString"}, + "max":100, + "min":0 + }, + "CatalogEntries":{ + "type":"list", + "member":{"shape":"CatalogEntry"} + }, + "CatalogEntry":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName" + ], + "members":{ + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The database in which the table metadata resides.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the table in question.

" + } + }, + "documentation":"

Specifies a table definition in the Data Catalog.

" + }, + "CatalogIdString":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, + "CatalogImportStatus":{ + "type":"structure", + "members":{ + "ImportCompleted":{ + "shape":"Boolean", + "documentation":"

True if the migration has completed, or False otherwise.

" + }, + "ImportTime":{ + "shape":"Timestamp", + "documentation":"

The time that the migration was started.

" + }, + "ImportedBy":{ + "shape":"NameString", + "documentation":"

The name of the person who initiated the migration.

" + } + }, + "documentation":"

A structure containing migration status information.

" + }, + "Classification":{"type":"string"}, + "Classifier":{ + "type":"structure", + "members":{ + "GrokClassifier":{ + "shape":"GrokClassifier", + "documentation":"

A GrokClassifier object.

" + } + }, + "documentation":"

Classifiers are written in Python and triggered during a Crawl Task. You can write your own Classifiers to best categorize your data sources and specify the appropriate schemas to use for them. A Classifier first checks whether a given file is in a format it can handle, and then, if so, creates a schema in the form of a StructType object that matches that data format.

" + }, + "ClassifierList":{ + "type":"list", + "member":{"shape":"Classifier"} + }, + "ClassifierNameList":{ + "type":"list", + "member":{"shape":"NameString"} + }, + "CodeGenArgName":{"type":"string"}, + "CodeGenArgValue":{"type":"string"}, + "CodeGenEdge":{ + "type":"structure", + "required":[ + "Source", + "Target" + ], + "members":{ + "Source":{ + "shape":"CodeGenIdentifier", + "documentation":"

The ID of the node at which the edge starts.

" + }, + "Target":{ + "shape":"CodeGenIdentifier", + "documentation":"

The ID of the node at which the edge ends.

" + }, + "TargetParameter":{ + "shape":"CodeGenArgName", + "documentation":"

The target of the edge.

" + } + }, + "documentation":"

Represents a directional edge in a directed acyclic graph (DAG).

" + }, + "CodeGenIdentifier":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[A-Za-z_][A-Za-z0-9_]*" + }, + "CodeGenNode":{ + "type":"structure", + "required":[ + "Id", + "NodeType", + "Args" + ], + "members":{ + "Id":{ + "shape":"CodeGenIdentifier", + "documentation":"

A node identifier that is unique within the node's graph.

" + }, + "NodeType":{ + "shape":"CodeGenNodeType", + "documentation":"

The type of node this is.

" + }, + "Args":{ + "shape":"CodeGenNodeArgs", + "documentation":"

Properties of the node, in the form of name-value pairs.

" + }, + "LineNumber":{ + "shape":"Integer", + "documentation":"

The line number of the node.

" + } + }, + "documentation":"

Represents a node in a directed acyclic graph (DAG).

" + }, + "CodeGenNodeArg":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{ + "shape":"CodeGenArgName", + "documentation":"

The name of the argument or property.

" + }, + "Value":{ + "shape":"CodeGenArgValue", + "documentation":"

The value of the argument or property.

" + }, + "Param":{ + "shape":"Boolean", + "documentation":"

True if the value is used as a parameter.

" + } + }, + "documentation":"

An argument or property of a node.

" + }, + "CodeGenNodeArgs":{ + "type":"list", + "member":{"shape":"CodeGenNodeArg"}, + "max":50, + "min":0 + }, + "CodeGenNodeType":{"type":"string"}, + "Column":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the Column.

" + }, + "Type":{ + "shape":"ColumnTypeString", + "documentation":"

The datatype of data in the Column.

" + }, + "Comment":{ + "shape":"CommentString", + "documentation":"

Free-form text comment.

" + } + }, + "documentation":"

A column in a Table.

" + }, + "ColumnList":{ + "type":"list", + "member":{"shape":"Column"} + }, + "ColumnTypeString":{ + "type":"string", + "max":131072, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, + "ColumnValueStringList":{ + "type":"list", + "member":{"shape":"ColumnValuesString"} + }, + "ColumnValuesString":{"type":"string"}, + "CommentString":{ + "type":"string", + "max":255, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, + "ConcurrentModificationException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

A message describing the problem.

" + } + }, + "documentation":"

Two processes are trying to modify a resource simultaneously.

", + "exception":true + }, + "ConcurrentRunsExceededException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

A message describing the problem.

" + } + }, + "documentation":"

Too many jobs are being run concurrently.

", + "exception":true + }, + "Condition":{ + "type":"structure", + "members":{ + "LogicalOperator":{"shape":"LogicalOperator"}, + "JobName":{"shape":"NameString"}, + "State":{"shape":"JobRunState"} + } + }, + "ConditionList":{ + "type":"list", + "member":{"shape":"Condition"} + }, + "Connection":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the connection definition.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

Description of the connection.

" + }, + "ConnectionType":{ + "shape":"ConnectionType", + "documentation":"

The type of the connection.

" + }, + "MatchCriteria":{ + "shape":"MatchCriteria", + "documentation":"

A list of criteria that can be used in selecting this connection.

" + }, + "ConnectionProperties":{ + "shape":"ConnectionProperties", + "documentation":"

A list of key-value pairs used as parameters for this connection.

" + }, + "PhysicalConnectionRequirements":{ + "shape":"PhysicalConnectionRequirements", + "documentation":"

A map of physical connection requirements, such as VPC and SecurityGroup, needed for making this connection successfully.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time this connection definition was created.

" + }, + "LastUpdatedTime":{ + "shape":"Timestamp", + "documentation":"

The last time this connection definition was updated.

" + }, + "LastUpdatedBy":{ + "shape":"NameString", + "documentation":"

The user, group or role that last updated this connection definition.

" + } + }, + "documentation":"

Defines a connection to a data source.

" + }, + "ConnectionInput":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the connection.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

Description of the connection.

" + }, + "ConnectionType":{ + "shape":"ConnectionType", + "documentation":"

The type of the connection.

" + }, + "MatchCriteria":{ + "shape":"MatchCriteria", + "documentation":"

A list of criteria that can be used in selecting this connection.

" + }, + "ConnectionProperties":{ + "shape":"ConnectionProperties", + "documentation":"

A list of key-value pairs used as parameters for this connection.

" + }, + "PhysicalConnectionRequirements":{ + "shape":"PhysicalConnectionRequirements", + "documentation":"

A map of physical connection requirements, such as VPC and SecurityGroup, needed for making this connection successfully.

" + } + }, + "documentation":"

A structure used to specify a connection to create or update.

" + }, + "ConnectionList":{ + "type":"list", + "member":{"shape":"Connection"} + }, + "ConnectionName":{"type":"string"}, + "ConnectionProperties":{ + "type":"map", + "key":{"shape":"ConnectionPropertyKey"}, + "value":{"shape":"ValueString"}, + "max":100, + "min":0 + }, + "ConnectionPropertyKey":{ + "type":"string", + "enum":[ + "HOST", + "PORT", + "USERNAME", + "PASSWORD", + "JDBC_DRIVER_JAR_URI", + "JDBC_DRIVER_CLASS_NAME", + "JDBC_ENGINE", + "JDBC_ENGINE_VERSION", + "CONFIG_FILES", + "INSTANCE_ID", + "JDBC_CONNECTION_URL" + ] + }, + "ConnectionType":{ + "type":"string", + "enum":[ + "JDBC", + "SFTP" + ] + }, + "ConnectionsList":{ + "type":"structure", + "members":{ + "Connections":{ + "shape":"StringList", + "documentation":"

A list of connections used by the job.

" + } + }, + "documentation":"

Specifies the connections used by a job.

" + }, + "Crawler":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The Crawler name.

" + }, + "Role":{ + "shape":"RoleArn", + "documentation":"

The ARN of an IAM role used to access customer resources such as data in S3.

" + }, + "Targets":{ + "shape":"CrawlerTargets", + "documentation":"

A collection of targets to crawl.

" + }, + "DatabaseName":{ + "shape":"DatabaseName", + "documentation":"

The Database where this Crawler's output should be stored.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

A description of this Crawler and where it should be used.

" + }, + "Classifiers":{ + "shape":"ClassifierNameList", + "documentation":"

A list of custom Classifiers associated with this Crawler.

" + }, + "SchemaChangePolicy":{ + "shape":"SchemaChangePolicy", + "documentation":"

Sets policy for the crawler's update and delete behavior.

" + }, + "State":{ + "shape":"CrawlerState", + "documentation":"

Indicates whether this Crawler is running, or whether a run is pending.

" + }, + "TablePrefix":{ + "shape":"TablePrefix", + "documentation":"

The table prefix used for catalog tables that are created.

" + }, + "Schedule":{ + "shape":"Schedule", + "documentation":"

A Schedule object that specifies the schedule on which this Crawler is to be run.

" + }, + "CrawlElapsedTime":{ + "shape":"MillisecondsCount", + "documentation":"

If this Crawler is running, contains the total time elapsed since the last crawl began.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time when the Crawler was created.

" + }, + "LastUpdated":{ + "shape":"Timestamp", + "documentation":"

The time the Crawler was last updated.

" + }, + "LastCrawl":{ + "shape":"LastCrawlInfo", + "documentation":"

The status of the last crawl, and potentially error information if an error occurred.

" + }, + "Version":{ + "shape":"VersionId", + "documentation":"

The version of the Crawler.

" + } + }, + "documentation":"

Specifies a crawler program that examines a data source and uses classifiers to try to determine its schema. If successful, the crawler records metadata concerning the data source in the Data Catalog.

" + }, + "CrawlerList":{ + "type":"list", + "member":{"shape":"Crawler"} + }, + "CrawlerMetrics":{ + "type":"structure", + "members":{ + "CrawlerName":{ + "shape":"NameString", + "documentation":"

The name of the crawler.

" + }, + "TimeLeftSeconds":{ + "shape":"NonNegativeDouble", + "documentation":"

The estimated time left to complete a running crawl.

" + }, + "StillEstimating":{ + "shape":"Boolean", + "documentation":"

True if the crawler is still estimating how long it will take to complete this run.

" + }, + "LastRuntimeSeconds":{ + "shape":"NonNegativeDouble", + "documentation":"

The duration of the crawler's most recent run, in seconds.

" + }, + "MedianRuntimeSeconds":{ + "shape":"NonNegativeDouble", + "documentation":"

The median duration of this crawler's runs, in seconds.

" + }, + "TablesCreated":{ + "shape":"NonNegativeInteger", + "documentation":"

The number of tables created by this crawler.

" + }, + "TablesUpdated":{ + "shape":"NonNegativeInteger", + "documentation":"

The number of tables updated by this crawler.

" + }, + "TablesDeleted":{ + "shape":"NonNegativeInteger", + "documentation":"

The number of tables deleted by this crawler.

" + } + }, + "documentation":"

Metrics for a specified crawler.

" + }, + "CrawlerMetricsList":{ + "type":"list", + "member":{"shape":"CrawlerMetrics"} + }, + "CrawlerNameList":{ + "type":"list", + "member":{"shape":"NameString"}, + "max":100, + "min":0 + }, + "CrawlerNotRunningException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

A message describing the problem.

" + } + }, + "documentation":"

The specified crawler is not running.

", + "exception":true + }, + "CrawlerRunningException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

A message describing the problem.

" + } + }, + "documentation":"

The operation cannot be performed because the crawler is already running.

", + "exception":true + }, + "CrawlerState":{ + "type":"string", + "enum":[ + "READY", + "RUNNING", + "STOPPING" + ] + }, + "CrawlerStoppingException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

A message describing the problem.

" + } + }, + "documentation":"

The specified crawler is stopping.

", + "exception":true + }, + "CrawlerTargets":{ + "type":"structure", + "members":{ + "S3Targets":{ + "shape":"S3TargetList", + "documentation":"

Specifies targets in AWS S3.

" + }, + "JdbcTargets":{ + "shape":"JdbcTargetList", + "documentation":"

Specifies JDBC targets.

" + } + }, + "documentation":"

Specifies crawler targets.

" + }, + "CreateClassifierRequest":{ + "type":"structure", + "members":{ + "GrokClassifier":{ + "shape":"CreateGrokClassifierRequest", + "documentation":"

A grok classifier to create.

" + } + } + }, + "CreateClassifierResponse":{ + "type":"structure", + "members":{ + } + }, + "CreateConnectionRequest":{ + "type":"structure", + "required":["ConnectionInput"], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog in which to create the connection. If none is supplied, the AWS account ID is used by default.

" + }, + "ConnectionInput":{ + "shape":"ConnectionInput", + "documentation":"

A ConnectionInput object defining the connection to create.

" + } + } + }, + "CreateConnectionResponse":{ + "type":"structure", + "members":{ + } + }, + "CreateCrawlerRequest":{ + "type":"structure", + "required":[ + "Name", + "Role", + "DatabaseName", + "Targets" + ], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

Name of the new Crawler.

" + }, + "Role":{ + "shape":"RoleArn", + "documentation":"

The AWS ARN of the IAM role used by the new Crawler to access customer resources.

" + }, + "DatabaseName":{ + "shape":"DatabaseName", + "documentation":"

The Glue Database where results will be stored, such as: arn:aws:daylight:us-east-1::database/sometable/*.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

A description of the new Crawler.

" + }, + "Targets":{ + "shape":"CrawlerTargets", + "documentation":"

A collection of targets to crawl.

" + }, + "Schedule":{ + "shape":"CronExpression", + "documentation":"

A cron expression that can be used as a CloudWatch event (see CloudWatch Schedule Expression Syntax). For example, to run every day at 12:15 UTC, specify: cron(15 12 * * ? *).
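A minimal sketch of creating a scheduled crawler (the names, role ARN, and S3 path are hypothetical):

import botocore.session

glue = botocore.session.get_session().create_client('glue', region_name='us-east-1')
glue.create_crawler(
    Name='my-crawler',
    Role='arn:aws:iam::123456789012:role/MyGlueRole',
    DatabaseName='my_database',
    Targets={'S3Targets': [{'Path': 's3://my-bucket/data/'}]},
    Schedule='cron(15 12 * * ? *)',  # every day at 12:15 UTC, as in the example above
)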

" + }, + "Classifiers":{ + "shape":"ClassifierNameList", + "documentation":"

A list of custom Classifier names that the user has registered. By default, all AWS classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification.

" + }, + "TablePrefix":{ + "shape":"TablePrefix", + "documentation":"

The table prefix used for catalog tables that are created.

" + }, + "SchemaChangePolicy":{ + "shape":"SchemaChangePolicy", + "documentation":"

Policy for the crawler's update and deletion behavior.

" + } + } + }, + "CreateCrawlerResponse":{ + "type":"structure", + "members":{ + } + }, + "CreateDatabaseRequest":{ + "type":"structure", + "required":["DatabaseInput"], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog in which to create the database. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseInput":{ + "shape":"DatabaseInput", + "documentation":"

A DatabaseInput object defining the metadata database to create in the catalog.

" + } + } + }, + "CreateDatabaseResponse":{ + "type":"structure", + "members":{ + } + }, + "CreateDevEndpointRequest":{ + "type":"structure", + "required":[ + "EndpointName", + "RoleArn", + "SecurityGroupIds", + "SubnetId" + ], + "members":{ + "EndpointName":{ + "shape":"GenericString", + "documentation":"

The name to be assigned to the new DevEndpoint.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The IAM role for the DevEndpoint.

" + }, + "SecurityGroupIds":{ + "shape":"StringList", + "documentation":"

Security group IDs for the security groups to be used by the new DevEndpoint.

" + }, + "SubnetId":{ + "shape":"GenericString", + "documentation":"

The subnet ID for the new DevEndpoint to use.

" + }, + "PublicKey":{ + "shape":"GenericString", + "documentation":"

The public key to use for authentication.

" + }, + "NumberOfNodes":{ + "shape":"IntegerValue", + "documentation":"

The number of nodes to use.

" + }, + "ExtraPythonLibsS3Path":{ + "shape":"GenericString", + "documentation":"

Path to one or more Python libraries in an S3 bucket that should be loaded in your DevEndpoint.

" + }, + "ExtraJarsS3Path":{ + "shape":"GenericString", + "documentation":"

Path to one or more Java Jars in an S3 bucket that should be loaded in your DevEndpoint.

" + } + } + }, + "CreateDevEndpointResponse":{ + "type":"structure", + "members":{ + "EndpointName":{ + "shape":"GenericString", + "documentation":"

The name assigned to the new DevEndpoint.

" + }, + "Status":{ + "shape":"GenericString", + "documentation":"

The current status of the new DevEndpoint.

" + }, + "SecurityGroupIds":{ + "shape":"StringList", + "documentation":"

The security groups assigned to the new DevEndpoint.

" + }, + "SubnetId":{ + "shape":"GenericString", + "documentation":"

The subnet ID assigned to the new DevEndpoint.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The AWS ARN of the role assigned to the new DevEndpoint.

" + }, + "YarnEndpointAddress":{ + "shape":"GenericString", + "documentation":"

The address of the YARN endpoint used by this DevEndpoint.

" + }, + "NumberOfNodes":{ + "shape":"IntegerValue", + "documentation":"

The number of nodes in this DevEndpoint.

" + }, + "AvailabilityZone":{ + "shape":"GenericString", + "documentation":"

The AWS availability zone where this DevEndpoint is located.

" + }, + "VpcId":{ + "shape":"GenericString", + "documentation":"

The ID of the VPC used by this DevEndpoint.

" + }, + "ExtraPythonLibsS3Path":{ + "shape":"GenericString", + "documentation":"

Path to one or more Python libraries in an S3 bucket that will be loaded in your DevEndpoint.

" + }, + "ExtraJarsS3Path":{ + "shape":"GenericString", + "documentation":"

Path to one or more Java Jars in an S3 bucket that will be loaded in your DevEndpoint.

" + }, + "FailureReason":{ + "shape":"GenericString", + "documentation":"

The reason for a current failure in this DevEndpoint.

" + }, + "CreatedTimestamp":{ + "shape":"TimestampValue", + "documentation":"

The point in time at which this DevEndpoint was created.

" + } + } + }, + "CreateGrokClassifierRequest":{ + "type":"structure", + "required":[ + "Classification", + "Name", + "GrokPattern" + ], + "members":{ + "Classification":{ + "shape":"Classification", + "documentation":"

The type of result that the classifier matches, such as Twitter JSON, Omniture logs, CloudWatch logs, and so forth.

" + }, + "Name":{ + "shape":"NameString", + "documentation":"

The name of the new Classifier.

" + }, + "GrokPattern":{ + "shape":"GrokPattern", + "documentation":"

The grok pattern used by this classifier.

" + }, + "CustomPatterns":{ + "shape":"CustomPatterns", + "documentation":"

Custom grok patterns used by this classifier.

" + } + }, + "documentation":"

Specifies a Grok classifier for CreateClassifier to create.

" + }, + "CreateJobRequest":{ + "type":"structure", + "required":[ + "Name", + "Role", + "Command" + ], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name you assign to this job.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

Description of the job.

" + }, + "LogUri":{ + "shape":"UriString", + "documentation":"

Location of the logs for this job.

" + }, + "Role":{ + "shape":"RoleString", + "documentation":"

The role associated with this job.

" + }, + "ExecutionProperty":{ + "shape":"ExecutionProperty", + "documentation":"

An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.

" + }, + "Command":{ + "shape":"JobCommand", + "documentation":"

The JobCommand that executes this job.
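A minimal sketch of creating a job; 'glueetl' as the command name and the other identifiers are assumptions for illustration:

import botocore.session

glue = botocore.session.get_session().create_client('glue', region_name='us-east-1')
resp = glue.create_job(
    Name='my-etl-job',
    Role='arn:aws:iam::123456789012:role/MyGlueRole',
    Command={'Name': 'glueetl', 'ScriptLocation': 's3://my-bucket/scripts/job.py'},
    MaxRetries=1,
)
print(resp['Name'])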

" + }, + "DefaultArguments":{ + "shape":"GenericMap", + "documentation":"

The default parameters for this job.

" + }, + "Connections":{ + "shape":"ConnectionsList", + "documentation":"

The connections used for this job.

" + }, + "MaxRetries":{ + "shape":"MaxRetries", + "documentation":"

The maximum number of times to retry this job if it fails.

" + }, + "AllocatedCapacity":{ + "shape":"IntegerValue", + "documentation":"

The number of capacity units allocated to this job.

" + } + } + }, + "CreateJobResponse":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The unique name of the new job that has been created.

" + } + } + }, + "CreatePartitionRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName", + "PartitionInput" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the catalog in which the partition is to be created. Currently, this should be the AWS account ID.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the metadata database in which the partition is to be created.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the metadata table in which the partition is to be created.

" + }, + "PartitionInput":{ + "shape":"PartitionInput", + "documentation":"

A PartitionInput structure defining the partition to be created.

" + } + } + }, + "CreatePartitionResponse":{ + "type":"structure", + "members":{ + } + }, + "CreateScriptRequest":{ + "type":"structure", + "members":{ + "DagNodes":{ + "shape":"DagNodes", + "documentation":"

A list of the nodes in the DAG.

" + }, + "DagEdges":{ + "shape":"DagEdges", + "documentation":"

A list of the edges in the DAG.

" + } + } + }, + "CreateScriptResponse":{ + "type":"structure", + "members":{ + "PythonScript":{ + "shape":"PythonScript", + "documentation":"

The Python script generated from the DAG.
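One hedged way to exercise this is a round trip: derive a DAG from an existing script with GetDataflowGraph, then regenerate source from it (the script file is hypothetical):

import botocore.session

glue = botocore.session.get_session().create_client('glue', region_name='us-east-1')
script = open('job.py').read()  # hypothetical local copy of an ETL script
graph = glue.get_dataflow_graph(PythonScript=script)
regen = glue.create_script(DagNodes=graph['DagNodes'], DagEdges=graph['DagEdges'])
print(regen['PythonScript'])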

" + } + } + }, + "CreateTableRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableInput" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog in which to create the Table. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The catalog database in which to create the new table.

" + }, + "TableInput":{ + "shape":"TableInput", + "documentation":"

The TableInput object that defines the metadata table to create in the catalog.

" + } + } + }, + "CreateTableResponse":{ + "type":"structure", + "members":{ + } + }, + "CreateTriggerRequest":{ + "type":"structure", + "required":[ + "Name", + "Type", + "Actions" + ], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name to assign to the new trigger.

" + }, + "Type":{ + "shape":"TriggerType", + "documentation":"

The type of the new trigger.

" + }, + "Schedule":{ + "shape":"GenericString", + "documentation":"

A cron schedule expression for the new trigger.

" + }, + "Predicate":{ + "shape":"Predicate", + "documentation":"

A predicate to specify when the new trigger should fire.

" + }, + "Actions":{ + "shape":"ActionList", + "documentation":"

The actions initiated by this trigger when it fires.
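A minimal sketch of a scheduled trigger that starts one job (the names and schedule are hypothetical):

import botocore.session

glue = botocore.session.get_session().create_client('glue', region_name='us-east-1')
resp = glue.create_trigger(
    Name='nightly-run',
    Type='SCHEDULED',
    Schedule='cron(0 3 * * ? *)',  # fire at 03:00 UTC daily
    Actions=[{'JobName': 'my-etl-job'}],
)
print(resp['Name'])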

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

A description of the new trigger.

" + } + } + }, + "CreateTriggerResponse":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name assigned to the new trigger.

" + } + } + }, + "CreateUserDefinedFunctionRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "FunctionInput" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog in which to create the function. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the catalog database in which to create the function.

" + }, + "FunctionInput":{ + "shape":"UserDefinedFunctionInput", + "documentation":"

A FunctionInput object that defines the function to create in the Data Catalog.

" + } + } + }, + "CreateUserDefinedFunctionResponse":{ + "type":"structure", + "members":{ + } + }, + "CronExpression":{"type":"string"}, + "CustomPatterns":{ + "type":"string", + "max":16000, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "DagEdges":{ + "type":"list", + "member":{"shape":"CodeGenEdge"} + }, + "DagNodes":{ + "type":"list", + "member":{"shape":"CodeGenNode"} + }, + "Database":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

Name of the database.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

Description of the database.

" + }, + "LocationUri":{ + "shape":"URI", + "documentation":"

The location of the database (for example, an HDFS path).

" + }, + "Parameters":{ + "shape":"ParametersMap", + "documentation":"

A list of key-value pairs that define parameters and properties of the database.

" + }, + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the metadata database was created in the catalog.

" + } + }, + "documentation":"

The Database object represents a logical grouping of tables that may reside in a Hive metastore or an RDBMS.

" + }, + "DatabaseInput":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

Name of the database.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

Description of the database.

" + }, + "LocationUri":{ + "shape":"URI", + "documentation":"

The location of the database (for example, an HDFS path).

" + }, + "Parameters":{ + "shape":"ParametersMap", + "documentation":"

A list of key-value pairs that define parameters and properties of the database.

" + } + }, + "documentation":"

The structure used to create or update a database.

" + }, + "DatabaseList":{ + "type":"list", + "member":{"shape":"Database"} + }, + "DatabaseName":{"type":"string"}, + "DeleteBehavior":{ + "type":"string", + "enum":[ + "LOG", + "DELETE_FROM_DATABASE", + "DEPRECATE_IN_DATABASE" + ] + }, + "DeleteClassifierRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

Name of the Classifier to remove.

" + } + } + }, + "DeleteClassifierResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteConnectionNameList":{ + "type":"list", + "member":{"shape":"NameString"}, + "max":25, + "min":0 + }, + "DeleteConnectionRequest":{ + "type":"structure", + "required":["ConnectionName"], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog in which the connection resides. If none is supplied, the AWS account ID is used by default.

" + }, + "ConnectionName":{ + "shape":"NameString", + "documentation":"

The name of the connection to delete.

" + } + } + }, + "DeleteConnectionResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteCrawlerRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

Name of the Crawler to remove.

" + } + } + }, + "DeleteCrawlerResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteDatabaseRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog in which the database resides. If none is supplied, the AWS account ID is used by default.

" + }, + "Name":{ + "shape":"NameString", + "documentation":"

The name of the Database to delete.

" + } + } + }, + "DeleteDatabaseResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteDevEndpointRequest":{ + "type":"structure", + "required":["EndpointName"], + "members":{ + "EndpointName":{ + "shape":"GenericString", + "documentation":"

The name of the DevEndpoint.

" + } + } + }, + "DeleteDevEndpointResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteJobRequest":{ + "type":"structure", + "required":["JobName"], + "members":{ + "JobName":{ + "shape":"NameString", + "documentation":"

The name of the job to delete.

" + } + } + }, + "DeleteJobResponse":{ + "type":"structure", + "members":{ + "JobName":{ + "shape":"NameString", + "documentation":"

The name of the job that was deleted.

" + } + } + }, + "DeletePartitionRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName", + "PartitionValues" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog where the partition to be deleted resides. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the catalog database in which the table in question resides.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the table where the partition to be deleted is located.

" + }, + "PartitionValues":{ + "shape":"ValueStringList", + "documentation":"

The values that define the partition.

" + } + } + }, + "DeletePartitionResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteTableRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "Name" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog where the table resides. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the catalog database in which the table resides.

" + }, + "Name":{ + "shape":"NameString", + "documentation":"

The name of the table to be deleted.

" + } + } + }, + "DeleteTableResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteTriggerRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the trigger to delete.

" + } + } + }, + "DeleteTriggerResponse":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the trigger that was deleted.

" + } + } + }, + "DeleteUserDefinedFunctionRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "FunctionName" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog where the function to be deleted is located. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the catalog database where the function is located.

" + }, + "FunctionName":{ + "shape":"NameString", + "documentation":"

The name of the function definition to be deleted.

" + } + } + }, + "DeleteUserDefinedFunctionResponse":{ + "type":"structure", + "members":{ + } + }, + "DescriptionString":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "DescriptionStringRemovable":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "DevEndpoint":{ + "type":"structure", + "members":{ + "EndpointName":{ + "shape":"GenericString", + "documentation":"

The name of the DevEndpoint.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The AWS ARN of the IAM role used in this DevEndpoint.

" + }, + "SecurityGroupIds":{ + "shape":"StringList", + "documentation":"

A list of security group identifiers used in this DevEndpoint.

" + }, + "SubnetId":{ + "shape":"GenericString", + "documentation":"

The subnet ID for this DevEndpoint.

" + }, + "YarnEndpointAddress":{ + "shape":"GenericString", + "documentation":"

The YARN endpoint address used by this DevEndpoint.

" + }, + "PublicAddress":{ + "shape":"GenericString", + "documentation":"

The public address used by this DevEndpoint.

" + }, + "Status":{ + "shape":"GenericString", + "documentation":"

The current status of this DevEndpoint.

" + }, + "NumberOfNodes":{ + "shape":"IntegerValue", + "documentation":"

The number of nodes used by this DevEndpoint.

" + }, + "AvailabilityZone":{ + "shape":"GenericString", + "documentation":"

The AWS availability zone where this DevEndpoint is located.

" + }, + "VpcId":{ + "shape":"GenericString", + "documentation":"

The ID of the virtual private cloud (VPC) used by this DevEndpoint.

" + }, + "ExtraPythonLibsS3Path":{ + "shape":"GenericString", + "documentation":"

Path to one or more Python libraries in an S3 bucket that should be loaded in your DevEndpoint.

" + }, + "ExtraJarsS3Path":{ + "shape":"GenericString", + "documentation":"

Path to one or more Java Jars in an S3 bucket that should be loaded in your DevEndpoint.

" + }, + "FailureReason":{ + "shape":"GenericString", + "documentation":"

The reason for a current failure in this DevEndpoint.

" + }, + "LastUpdateStatus":{ + "shape":"GenericString", + "documentation":"

The status of the last update.

" + }, + "CreatedTimestamp":{ + "shape":"TimestampValue", + "documentation":"

The point in time at which this DevEndpoint was created.

" + }, + "LastModifiedTimestamp":{ + "shape":"TimestampValue", + "documentation":"

The point in time at which this DevEndpoint was last modified.

" + }, + "PublicKey":{ + "shape":"GenericString", + "documentation":"

The public key to be used by this DevEndpoint for authentication.

" + } + }, + "documentation":"

A development endpoint where a developer can remotely debug ETL scripts.

" + }, + "DevEndpointCustomLibraries":{ + "type":"structure", + "members":{ + "ExtraPythonLibsS3Path":{ + "shape":"GenericString", + "documentation":"

Path to one or more Python libraries in an S3 bucket that should be loaded in your DevEndpoint.

" + }, + "ExtraJarsS3Path":{ + "shape":"GenericString", + "documentation":"

Path to one or more Java Jars in an S3 bucket that should be loaded in your DevEndpoint.

" + } + }, + "documentation":"

Custom libraries to be loaded into a DevEndpoint.

" + }, + "DevEndpointList":{ + "type":"list", + "member":{"shape":"DevEndpoint"} + }, + "EntityNotFoundException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

A message describing the problem.

" + } + }, + "documentation":"

A specified entity does not exist.

", + "exception":true + }, + "ErrorByName":{ + "type":"map", + "key":{"shape":"NameString"}, + "value":{"shape":"ErrorDetail"} + }, + "ErrorDetail":{ + "type":"structure", + "members":{ + "ErrorCode":{ + "shape":"NameString", + "documentation":"

The code associated with this error.

" + }, + "ErrorMessage":{ + "shape":"DescriptionString", + "documentation":"

A message describing the error.

" + } + }, + "documentation":"

Contains details about an error.

" + }, + "ErrorString":{"type":"string"}, + "ExecutionProperty":{ + "type":"structure", + "members":{ + "MaxConcurrentRuns":{ + "shape":"MaxConcurrentRuns", + "documentation":"

The maximum number of concurrent runs allowed for a job.

" + } + }, + "documentation":"

An execution property of a job.

" + }, + "FieldType":{"type":"string"}, + "FilterString":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, + "FormatString":{ + "type":"string", + "max":128, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, + "GenericMap":{ + "type":"map", + "key":{"shape":"GenericString"}, + "value":{"shape":"GenericString"} + }, + "GenericString":{"type":"string"}, + "GetCatalogImportStatusRequest":{ + "type":"structure", + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the catalog to migrate. Currently, this should be the AWS account ID.

" + } + } + }, + "GetCatalogImportStatusResponse":{ + "type":"structure", + "members":{ + "ImportStatus":{ + "shape":"CatalogImportStatus", + "documentation":"

The status of the specified catalog migration.

" + } + } + }, + "GetClassifierRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

Name of the Classifier to retrieve.

" + } + } + }, + "GetClassifierResponse":{ + "type":"structure", + "members":{ + "Classifier":{ + "shape":"Classifier", + "documentation":"

The requested Classifier.

" + } + } + }, + "GetClassifiersRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"PageSize", + "documentation":"

Size of the list to return (optional).

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

An optional continuation token.

" + } + } + }, + "GetClassifiersResponse":{ + "type":"structure", + "members":{ + "Classifiers":{ + "shape":"ClassifierList", + "documentation":"

The requested list of Classifier objects.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A continuation token.

" + } + } + }, + "GetConnectionRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog in which the connection resides. If none is supplied, the AWS account ID is used by default.

" + }, + "Name":{ + "shape":"NameString", + "documentation":"

The name of the connection definition to retrieve.

" + } + } + }, + "GetConnectionResponse":{ + "type":"structure", + "members":{ + "Connection":{ + "shape":"Connection", + "documentation":"

The requested connection definition.

" + } + } + }, + "GetConnectionsFilter":{ + "type":"structure", + "members":{ + "MatchCriteria":{ + "shape":"MatchCriteria", + "documentation":"

A criteria string that must match the criteria recorded in the connection definition for that connection definition to be returned.

" + }, + "ConnectionType":{ + "shape":"ConnectionType", + "documentation":"

The type of connections to return.

" + } + }, + "documentation":"

Filters the connection definitions returned by the GetConnections API.

" + }, + "GetConnectionsRequest":{ + "type":"structure", + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog in which the connections reside. If none is supplied, the AWS account ID is used by default.

" + }, + "Filter":{ + "shape":"GetConnectionsFilter", + "documentation":"

A filter that controls which connections will be returned.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A continuation token, if this is a continuation call.

" + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

The maximum number of connections to return in one response.

" + } + } + }, + "GetConnectionsResponse":{ + "type":"structure", + "members":{ + "ConnectionList":{ + "shape":"ConnectionList", + "documentation":"

A list of requested connection definitions.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A continuation token, if the list of connections returned does not include the last of the filtered connections.

" + } + } + }, + "GetCrawlerMetricsRequest":{ + "type":"structure", + "members":{ + "CrawlerNameList":{ + "shape":"CrawlerNameList", + "documentation":"

A list of the names of crawlers about which to retrieve metrics.

" + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

The maximum size of a list to return.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A continuation token, if this is a continuation call.

" + } + } + }, + "GetCrawlerMetricsResponse":{ + "type":"structure", + "members":{ + "CrawlerMetricsList":{ + "shape":"CrawlerMetricsList", + "documentation":"

A list of metrics for the specified crawler.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A continuation token, if the returned list does not contain the last metric available.

" + } + } + }, + "GetCrawlerRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

Name of the Crawler to retrieve metadata for.

" + } + } + }, + "GetCrawlerResponse":{ + "type":"structure", + "members":{ + "Crawler":{ + "shape":"Crawler", + "documentation":"

The metadata for the specified Crawler.

" + } + } + }, + "GetCrawlersRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"PageSize", + "documentation":"

The number of Crawlers to return on each call.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A continuation token, if this is a continuation request.

" + } + } + }, + "GetCrawlersResponse":{ + "type":"structure", + "members":{ + "Crawlers":{ + "shape":"CrawlerList", + "documentation":"

A list of Crawler metadata.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A continuation token, if the returned list has not reached the end of those defined in this customer account.

" + } + } + }, + "GetDatabaseRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog in which the database resides. If none is supplied, the AWS account ID is used by default.

" + }, + "Name":{ + "shape":"NameString", + "documentation":"

The name of the database to retrieve.

" + } + } + }, + "GetDatabaseResponse":{ + "type":"structure", + "members":{ + "Database":{ + "shape":"Database", + "documentation":"

The definition of the specified database in the catalog.

" + } + } + }, + "GetDatabasesRequest":{ + "type":"structure", + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog from which to retrieve Databases. If none is supplied, the AWS account ID is used by default.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A continuation token, if this is a continuation call.

" + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

The maximum number of databases to return in one response.

" + } + } + }, + "GetDatabasesResponse":{ + "type":"structure", + "required":["DatabaseList"], + "members":{ + "DatabaseList":{ + "shape":"DatabaseList", + "documentation":"

A list of Database objects from the specified catalog.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A continuation token for paginating the returned list of databases, returned if the current segment of the list is not the last.

" + } + } + }, + "GetDataflowGraphRequest":{ + "type":"structure", + "members":{ + "PythonScript":{ + "shape":"PythonScript", + "documentation":"

The Python script to transform.

" + } + } + }, + "GetDataflowGraphResponse":{ + "type":"structure", + "members":{ + "DagNodes":{ + "shape":"DagNodes", + "documentation":"

A list of the nodes in the resulting DAG.

" + }, + "DagEdges":{ + "shape":"DagEdges", + "documentation":"

A list of the edges in the resulting DAG.

" + } + } + }, + "GetDevEndpointRequest":{ + "type":"structure", + "required":["EndpointName"], + "members":{ + "EndpointName":{ + "shape":"GenericString", + "documentation":"

Name of the DevEndpoint for which to retrieve information.

" + } + } + }, + "GetDevEndpointResponse":{ + "type":"structure", + "members":{ + "DevEndpoint":{ + "shape":"DevEndpoint", + "documentation":"

A DevEndpoint definition.

" + } + } + }, + "GetDevEndpointsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"PageSize", + "documentation":"

The maximum number of DevEndpoint definitions to return in one response.

" + }, + "NextToken":{ + "shape":"GenericString", + "documentation":"

A continuation token, if this is a continuation call.

" + } + } + }, + "GetDevEndpointsResponse":{ + "type":"structure", + "members":{ + "DevEndpoints":{ + "shape":"DevEndpointList", + "documentation":"

A list of DevEndpoint definitions.

" + }, + "NextToken":{ + "shape":"GenericString", + "documentation":"

A continuation token, if not all DevEndpoint definitions have yet been returned.

" + } + } + }, + "GetJobRequest":{ + "type":"structure", + "required":["JobName"], + "members":{ + "JobName":{ + "shape":"NameString", + "documentation":"

The name of the job to retrieve.

" + } + } + }, + "GetJobResponse":{ + "type":"structure", + "members":{ + "Job":{ + "shape":"Job", + "documentation":"

The requested job definition.

" + } + } + }, + "GetJobRunRequest":{ + "type":"structure", + "required":[ + "JobName", + "RunId" + ], + "members":{ + "JobName":{ + "shape":"NameString", + "documentation":"

Name of the job being run.

" + }, + "RunId":{ + "shape":"IdString", + "documentation":"

The ID of the job run.

" + }, + "PredecessorsIncluded":{ + "shape":"BooleanValue", + "documentation":"

True if a list of predecessor runs should be returned as well.

" + } + } + }, + "GetJobRunResponse":{ + "type":"structure", + "members":{ + "JobRun":{ + "shape":"JobRun", + "documentation":"

The requested job-run metadata.

" + } + } + }, + "GetJobRunsRequest":{ + "type":"structure", + "required":["JobName"], + "members":{ + "JobName":{ + "shape":"NameString", + "documentation":"

The name of the job for which to retrieve all job runs.

" + }, + "NextToken":{ + "shape":"GenericString", + "documentation":"

A continuation token, if this is a continuation call.

" + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

The maximum number of job runs to return in one response.

" + } + } + }, + "GetJobRunsResponse":{ + "type":"structure", + "members":{ + "JobRuns":{ + "shape":"JobRunList", + "documentation":"

A list of job-run metadata objects.

" + }, + "NextToken":{ + "shape":"GenericString", + "documentation":"

A continuation token, if not all requested job runs have been returned.

" + } + } + }, + "GetJobsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"GenericString", + "documentation":"

A continuation token, if this is a continuation call.

" + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

The maximum number of jobs to return in one response.

" + } + } + }, + "GetJobsResponse":{ + "type":"structure", + "members":{ + "Jobs":{ + "shape":"JobList", + "documentation":"

A list of jobs.

" + }, + "NextToken":{ + "shape":"GenericString", + "documentation":"

A continuation token, if not all jobs have yet been returned.

" + } + } + }, + "GetMappingRequest":{ + "type":"structure", + "required":["Source"], + "members":{ + "Source":{ + "shape":"CatalogEntry", + "documentation":"

Specifies the source table.

" + }, + "Sinks":{ + "shape":"CatalogEntries", + "documentation":"

A list of target tables.

" + }, + "Location":{ + "shape":"Location", + "documentation":"

Parameters for the mapping.

" + } + } + }, + "GetMappingResponse":{ + "type":"structure", + "required":["Mapping"], + "members":{ + "Mapping":{ + "shape":"MappingList", + "documentation":"

A list of mappings to the specified targets.

" + } + } + }, + "GetPartitionRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName", + "PartitionValues" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog where the partition in question resides. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the catalog database where the partition resides.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the partition's table.

" + }, + "PartitionValues":{ + "shape":"ValueStringList", + "documentation":"

The values that define the partition.

" + } + } + }, + "GetPartitionResponse":{ + "type":"structure", + "members":{ + "Partition":{ + "shape":"Partition", + "documentation":"

The requested information, in the form of a Partition object.

" + } + } + }, + "GetPartitionsRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog where the partitions in question reside. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the catalog database where the partitions reside.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the partitions' table.

" + }, + "Expression":{ + "shape":"PredicateString", + "documentation":"

An expression filtering the partitions to be returned.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A continuation token, if this is not the first call to retrieve these partitions.

" + }, + "Segment":{ + "shape":"Segment", + "documentation":"

The segment of the table's partitions to scan in this request.

" + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

The maximum number of partitions to return in a single response.

" + } + } + }, + "GetPartitionsResponse":{ + "type":"structure", + "members":{ + "Partitions":{ + "shape":"PartitionList", + "documentation":"

A list of requested partitions.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A continuation token, if the returned list of partitions does not include the last one.

" + } + } + }, + "GetPlanRequest":{ + "type":"structure", + "required":[ + "Mapping", + "Source" + ], + "members":{ + "Mapping":{ + "shape":"MappingList", + "documentation":"

The list of mappings from a source table to target tables.

" + }, + "Source":{ + "shape":"CatalogEntry", + "documentation":"

The source table.

" + }, + "Sinks":{ + "shape":"CatalogEntries", + "documentation":"

The target tables.

" + }, + "Location":{ + "shape":"Location", + "documentation":"

Parameters for the mapping.

" + } + } + }, + "GetPlanResponse":{ + "type":"structure", + "members":{ + "PythonScript":{ + "shape":"PythonScript", + "documentation":"

A Python script to perform the mapping.

" + } + } + }, + "GetTableRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "Name" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog where the table resides. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the database in the catalog in which the table resides.

" + }, + "Name":{ + "shape":"NameString", + "documentation":"

The name of the table for which to retrieve the definition.

" + } + } + }, + "GetTableResponse":{ + "type":"structure", + "members":{ + "Table":{ + "shape":"Table", + "documentation":"

The Table object that defines the specified table.

" + } + } + }, + "GetTableVersionsList":{ + "type":"list", + "member":{"shape":"TableVersion"} + }, + "GetTableVersionsRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog where the tables reside. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The database in the catalog in which the table resides.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the table.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A continuation token, if this is not the first call.

" + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

The maximum number of table versions to return in one response.

" + } + } + }, + "GetTableVersionsResponse":{ + "type":"structure", + "members":{ + "TableVersions":{ + "shape":"GetTableVersionsList", + "documentation":"

A list of TableVersion objects identifying the available versions of the specified table.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A continuation token, if the list of available versions does not include the last one.

" + } + } + }, + "GetTablesRequest":{ + "type":"structure", + "required":["DatabaseName"], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog where the tables reside. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The database in the catalog whose tables to list.

" + }, + "Expression":{ + "shape":"FilterString", + "documentation":"

A regular expression pattern. If present, only those tables whose names match the pattern are returned.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A continuation token, included if this is a continuation call.

" + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

The maximum number of tables to return in a single response.

" + } + } + }, + "GetTablesResponse":{ + "type":"structure", + "members":{ + "TableList":{ + "shape":"TableList", + "documentation":"

A list of the requested Table objects.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A continuation token, present if the current list segment is not the last.

" + } + } + }, + "GetTriggerRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the trigger to retrieve.

" + } + } + }, + "GetTriggerResponse":{ + "type":"structure", + "members":{ + "Trigger":{ + "shape":"Trigger", + "documentation":"

The requested trigger definition.

" + } + } + }, + "GetTriggersRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"GenericString", + "documentation":"

A continuation token, if this is a continuation call.

" + }, + "DependentJobName":{ + "shape":"NameString", + "documentation":"

The name of the job for which to retrieve triggers.

" + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

The maximum number of triggers to return in one response.

" + } + } + }, + "GetTriggersResponse":{ + "type":"structure", + "members":{ + "Triggers":{ + "shape":"TriggerList", + "documentation":"

A list of triggers for the specified job.

" + }, + "NextToken":{ + "shape":"GenericString", + "documentation":"

A continuation token, if not all the requested triggers have yet been returned.

" + } + } + }, + "GetUserDefinedFunctionRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "FunctionName" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog where the function to be retrieved is located. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the catalog database where the function is located.

" + }, + "FunctionName":{ + "shape":"NameString", + "documentation":"

The name of the function.

" + } + } + }, + "GetUserDefinedFunctionResponse":{ + "type":"structure", + "members":{ + "UserDefinedFunction":{ + "shape":"UserDefinedFunction", + "documentation":"

The requested function definition.

" + } + } + }, + "GetUserDefinedFunctionsRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "Pattern" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog where the functions to be retrieved are located. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the catalog database where the functions are located.

" + }, + "Pattern":{ + "shape":"NameString", + "documentation":"

An optional function-name pattern string that filters the function definitions returned.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A continuation token, if this is a continuation call.

" + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

The maximum number of functions to return in one response.

" + } + } + }, + "GetUserDefinedFunctionsResponse":{ + "type":"structure", + "members":{ + "UserDefinedFunctions":{ + "shape":"UserDefinedFunctionList", + "documentation":"

A list of requested function definitions.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A continuation token, if the list of functions returned does not include the last requested function.

" + } + } + }, + "GrokClassifier":{ + "type":"structure", + "required":[ + "Name", + "Classification", + "GrokPattern" + ], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the classifier.

" + }, + "Classification":{ + "shape":"Classification", + "documentation":"

The data form that the classifier matches, such as Twitter, JSON, Omniture Logs, and so forth.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time this classifier was registered.

" + }, + "LastUpdated":{ + "shape":"Timestamp", + "documentation":"

The time this classifier was last updated.

" + }, + "Version":{ + "shape":"VersionId", + "documentation":"

The version of this classifier.

" + }, + "GrokPattern":{ + "shape":"GrokPattern", + "documentation":"

The grok pattern used by this classifier.

" + }, + "CustomPatterns":{ + "shape":"CustomPatterns", + "documentation":"

Custom grok patterns used by this classifier.

" + } + }, + "documentation":"

A classifier that uses grok.

" + }, + "GrokPattern":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\t]*" + }, + "IdString":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, + "IdempotentParameterMismatchException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

A message describing the problem.

" + } + }, + "documentation":"

The same unique identifier was associated with two different records.

", + "exception":true + }, + "ImportCatalogToGlueRequest":{ + "type":"structure", + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the catalog to import. Currently, this should be the AWS account ID.

" + } + } + }, + "ImportCatalogToGlueResponse":{ + "type":"structure", + "members":{ + } + }, + "Integer":{"type":"integer"}, + "IntegerFlag":{ + "type":"integer", + "max":1, + "min":0 + }, + "IntegerValue":{"type":"integer"}, + "InternalServiceException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

A message describing the problem.

" + } + }, + "documentation":"

An internal service error occurred.

", + "exception":true, + "fault":true + }, + "InvalidInputException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

A message describing the problem.

" + } + }, + "documentation":"

The input provided was not valid.

", + "exception":true + }, + "JdbcTarget":{ + "type":"structure", + "members":{ + "ConnectionName":{ + "shape":"ConnectionName", + "documentation":"

The name of the connection to use for the JDBC target.

" + }, + "Path":{ + "shape":"Path", + "documentation":"

The path of the JDBC target.

" + }, + "Exclusions":{ + "shape":"PathList", + "documentation":"

A list of items to exclude from the crawl.

" + } + }, + "documentation":"

Specifies a JDBC target for a crawl.

" + }, + "JdbcTargetList":{ + "type":"list", + "member":{"shape":"JdbcTarget"} + }, + "Job":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name you assign to this job.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

Description of this job.

" + }, + "LogUri":{ + "shape":"UriString", + "documentation":"

Location of the logs for this job.

" + }, + "Role":{ + "shape":"RoleString", + "documentation":"

The role associated with this job.

" + }, + "CreatedOn":{ + "shape":"TimestampValue", + "documentation":"

The time and date that this job specification was created.

" + }, + "LastModifiedOn":{ + "shape":"TimestampValue", + "documentation":"

The last point in time when this job specification was modified.

" + }, + "ExecutionProperty":{ + "shape":"ExecutionProperty", + "documentation":"

An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.

" + }, + "Command":{ + "shape":"JobCommand", + "documentation":"

The JobCommand that executes this job.

" + }, + "DefaultArguments":{ + "shape":"GenericMap", + "documentation":"

The default parameters for this job.

" + }, + "Connections":{ + "shape":"ConnectionsList", + "documentation":"

The connections used for this job.

" + }, + "MaxRetries":{ + "shape":"MaxRetries", + "documentation":"

The maximum number of times to retry this job if it fails.

" + }, + "AllocatedCapacity":{ + "shape":"IntegerValue", + "documentation":"

The number of capacity units allocated to this job.

" + } + }, + "documentation":"

Specifies a job in the Data Catalog.

" + }, + "JobBookmarkEntry":{ + "type":"structure", + "members":{ + "JobName":{ + "shape":"JobName", + "documentation":"

Name of the job in question.

" + }, + "Version":{ + "shape":"IntegerValue", + "documentation":"

Version of the job.

" + }, + "Run":{ + "shape":"IntegerValue", + "documentation":"

The run ID number.

" + }, + "Attempt":{ + "shape":"IntegerValue", + "documentation":"

The attempt ID number.

" + }, + "JobBookmark":{ + "shape":"JsonValue", + "documentation":"

The bookmark itself.

" + } + }, + "documentation":"

Defines a point at which a job can resume processing.

" + }, + "JobCommand":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"GenericString", + "documentation":"

The name of this job command.

" + }, + "ScriptLocation":{ + "shape":"ScriptLocationString", + "documentation":"

Specifies the location of a script that executes a job.

" + } + }, + "documentation":"

Specifies code that executes a job.

" + }, + "JobList":{ + "type":"list", + "member":{"shape":"Job"} + }, + "JobName":{"type":"string"}, + "JobRun":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"IdString", + "documentation":"

The ID of this job run.

" + }, + "Attempt":{ + "shape":"AttemptCount", + "documentation":"

The number of the attempt to run this job.

" + }, + "PreviousRunId":{ + "shape":"IdString", + "documentation":"

The ID of the previous run of this job.

" + }, + "TriggerName":{ + "shape":"NameString", + "documentation":"

The name of the trigger for this job run.

" + }, + "JobName":{ + "shape":"NameString", + "documentation":"

The name of the job being run.

" + }, + "StartedOn":{ + "shape":"TimestampValue", + "documentation":"

The date and time at which this job run was started.

" + }, + "LastModifiedOn":{ + "shape":"TimestampValue", + "documentation":"

The last time this job run was modified.

" + }, + "CompletedOn":{ + "shape":"TimestampValue", + "documentation":"

The date and time this job run completed.

" + }, + "JobRunState":{ + "shape":"JobRunState", + "documentation":"

The current state of the job run.

" + }, + "Arguments":{ + "shape":"GenericMap", + "documentation":"

The job arguments associated with this run.

" + }, + "ErrorMessage":{ + "shape":"ErrorString", + "documentation":"

An error message associated with this job run.

" + }, + "PredecessorRuns":{ + "shape":"PredecessorList", + "documentation":"

A list of predecessors to this job run.

" + }, + "AllocatedCapacity":{ + "shape":"IntegerValue", + "documentation":"

The amount of infrastructure capacity allocated to this job run.

" + } + }, + "documentation":"

Contains information about a job run.

" + }, + "JobRunList":{ + "type":"list", + "member":{"shape":"JobRun"} + }, + "JobRunState":{ + "type":"string", + "enum":[ + "STARTING", + "RUNNING", + "STOPPING", + "STOPPED", + "SUCCEEDED", + "FAILED" + ] + }, + "JobUpdate":{ + "type":"structure", + "members":{ + "Description":{ + "shape":"DescriptionString", + "documentation":"

Description of the job.

" + }, + "LogUri":{ + "shape":"UriString", + "documentation":"

Location of the logs for this job.

" + }, + "Role":{ + "shape":"RoleString", + "documentation":"

The role associated with this job.

" + }, + "ExecutionProperty":{ + "shape":"ExecutionProperty", + "documentation":"

An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.

" + }, + "Command":{ + "shape":"JobCommand", + "documentation":"

The JobCommand that executes this job.

" + }, + "DefaultArguments":{ + "shape":"GenericMap", + "documentation":"

The default parameters for this job.

" + }, + "Connections":{ + "shape":"ConnectionsList", + "documentation":"

The connections used for this job.

" + }, + "MaxRetries":{ + "shape":"MaxRetries", + "documentation":"

The maximum number of times to retry this job if it fails.

" + }, + "AllocatedCapacity":{ + "shape":"IntegerValue", + "documentation":"

The number of capacity units allocated to this job.

" + } + }, + "documentation":"

Specifies information used to update an existing job.

" + }, + "JsonValue":{"type":"string"}, + "KeyString":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, + "LastCrawlInfo":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"LastCrawlStatus", + "documentation":"

Status of the last crawl.

" + }, + "ErrorMessage":{ + "shape":"DescriptionString", + "documentation":"

Error information about the last crawl, if an error occurred.

" + }, + "LogGroup":{ + "shape":"LogGroup", + "documentation":"

The log group for the last crawl.

" + }, + "LogStream":{ + "shape":"LogStream", + "documentation":"

The log stream for the last crawl.

" + }, + "MessagePrefix":{ + "shape":"MessagePrefix", + "documentation":"

The prefix for a message about this crawl.

" + }, + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the crawl started.

" + } + }, + "documentation":"

Status and error information about the most recent crawl.

" + }, + "LastCrawlStatus":{ + "type":"string", + "enum":[ + "SUCCEEDED", + "CANCELLED", + "FAILED" + ] + }, + "Location":{ + "type":"structure", + "members":{ + "Jdbc":{ + "shape":"CodeGenNodeArgs", + "documentation":"

A JDBC location.

" + }, + "S3":{ + "shape":"CodeGenNodeArgs", + "documentation":"

An AWS S3 location.

" + } + }, + "documentation":"

The location of resources.

" + }, + "LocationMap":{ + "type":"map", + "key":{"shape":"ColumnValuesString"}, + "value":{"shape":"ColumnValuesString"} + }, + "LocationString":{ + "type":"string", + "max":2056, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "LogGroup":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[\\.\\-_/#A-Za-z0-9]+" + }, + "LogStream":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[^:*]*" + }, + "Logical":{ + "type":"string", + "enum":["AND"] + }, + "LogicalOperator":{ + "type":"string", + "enum":["EQUALS"] + }, + "MappingEntry":{ + "type":"structure", + "members":{ + "SourceTable":{ + "shape":"TableName", + "documentation":"

The name of the source table.

" + }, + "SourcePath":{ + "shape":"SchemaPathString", + "documentation":"

The source path.

" + }, + "SourceType":{ + "shape":"FieldType", + "documentation":"

The source type.

" + }, + "TargetTable":{ + "shape":"TableName", + "documentation":"

The target table.

" + }, + "TargetPath":{ + "shape":"SchemaPathString", + "documentation":"

The target path.

" + }, + "TargetType":{ + "shape":"FieldType", + "documentation":"

The target type.

" + } + }, + "documentation":"

Defines a mapping.

" + }, + "MappingList":{ + "type":"list", + "member":{"shape":"MappingEntry"} + }, + "MatchCriteria":{ + "type":"list", + "member":{"shape":"NameString"}, + "max":10, + "min":0 + }, + "MaxConcurrentRuns":{"type":"integer"}, + "MaxRetries":{"type":"integer"}, + "MessagePrefix":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, + "MessageString":{"type":"string"}, + "MillisecondsCount":{"type":"long"}, + "NameString":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, + "NameStringList":{ + "type":"list", + "member":{"shape":"NameString"} + }, + "NoScheduleException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

A message describing the problem.

" + } + }, + "documentation":"

There is no applicable schedule.

", + "exception":true + }, + "NonNegativeDouble":{ + "type":"double", + "min":0.0 + }, + "NonNegativeInteger":{ + "type":"integer", + "min":0 + }, + "OperationTimeoutException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

A message describing the problem.

" + } + }, + "documentation":"

The operation timed out.

", + "exception":true + }, + "Order":{ + "type":"structure", + "required":[ + "Column", + "SortOrder" + ], + "members":{ + "Column":{ + "shape":"NameString", + "documentation":"

The name of the column.

" + }, + "SortOrder":{ + "shape":"IntegerFlag", + "documentation":"

Indicates that the column is sorted in ascending order (== 1) or in descending order (== 0).

" + } + }, + "documentation":"

Specifies the sort order of a sorted column.

" + }, + "OrderList":{ + "type":"list", + "member":{"shape":"Order"} + }, + "PageSize":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "ParametersMap":{ + "type":"map", + "key":{"shape":"KeyString"}, + "value":{"shape":"ParametersMapValue"} + }, + "ParametersMapValue":{ + "type":"string", + "max":51200 + }, + "Partition":{ + "type":"structure", + "members":{ + "Values":{ + "shape":"ValueStringList", + "documentation":"

The values of the partition.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the catalog database where the table in question is located.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the table in question.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the partition was created.

" + }, + "LastAccessTime":{ + "shape":"Timestamp", + "documentation":"

The last time at which the partition was accessed.

" + }, + "StorageDescriptor":{ + "shape":"StorageDescriptor", + "documentation":"

Provides information about the physical location where the partition is stored.

" + }, + "Parameters":{ + "shape":"ParametersMap", + "documentation":"

Partition parameters, in the form of a list of key-value pairs.

" + }, + "LastAnalyzedTime":{ + "shape":"Timestamp", + "documentation":"

The last time at which column statistics were computed for this partition.

" + } + }, + "documentation":"

Represents a slice of table data.

" + }, + "PartitionError":{ + "type":"structure", + "members":{ + "PartitionValues":{ + "shape":"ValueStringList", + "documentation":"

The values that define the partition.

" + }, + "ErrorDetail":{ + "shape":"ErrorDetail", + "documentation":"

Details about the partition error.

" + } + }, + "documentation":"

Contains information about a partition error.

" + }, + "PartitionErrors":{ + "type":"list", + "member":{"shape":"PartitionError"} + }, + "PartitionInput":{ + "type":"structure", + "members":{ + "Values":{ + "shape":"ValueStringList", + "documentation":"

The values of the partition.

" + }, + "LastAccessTime":{ + "shape":"Timestamp", + "documentation":"

The last time at which the partition was accessed.

" + }, + "StorageDescriptor":{ + "shape":"StorageDescriptor", + "documentation":"

Provides information about the physical location where the partition is stored.

" + }, + "Parameters":{ + "shape":"ParametersMap", + "documentation":"

Partition parameters, in the form of a list of key-value pairs.

" + }, + "LastAnalyzedTime":{ + "shape":"Timestamp", + "documentation":"

The last time at which column statistics were computed for this partition.

" + } + }, + "documentation":"

The structure used to create and update a partition.

" + }, + "PartitionInputList":{ + "type":"list", + "member":{"shape":"PartitionInput"}, + "max":100, + "min":0 + }, + "PartitionList":{ + "type":"list", + "member":{"shape":"Partition"} + }, + "PartitionValueList":{ + "type":"structure", + "required":["Values"], + "members":{ + "Values":{"shape":"ValueStringList"} + } + }, + "Path":{"type":"string"}, + "PathList":{ + "type":"list", + "member":{"shape":"Path"} + }, + "PhysicalConnectionRequirements":{ + "type":"structure", + "members":{ + "SubnetId":{ + "shape":"NameString", + "documentation":"

The subnet ID used by the connection.

" + }, + "SecurityGroupIdList":{ + "shape":"SecurityGroupIdList", + "documentation":"

The security group ID list used by the connection.

" + }, + "AvailabilityZone":{ + "shape":"NameString", + "documentation":"

The connection's availability zone.

" + } + }, + "documentation":"

Specifies the physical requirements for a connection.

" + }, + "Predecessor":{ + "type":"structure", + "members":{ + "JobName":{ + "shape":"NameString", + "documentation":"

The name of the predecessor job.

" + }, + "RunId":{ + "shape":"IdString", + "documentation":"

The job-run ID of the predecessor job run.

" + } + }, + "documentation":"

A job run that preceded this one.

" + }, + "PredecessorList":{ + "type":"list", + "member":{"shape":"Predecessor"} + }, + "Predicate":{ + "type":"structure", + "members":{ + "Logical":{ + "shape":"Logical", + "documentation":"

Currently \"OR\" is not supported.

" + }, + "Conditions":{ + "shape":"ConditionList", + "documentation":"

A list of the conditions that determine when the trigger will fire.

" + } + }, + "documentation":"

Defines the predicate of the trigger, which determines when it fires.

" + }, + "PredicateString":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "PrincipalType":{ + "type":"string", + "enum":[ + "USER", + "ROLE", + "GROUP" + ] + }, + "PythonScript":{"type":"string"}, + "ResetJobBookmarkRequest":{ + "type":"structure", + "required":["JobName"], + "members":{ + "JobName":{ + "shape":"JobName", + "documentation":"

The name of the job in question.

" + } + } + }, + "ResetJobBookmarkResponse":{ + "type":"structure", + "members":{ + "JobBookmarkEntry":{ + "shape":"JobBookmarkEntry", + "documentation":"

The reset bookmark entry.

" + } + } + }, + "ResourceNumberLimitExceededException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

A message describing the problem.

" + } + }, + "documentation":"

A resource numerical limit was exceeded.

", + "exception":true + }, + "ResourceType":{ + "type":"string", + "enum":[ + "JAR", + "FILE", + "ARCHIVE" + ] + }, + "ResourceUri":{ + "type":"structure", + "members":{ + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

The type of the resource.

" + }, + "Uri":{ + "shape":"URI", + "documentation":"

The URI for accessing the resource.

" + } + }, + "documentation":"

URIs for function resources.

" + }, + "ResourceUriList":{ + "type":"list", + "member":{"shape":"ResourceUri"}, + "max":1000, + "min":0 + }, + "RoleArn":{ + "type":"string", + "pattern":"arn:aws:iam::\\d{12}:role/.*" + }, + "RoleString":{"type":"string"}, + "S3Target":{ + "type":"structure", + "members":{ + "Path":{ + "shape":"Path", + "documentation":"

The path to the S3 target.

" + }, + "Exclusions":{ + "shape":"PathList", + "documentation":"

A list of S3 objects to exclude from the crawl.

" + } + }, + "documentation":"

Specifies a crawler target in AWS S3.

" + }, + "S3TargetList":{ + "type":"list", + "member":{"shape":"S3Target"} + }, + "Schedule":{ + "type":"structure", + "members":{ + "ScheduleExpression":{ + "shape":"CronExpression", + "documentation":"

A cron expression that can be used as a CloudWatch event to schedule something (see CloudWatch Schedule Expression Syntax). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *).

" + }, + "State":{ + "shape":"ScheduleState", + "documentation":"

The state of the schedule.

" + } + }, + "documentation":"

A scheduling object using a cron statement to schedule an event.

" + }, + "ScheduleState":{ + "type":"string", + "enum":[ + "SCHEDULED", + "NOT_SCHEDULED", + "TRANSITIONING" + ] + }, + "SchedulerNotRunningException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

A message describing the problem.

" + } + }, + "documentation":"

The specified scheduler is not running.

", + "exception":true + }, + "SchedulerRunningException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

A message describing the problem.

" + } + }, + "documentation":"

The specified scheduler is already running.

", + "exception":true + }, + "SchedulerTransitioningException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

A message describing the problem.

" + } + }, + "documentation":"

The specified scheduler is transitioning.

", + "exception":true + }, + "SchemaChangePolicy":{ + "type":"structure", + "members":{ + "UpdateBehavior":{ + "shape":"UpdateBehavior", + "documentation":"

The update behavior.

" + }, + "DeleteBehavior":{ + "shape":"DeleteBehavior", + "documentation":"

The deletion behavior.

" + } + }, + "documentation":"

Crawler policy for update and deletion behavior.

" + }, + "SchemaPathString":{"type":"string"}, + "ScriptLocationString":{"type":"string"}, + "SecurityGroupIdList":{ + "type":"list", + "member":{"shape":"NameString"}, + "max":50, + "min":0 + }, + "Segment":{ + "type":"structure", + "required":[ + "SegmentNumber", + "TotalSegments" + ], + "members":{ + "SegmentNumber":{ + "shape":"NonNegativeInteger", + "documentation":"

The zero-based index number of this segment. For example, if the total number of segments is 4, SegmentNumber values will range from zero through three.

" + }, + "TotalSegments":{ + "shape":"TotalSegmentsInteger", + "documentation":"

The total number of segments.

" + } + }, + "documentation":"

Defines a non-overlapping region of a table's partitions, allowing multiple requests to be executed in parallel.

" + }, + "SerDeInfo":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

Name of the SerDe.

" + }, + "SerializationLibrary":{ + "shape":"NameString", + "documentation":"

Usually the class that implements the SerDe. An example is: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe.

" + }, + "Parameters":{ + "shape":"ParametersMap", + "documentation":"

A list of initialization parameters for the SerDe, in key-value form.

" + } + }, + "documentation":"

Information about a serialization/deserialization program (SerDe) which serves as an extractor and loader.

" + }, + "SkewedInfo":{ + "type":"structure", + "members":{ + "SkewedColumnNames":{ + "shape":"NameStringList", + "documentation":"

A list of names of columns that contain skewed values.

" + }, + "SkewedColumnValues":{ + "shape":"ColumnValueStringList", + "documentation":"

A list of values that appear so frequently as to be considered skewed.

" + }, + "SkewedColumnValueLocationMaps":{ + "shape":"LocationMap", + "documentation":"

A mapping of skewed values to the columns that contain them.

" + } + }, + "documentation":"

Specifies skewed values in a table. Skewed values are ones that occur with very high frequency.

" + }, + "StartCrawlerRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

Name of the Crawler to start.

" + } + } + }, + "StartCrawlerResponse":{ + "type":"structure", + "members":{ + } + }, + "StartCrawlerScheduleRequest":{ + "type":"structure", + "required":["CrawlerName"], + "members":{ + "CrawlerName":{ + "shape":"NameString", + "documentation":"

Name of the crawler to schedule.

" + } + } + }, + "StartCrawlerScheduleResponse":{ + "type":"structure", + "members":{ + } + }, + "StartJobRunRequest":{ + "type":"structure", + "required":["JobName"], + "members":{ + "JobName":{ + "shape":"NameString", + "documentation":"

The name of the job to start.

" + }, + "JobRunId":{ + "shape":"IdString", + "documentation":"

The ID of the job run to start.

" + }, + "Arguments":{ + "shape":"GenericMap", + "documentation":"

Specific arguments for this job run.

" + }, + "AllocatedCapacity":{ + "shape":"IntegerValue", + "documentation":"

The infrastructure capacity to allocate to this job.

" + } + } + }, + "StartJobRunResponse":{ + "type":"structure", + "members":{ + "JobRunId":{ + "shape":"IdString", + "documentation":"

The ID assigned to this job run.

" + } + } + }, + "StartTriggerRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the trigger to start.

" + } + } + }, + "StartTriggerResponse":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the trigger that was started.

" + } + } + }, + "StopCrawlerRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

Name of the Crawler to stop.

" + } + } + }, + "StopCrawlerResponse":{ + "type":"structure", + "members":{ + } + }, + "StopCrawlerScheduleRequest":{ + "type":"structure", + "required":["CrawlerName"], + "members":{ + "CrawlerName":{ + "shape":"NameString", + "documentation":"

Name of the crawler whose schedule state to set.

" + } + } + }, + "StopCrawlerScheduleResponse":{ + "type":"structure", + "members":{ + } + }, + "StopTriggerRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the trigger to stop.

" + } + } + }, + "StopTriggerResponse":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the trigger that was stopped.

" + } + } + }, + "StorageDescriptor":{ + "type":"structure", + "members":{ + "Columns":{ + "shape":"ColumnList", + "documentation":"

A list of the Columns in the table.

" + }, + "Location":{ + "shape":"LocationString", + "documentation":"

The physical location of the table. By default this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.

" + }, + "InputFormat":{ + "shape":"FormatString", + "documentation":"

The input format: SequenceFileInputFormat (binary), or TextInputFormat, or a custom format.

" + }, + "OutputFormat":{ + "shape":"FormatString", + "documentation":"

The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat, or a custom format.

" + }, + "Compressed":{ + "shape":"Boolean", + "documentation":"

True if the data in the table is compressed, or False if not.

" + }, + "NumberOfBuckets":{ + "shape":"Integer", + "documentation":"

Must be specified if the table contains any dimension columns.

" + }, + "SerdeInfo":{ + "shape":"SerDeInfo", + "documentation":"

Serialization/deserialization (SerDe) information.

" + }, + "BucketColumns":{ + "shape":"NameStringList", + "documentation":"

A list of reducer grouping columns, clustering columns, and bucketing columns in the table.

" + }, + "SortColumns":{ + "shape":"OrderList", + "documentation":"

A list specifying the sort order of each bucket in the table.

" + }, + "Parameters":{ + "shape":"ParametersMap", + "documentation":"

User-supplied properties in key-value form.

" + }, + "SkewedInfo":{ + "shape":"SkewedInfo", + "documentation":"

Information about values that appear very frequently in a column (skewed values).

" + }, + "StoredAsSubDirectories":{ + "shape":"Boolean", + "documentation":"

True if the table data is stored in subdirectories, or False if not.

" + } + }, + "documentation":"

Describes the physical storage of table data.

" + }, + "StringList":{ + "type":"list", + "member":{"shape":"GenericString"} + }, + "Table":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

Name of the table.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

Name of the metadata database where the table metadata resides.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

Description of the table.

" + }, + "Owner":{ + "shape":"NameString", + "documentation":"

Owner of the table.

" + }, + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

Time when the table definition was created in the Data Catalog.

" + }, + "UpdateTime":{ + "shape":"Timestamp", + "documentation":"

Last time the table was updated.

" + }, + "LastAccessTime":{ + "shape":"Timestamp", + "documentation":"

Last time the table was accessed. This is usually taken from HDFS, and may not be reliable.

" + }, + "LastAnalyzedTime":{ + "shape":"Timestamp", + "documentation":"

Last time column statistics were computed for this table.

" + }, + "Retention":{ + "shape":"NonNegativeInteger", + "documentation":"

Retention time for this table.

" + }, + "StorageDescriptor":{ + "shape":"StorageDescriptor", + "documentation":"

A storage descriptor containing information about the physical storage of this table.

" + }, + "PartitionKeys":{ + "shape":"ColumnList", + "documentation":"

A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.

" + }, + "ViewOriginalText":{ + "shape":"ViewTextString", + "documentation":"

If the table is a view, the original text of the view; otherwise null.

" + }, + "ViewExpandedText":{ + "shape":"ViewTextString", + "documentation":"

If the table is a view, the expanded text of the view; otherwise null.

" + }, + "TableType":{ + "shape":"TableTypeString", + "documentation":"

The type of this table (EXTERNAL_TABLE, VIRTUAL_VIEW, etc.).

" + }, + "Parameters":{ + "shape":"ParametersMap", + "documentation":"

Properties associated with this table, as a list of key-value pairs.

" + }, + "CreatedBy":{ + "shape":"NameString", + "documentation":"

Person or entity who created the table.

" + } + }, + "documentation":"

Represents a collection of related data organized in columns and rows.

" + }, + "TableError":{ + "type":"structure", + "members":{ + "TableName":{ + "shape":"NameString", + "documentation":"

Name of the table.

" + }, + "ErrorDetail":{ + "shape":"ErrorDetail", + "documentation":"

Detail about the error.

" + } + }, + "documentation":"

An error record for table operations.

" + }, + "TableErrors":{ + "type":"list", + "member":{"shape":"TableError"} + }, + "TableInput":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

Name of the table.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

Description of the table.

" + }, + "Owner":{ + "shape":"NameString", + "documentation":"

Owner of the table.

" + }, + "LastAccessTime":{ + "shape":"Timestamp", + "documentation":"

Last time the table was accessed.

" + }, + "LastAnalyzedTime":{ + "shape":"Timestamp", + "documentation":"

Last time column statistics were computed for this table.

" + }, + "Retention":{ + "shape":"NonNegativeInteger", + "documentation":"

Retention time for this table.

" + }, + "StorageDescriptor":{ + "shape":"StorageDescriptor", + "documentation":"

A storage descriptor containing information about the physical storage of this table.

" + }, + "PartitionKeys":{ + "shape":"ColumnList", + "documentation":"

A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.

" + }, + "ViewOriginalText":{ + "shape":"ViewTextString", + "documentation":"

If the table is a view, the original text of the view; otherwise null.

" + }, + "ViewExpandedText":{ + "shape":"ViewTextString", + "documentation":"

If the table is a view, the expanded text of the view; otherwise null.

" + }, + "TableType":{ + "shape":"TableTypeString", + "documentation":"

The type of this table (EXTERNAL_TABLE, VIRTUAL_VIEW, etc.).

" + }, + "Parameters":{ + "shape":"ParametersMap", + "documentation":"

Properties associated with this table, as a list of key-value pairs.

" + } + }, + "documentation":"

Structure used to create or update the table.
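
A minimal sketch of how this structure is passed, assuming a Glue database "sales_db" and table "events" (both placeholder names); only a few optional members are shown:

    import boto3

    glue = boto3.client("glue")
    glue.update_table(
        DatabaseName="sales_db",
        TableInput={
            "Name": "events",  # the only required member
            "Description": "Click-stream events",
            "TableType": "EXTERNAL_TABLE",
            "Parameters": {"classification": "json"},
        },
    )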

" + }, + "TableList":{ + "type":"list", + "member":{"shape":"Table"} + }, + "TableName":{"type":"string"}, + "TablePrefix":{ + "type":"string", + "max":128, + "min":0 + }, + "TableTypeString":{ + "type":"string", + "max":255 + }, + "TableVersion":{ + "type":"structure", + "members":{ + "Table":{"shape":"Table"}, + "VersionId":{"shape":"VersionString"} + } + }, + "Timestamp":{"type":"timestamp"}, + "TimestampValue":{"type":"timestamp"}, + "Token":{"type":"string"}, + "TotalSegmentsInteger":{ + "type":"integer", + "max":10, + "min":1 + }, + "Trigger":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

Name of the trigger.

" + }, + "Id":{ + "shape":"IdString", + "documentation":"

The trigger ID.

" + }, + "Type":{ + "shape":"TriggerType", + "documentation":"

The type of this trigger.

" + }, + "State":{ + "shape":"TriggerState", + "documentation":"

The current state of the trigger.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

A description of this trigger.

" + }, + "Schedule":{ + "shape":"GenericString", + "documentation":"

A cron schedule expression.

" + }, + "Actions":{ + "shape":"ActionList", + "documentation":"

The actions initiated by this trigger.

" + }, + "Predicate":{ + "shape":"Predicate", + "documentation":"

The predicate of this trigger.

" + } + }, + "documentation":"

Information about a specific trigger.

" + }, + "TriggerList":{ + "type":"list", + "member":{"shape":"Trigger"} + }, + "TriggerState":{ + "type":"string", + "enum":[ + "CREATING", + "CREATED", + "ACTIVATING", + "ACTIVATED", + "DEACTIVATING", + "DEACTIVATED", + "DELETING", + "UPDATING" + ] + }, + "TriggerType":{ + "type":"string", + "enum":[ + "SCHEDULED", + "CONDITIONAL", + "ON_DEMAND" + ] + }, + "TriggerUpdate":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the trigger.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

A description of this trigger.

" + }, + "Schedule":{ + "shape":"GenericString", + "documentation":"

A cron expression specifying the schedule.

" + }, + "Actions":{ + "shape":"ActionList", + "documentation":"

The actions initiated by this trigger.

" + }, + "Predicate":{ + "shape":"Predicate", + "documentation":"

The predicate of this trigger, which defines when it will fire.

" + } + }, + "documentation":"

A structure used to provide information used to update a trigger.
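
A minimal sketch of passing this structure through UpdateTrigger, assuming an existing trigger named "nightly-etl" (a placeholder):

    import boto3

    glue = boto3.client("glue")
    glue.update_trigger(
        Name="nightly-etl",
        TriggerUpdate={
            "Description": "Run the ETL job every day at 12:15 UTC",
            "Schedule": "cron(15 12 * * ? *)",
        },
    )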

" + }, + "URI":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "UpdateBehavior":{ + "type":"string", + "enum":[ + "LOG", + "UPDATE_IN_DATABASE" + ] + }, + "UpdateClassifierRequest":{ + "type":"structure", + "members":{ + "GrokClassifier":{ + "shape":"UpdateGrokClassifierRequest", + "documentation":"

A GrokClassifier object with updated fields.

" + } + } + }, + "UpdateClassifierResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateConnectionRequest":{ + "type":"structure", + "required":[ + "Name", + "ConnectionInput" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog in which the connection resides. If none is supplied, the AWS account ID is used by default.

" + }, + "Name":{ + "shape":"NameString", + "documentation":"

The name of the connection definition to update.

" + }, + "ConnectionInput":{ + "shape":"ConnectionInput", + "documentation":"

A ConnectionInput object that redefines the connection in question.

" + } + } + }, + "UpdateConnectionResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateCrawlerRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

Name of the new Crawler.

" + }, + "Role":{ + "shape":"RoleArn", + "documentation":"

The AWS ARN of the IAM role used by the new Crawler to access customer resources.

" + }, + "DatabaseName":{ + "shape":"DatabaseName", + "documentation":"

The Glue Database where results will be stored, such as: arn:aws:daylight:us-east-1::database/sometable/*.

" + }, + "Description":{ + "shape":"DescriptionStringRemovable", + "documentation":"

A description of the new Crawler.

" + }, + "Targets":{ + "shape":"CrawlerTargets", + "documentation":"

A list of targets to crawl.

" + }, + "Schedule":{ + "shape":"CronExpression", + "documentation":"

A cron expression that can be used as a CloudWatch event (see CloudWatch Schedule Expression Syntax). For example, to run every day at 12:15 UTC, specify: cron(15 12 * * ? *).
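
A minimal sketch of setting this schedule via UpdateCrawler, assuming an existing crawler named "s3-crawler" (a placeholder):

    import boto3

    glue = boto3.client("glue")
    glue.update_crawler(
        Name="s3-crawler",
        Schedule="cron(15 12 * * ? *)",  # every day at 12:15 UTC
    )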

" + }, + "Classifiers":{ + "shape":"ClassifierNameList", + "documentation":"

A list of custom Classifier names that the user has registered. By default, all AWS classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification.

" + }, + "TablePrefix":{ + "shape":"TablePrefix", + "documentation":"

The table prefix used for catalog tables that are created.

" + }, + "SchemaChangePolicy":{ + "shape":"SchemaChangePolicy", + "documentation":"

Policy for the crawler's update and deletion behavior.

" + } + } + }, + "UpdateCrawlerResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateCrawlerScheduleRequest":{ + "type":"structure", + "required":["CrawlerName"], + "members":{ + "CrawlerName":{ + "shape":"NameString", + "documentation":"

Name of the crawler whose schedule to update.

" + }, + "Schedule":{ + "shape":"CronExpression", + "documentation":"

Cron expression of the updated schedule.

" + } + } + }, + "UpdateCrawlerScheduleResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateDatabaseRequest":{ + "type":"structure", + "required":[ + "Name", + "DatabaseInput" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog in which the metadata database resides. If none is supplied, the AWS account ID is used by default.

" + }, + "Name":{ + "shape":"NameString", + "documentation":"

The name of the metadata database to update in the catalog.

" + }, + "DatabaseInput":{ + "shape":"DatabaseInput", + "documentation":"

A DatabaseInput object specifying the new definition of the metadata database in the catalog.

" + } + } + }, + "UpdateDatabaseResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateDevEndpointRequest":{ + "type":"structure", + "required":["EndpointName"], + "members":{ + "EndpointName":{ + "shape":"GenericString", + "documentation":"

The name of the DevEndpoint to be updated.

" + }, + "PublicKey":{ + "shape":"GenericString", + "documentation":"

The public key for the DevEndpoint to use.

" + }, + "CustomLibraries":{ + "shape":"DevEndpointCustomLibraries", + "documentation":"

Custom Python or Java libraries to be loaded in the DevEndpoint.

" + } + } + }, + "UpdateDevEndpointResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateGrokClassifierRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the GrokClassifier.

" + }, + "Classification":{ + "shape":"Classification", + "documentation":"

The type of result that the classifier matches, such as Twitter JSON, Omniture logs, CloudWatch logs, and so forth.

" + }, + "GrokPattern":{ + "shape":"GrokPattern", + "documentation":"

The grok pattern used by this classifier.

" + }, + "CustomPatterns":{ + "shape":"CustomPatterns", + "documentation":"

Custom grok patterns used by this classifier.

" + } + }, + "documentation":"

Specifies a Grok classifier to update when passed to UpdateClassifier.

" + }, + "UpdateJobRequest":{ + "type":"structure", + "required":[ + "JobName", + "JobUpdate" + ], + "members":{ + "JobName":{ + "shape":"NameString", + "documentation":"

Name of the job definition to update.

" + }, + "JobUpdate":{ + "shape":"JobUpdate", + "documentation":"

Specifies the values with which to update the job.

" + } + } + }, + "UpdateJobResponse":{ + "type":"structure", + "members":{ + "JobName":{ + "shape":"NameString", + "documentation":"

Returns the name of the updated job.

" + } + } + }, + "UpdatePartitionRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName", + "PartitionValueList", + "PartitionInput" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog where the partition to be updated resides. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the catalog database in which the table in question resides.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the table where the partition to be updated is located.

" + }, + "PartitionValueList":{ + "shape":"BoundedPartitionValueList", + "documentation":"

A list of the values defining the partition.

" + }, + "PartitionInput":{ + "shape":"PartitionInput", + "documentation":"

The new partition object with which to update the partition.
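
A minimal sketch of an UpdatePartition call, assuming a table partitioned by year; all names and values are placeholders:

    import boto3

    glue = boto3.client("glue")
    glue.update_partition(
        DatabaseName="sales_db",
        TableName="events",
        PartitionValueList=["2017"],  # identifies the partition being replaced
        PartitionInput={
            "Values": ["2017"],
            "Parameters": {"compression": "gzip"},
        },
    )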

" + } + } + }, + "UpdatePartitionResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateTableRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableInput" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog where the table resides. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the catalog database in which the table resides.

" + }, + "TableInput":{ + "shape":"TableInput", + "documentation":"

An updated TableInput object to define the metadata table in the catalog.

" + } + } + }, + "UpdateTableResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateTriggerRequest":{ + "type":"structure", + "required":[ + "Name", + "TriggerUpdate" + ], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the trigger to update.

" + }, + "TriggerUpdate":{ + "shape":"TriggerUpdate", + "documentation":"

The new values with which to update the trigger.

" + } + } + }, + "UpdateTriggerResponse":{ + "type":"structure", + "members":{ + "Trigger":{ + "shape":"Trigger", + "documentation":"

The resulting trigger definition.

" + } + } + }, + "UpdateUserDefinedFunctionRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "FunctionName", + "FunctionInput" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog where the function to be updated is located. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the catalog database where the function to be updated is located.

" + }, + "FunctionName":{ + "shape":"NameString", + "documentation":"

The name of the function.

" + }, + "FunctionInput":{ + "shape":"UserDefinedFunctionInput", + "documentation":"

A FunctionInput object that re-defines the function in the Data Catalog.

" + } + } + }, + "UpdateUserDefinedFunctionResponse":{ + "type":"structure", + "members":{ + } + }, + "UriString":{"type":"string"}, + "UserDefinedFunction":{ + "type":"structure", + "members":{ + "FunctionName":{ + "shape":"NameString", + "documentation":"

The name of the function.

" + }, + "ClassName":{ + "shape":"NameString", + "documentation":"

The Java class that contains the function code.

" + }, + "OwnerName":{ + "shape":"NameString", + "documentation":"

The owner of the function.

" + }, + "OwnerType":{ + "shape":"PrincipalType", + "documentation":"

The owner type.

" + }, + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the function was created.

" + }, + "ResourceUris":{ + "shape":"ResourceUriList", + "documentation":"

The resource URIs for the function.

" + } + }, + "documentation":"

Represents the equivalent of a Hive user-defined function (UDF) definition.

" + }, + "UserDefinedFunctionInput":{ + "type":"structure", + "members":{ + "FunctionName":{ + "shape":"NameString", + "documentation":"

The name of the function.

" + }, + "ClassName":{ + "shape":"NameString", + "documentation":"

The Java class that contains the function code.

" + }, + "OwnerName":{ + "shape":"NameString", + "documentation":"

The owner of the function.

" + }, + "OwnerType":{ + "shape":"PrincipalType", + "documentation":"

The owner type.

" + }, + "ResourceUris":{ + "shape":"ResourceUriList", + "documentation":"

The resource URIs for the function.

" + } + }, + "documentation":"

A structure used to create or update a user-defined function.
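
A minimal sketch of passing this structure to UpdateUserDefinedFunction; the database, function, and class names are placeholders, and OwnerType is assumed to accept a PrincipalType value such as GROUP:

    import boto3

    glue = boto3.client("glue")
    glue.update_user_defined_function(
        DatabaseName="sales_db",
        FunctionName="to_upper",
        FunctionInput={
            "FunctionName": "to_upper",
            "ClassName": "com.example.udf.ToUpper",  # Java class holding the code
            "OwnerName": "data-eng",
            "OwnerType": "GROUP",  # assumed PrincipalType value
        },
    )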

" + }, + "UserDefinedFunctionList":{ + "type":"list", + "member":{"shape":"UserDefinedFunction"} + }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

A message describing the problem.

" + } + }, + "documentation":"

A value could not be validated.

", + "exception":true + }, + "ValueString":{ + "type":"string", + "max":1024 + }, + "ValueStringList":{ + "type":"list", + "member":{"shape":"ValueString"} + }, + "VersionId":{"type":"long"}, + "VersionMismatchException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

A message describing the problem.

" + } + }, + "documentation":"

There was a version conflict.

", + "exception":true + }, + "VersionString":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, + "ViewTextString":{ + "type":"string", + "max":2048 + } + }, + "documentation":"Defines service operations used by the GlueFrontendService" +} diff --git a/botocore/data/inspector/2016-02-16/service-2.json b/botocore/data/inspector/2016-02-16/service-2.json index 3c9b3e40..a93d0e72 100644 --- a/botocore/data/inspector/2016-02-16/service-2.json +++ b/botocore/data/inspector/2016-02-16/service-2.json @@ -1027,7 +1027,8 @@ "FAILED", "ERROR", "COMPLETED", - "COMPLETED_WITH_ERRORS" + "COMPLETED_WITH_ERRORS", + "CANCELED" ] }, "AssessmentRunStateChange":{ @@ -1330,7 +1331,7 @@ }, "userAttributesForFindings":{ "shape":"UserAttributeList", - "documentation":"

The user-defined attributes that are assigned to every finding that is generated by the assessment run that uses this assessment template.

" + "documentation":"

The user-defined attributes that are assigned to every finding that is generated by the assessment run that uses this assessment template. An attribute is a key and value pair (an Attribute object). Within an assessment template, each key must be unique.

" } } }, @@ -2669,6 +2670,13 @@ } } }, + "StopAction":{ + "type":"string", + "enum":[ + "START_EVALUATION", + "SKIP_EVALUATION" + ] + }, "StopAssessmentRunRequest":{ "type":"structure", "required":["assessmentRunArn"], @@ -2676,6 +2684,10 @@ "assessmentRunArn":{ "shape":"Arn", "documentation":"

The ARN of the assessment run that you want to stop.

" + }, + "stopAction":{ + "shape":"StopAction", + "documentation":"

An input option that can be set to either START_EVALUATION or SKIP_EVALUATION. START_EVALUATION (the default value) stops the AWS agent from collecting data and begins the results evaluation and the findings generation process. SKIP_EVALUATION cancels the assessment run immediately, after which no findings are generated.
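
A minimal sketch of canceling a run immediately with SKIP_EVALUATION; the assessment run ARN is a placeholder:

    import boto3

    inspector = boto3.client("inspector")
    inspector.stop_assessment_run(
        assessmentRunArn="arn:aws:inspector:us-west-2:123456789012:target/0-example/template/0-example/run/0-example",
        stopAction="SKIP_EVALUATION",  # default is START_EVALUATION
    )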

" } } }, diff --git a/botocore/data/kinesis/2013-12-02/paginators-1.json b/botocore/data/kinesis/2013-12-02/paginators-1.json index f2b4e081..5f6d0a45 100644 --- a/botocore/data/kinesis/2013-12-02/paginators-1.json +++ b/botocore/data/kinesis/2013-12-02/paginators-1.json @@ -13,7 +13,8 @@ "StreamDescription.RetentionPeriodHours", "StreamDescription.EnhancedMonitoring", "StreamDescription.EncryptionType", - "StreamDescription.KeyId" + "StreamDescription.KeyId", + "StreamDescription.StreamCreationTimestamp" ] }, "ListStreams": { diff --git a/botocore/data/kinesisanalytics/2015-08-14/service-2.json b/botocore/data/kinesisanalytics/2015-08-14/service-2.json index 9863f323..c740a953 100644 --- a/botocore/data/kinesisanalytics/2015-08-14/service-2.json +++ b/botocore/data/kinesisanalytics/2015-08-14/service-2.json @@ -27,7 +27,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

Adds a CloudWatch log stream to monitor application configuration errors. For more information about using CloudWatch log streams with Amazon Kinesis Analytics applications, see Monitoring Configuration Errors.

" + "documentation":"

Adds a CloudWatch log stream to monitor application configuration errors. For more information about using CloudWatch log streams with Amazon Kinesis Analytics applications, see Working with Amazon CloudWatch Logs.

" }, "AddApplicationInput":{ "name":"AddApplicationInput", @@ -41,7 +41,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ResourceInUseException"}, {"shape":"InvalidArgumentException"}, - {"shape":"ConcurrentModificationException"} + {"shape":"ConcurrentModificationException"}, + {"shape":"CodeValidationException"} ], "documentation":"

Adds a streaming source to your Amazon Kinesis application. For conceptual information, see Configuring Application Input.

You can add a streaming source either when you create an application or you can use this operation to add a streaming source after you create an application. For more information, see CreateApplication.

Any configuration update, including adding a streaming source using this operation, results in a new version of the application. You can use the DescribeApplication operation to find the current application version.

This operation requires permissions to perform the kinesisanalytics:AddApplicationInput action.

" }, @@ -122,7 +123,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

Deletes a CloudWatch log stream from an application. For more information about using CloudWatch log streams with Amazon Kinesis Analytics applications, see Monitoring Configuration Errors.

" + "documentation":"

Deletes a CloudWatch log stream from an application. For more information about using CloudWatch log streams with Amazon Kinesis Analytics applications, see Working with Amazon CloudWatch Logs.

" }, "DeleteApplicationOutput":{ "name":"DeleteApplicationOutput", @@ -180,7 +181,8 @@ "errors":[ {"shape":"InvalidArgumentException"}, {"shape":"UnableToDetectSchemaException"}, - {"shape":"ResourceProvisionedThroughputExceededException"} + {"shape":"ResourceProvisionedThroughputExceededException"}, + {"shape":"ServiceUnavailableException"} ], "documentation":"

Infers a schema by evaluating sample records on the specified streaming source (Amazon Kinesis stream or Amazon Kinesis Firehose delivery stream). In the response, the operation returns the inferred schema and also the sample records that the operation used to infer the schema.

You can use the inferred schema when configuring a streaming source for your application. For conceptual information, see Configuring Application Input. Note that when you create an application using the Amazon Kinesis Analytics console, the console uses this operation to infer a schema and show it in the console user interface.

This operation requires permissions to perform the kinesisanalytics:DiscoverInputSchema action.

" }, @@ -253,15 +255,15 @@ "members":{ "ApplicationName":{ "shape":"ApplicationName", - "documentation":"

The Amazon Kinesis Analytics application name.

" + "documentation":"

The Kinesis Analytics application name.

" }, "CurrentApplicationVersionId":{ "shape":"ApplicationVersionId", - "documentation":"

The version ID of the Amazon Kinesis Analytics application.

" + "documentation":"

The version ID of the Kinesis Analytics application.

" }, "CloudWatchLoggingOption":{ "shape":"CloudWatchLoggingOption", - "documentation":"

Provide the CloudWatch log stream ARN and the IAM role ARN. Note: To write application messages to CloudWatch, the IAM role used must have the PutLogEvents policy action enabled.

" + "documentation":"

Provides the CloudWatch log stream Amazon Resource Name (ARN) and the IAM role ARN. Note: To write application messages to CloudWatch, the IAM role that is used must have the PutLogEvents policy action enabled.
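
A minimal sketch of attaching a logging option with AddApplicationCloudWatchLoggingOption; the application name, log stream ARN, and role ARN are placeholders, and the role is assumed to allow the logs:PutLogEvents action:

    import boto3

    ka = boto3.client("kinesisanalytics")
    app = ka.describe_application(ApplicationName="my-app")["ApplicationDetail"]
    ka.add_application_cloud_watch_logging_option(
        ApplicationName="my-app",
        CurrentApplicationVersionId=app["ApplicationVersionId"],
        CloudWatchLoggingOption={
            "LogStreamARN": "arn:aws:logs:us-east-1:123456789012:log-group:my-app:log-stream:errors",
            "RoleARN": "arn:aws:iam::123456789012:role/kinesis-analytics-logging",
        },
    )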

" } } }, @@ -414,7 +416,7 @@ }, "CloudWatchLoggingOptionDescriptions":{ "shape":"CloudWatchLoggingOptionDescriptions", - "documentation":"

Describes the CloudWatch log streams configured to receive application messages. For more information about using CloudWatch log streams with Amazon Kinesis Analytics applications, see Monitoring Configuration Errors.

" + "documentation":"

Describes the CloudWatch log streams that are configured to receive application messages. For more information about using CloudWatch log streams with Amazon Kinesis Analytics applications, see Working with Amazon CloudWatch Logs.

" }, "ApplicationCode":{ "shape":"ApplicationCode", @@ -540,10 +542,10 @@ }, "RoleARN":{ "shape":"RoleARN", - "documentation":"

IAM ARN of the role to use to send application messages. Note: To write application messages to CloudWatch, the IAM role used must have the PutLogEvents policy action enabled.

" + "documentation":"

IAM ARN of the role to use to send application messages. Note: To write application messages to CloudWatch, the IAM role that is used must have the PutLogEvents policy action enabled.

" } }, - "documentation":"

Provides a description of CloudWatch logging options, including the log stream ARN and the role ARN.

" + "documentation":"

Provides a description of CloudWatch logging options, including the log stream Amazon Resource Name (ARN) and the role ARN.

" }, "CloudWatchLoggingOptionDescription":{ "type":"structure", @@ -642,7 +644,7 @@ }, "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", - "documentation":"

Use this parameter to configure a CloudWatch log stream to monitor application configuration errors. For more information, see Monitoring Configuration Errors.

" + "documentation":"

Use this parameter to configure a CloudWatch log stream to monitor application configuration errors. For more information, see Working with Amazon CloudWatch Logs.

" }, "ApplicationCode":{ "shape":"ApplicationCode", @@ -672,11 +674,11 @@ "members":{ "ApplicationName":{ "shape":"ApplicationName", - "documentation":"

The Amazon Kinesis Analytics application name.

" + "documentation":"

The Kinesis Analytics application name.

" }, "CurrentApplicationVersionId":{ "shape":"ApplicationVersionId", - "documentation":"

The version ID of the Amazon Kinesis Analytics application.

" + "documentation":"

The version ID of the Kinesis Analytics application.

" }, "CloudWatchLoggingOptionId":{ "shape":"Id", @@ -879,11 +881,11 @@ }, "KinesisStreamsInput":{ "shape":"KinesisStreamsInput", - "documentation":"

If the streaming source is an Amazon Kinesis stream, identifies the stream's Amazon Resource Name (ARN) and an IAM role that enables Amazon Kinesis Analytics to access the stream on your behalf.

" + "documentation":"

If the streaming source is an Amazon Kinesis stream, identifies the stream's Amazon Resource Name (ARN) and an IAM role that enables Amazon Kinesis Analytics to access the stream on your behalf.

Note: Either KinesisStreamsInput or KinesisFirehoseInput is required.

" }, "KinesisFirehoseInput":{ "shape":"KinesisFirehoseInput", - "documentation":"

If the streaming source is an Amazon Kinesis Firehose delivery stream, identifies the Firehose delivery stream's ARN and an IAM role that enables Amazon Kinesis Analytics to access the stream on your behalf.

" + "documentation":"

If the streaming source is an Amazon Kinesis Firehose delivery stream, identifies the Firehose delivery stream's ARN and an IAM role that enables Amazon Kinesis Analytics to access the stream on your behalf.

Note: Either KinesisStreamsInput or KinesisFirehoseInput is required.

" }, "InputParallelism":{ "shape":"InputParallelism", @@ -969,7 +971,7 @@ }, "InputParallelismCount":{ "type":"integer", - "max":10, + "max":64, "min":1 }, "InputParallelismUpdate":{ @@ -1085,7 +1087,7 @@ "members":{ "RecordRowPath":{ "shape":"RecordRowPath", - "documentation":"

Path to the top-level parent that contains the records.

For example, consider the following JSON record:

In the RecordRowPath, \"$\" refers to the root and path \"$.vehicle.Model\" refers to the specific \"Model\" key in the JSON.

" + "documentation":"

Path to the top-level parent that contains the records.

" } }, "documentation":"

Provides additional mapping information when JSON is the record format on the streaming source.

" @@ -1470,7 +1472,7 @@ "RecordColumnMapping":{"type":"string"}, "RecordColumnName":{ "type":"string", - "pattern":"[a-zA-Z][a-zA-Z0-9_]+" + "pattern":"[a-zA-Z_][a-zA-Z0-9_]*" }, "RecordColumnSqlType":{"type":"string"}, "RecordColumns":{ @@ -1681,6 +1683,14 @@ }, "documentation":"

Describes the S3 bucket name, object key name, and IAM role that Amazon Kinesis Analytics can assume to read the Amazon S3 object on your behalf and populate the in-application reference table.

" }, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The service is unavailable. Back off and retry the operation.

", + "exception":true + }, "SourceSchema":{ "type":"structure", "required":[ diff --git a/botocore/data/lambda/2015-03-31/examples-1.json b/botocore/data/lambda/2015-03-31/examples-1.json index c5a45d3f..9aea28ad 100644 --- a/botocore/data/lambda/2015-03-31/examples-1.json +++ b/botocore/data/lambda/2015-03-31/examples-1.json @@ -438,7 +438,7 @@ "CodeSha256": "", "CodeSize": 123, "Description": "", - "FunctionArn": "arn:aws:lambda:us-west-2:123456789012:function:myFunction", + "FunctionArn": "arn:aws:lambda:us-west-2:123456789012:function:myFunction:1", "FunctionName": "myFunction", "Handler": "index.handler", "LastModified": "2016-11-21T19:49:20.006+0000", diff --git a/botocore/data/mgh/2017-05-31/paginators-1.json b/botocore/data/mgh/2017-05-31/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/mgh/2017-05-31/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/mgh/2017-05-31/service-2.json b/botocore/data/mgh/2017-05-31/service-2.json new file mode 100644 index 00000000..e097a4a4 --- /dev/null +++ b/botocore/data/mgh/2017-05-31/service-2.json @@ -0,0 +1,1123 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2017-05-31", + "endpointPrefix":"mgh", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"AWS Migration Hub", + "signatureVersion":"v4", + "targetPrefix":"AWSMigrationHub", + "uid":"AWSMigrationHub-2017-05-31" + }, + "operations":{ + "AssociateCreatedArtifact":{ + "name":"AssociateCreatedArtifact", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateCreatedArtifactRequest"}, + "output":{"shape":"AssociateCreatedArtifactResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"DryRunOperation"}, + {"shape":"UnauthorizedOperation"}, + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Associates a created artifact of an AWS cloud resource, the target receiving the migration, with the migration task performed by a migration tool. This API has the following traits:

" + }, + "AssociateDiscoveredResource":{ + "name":"AssociateDiscoveredResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateDiscoveredResourceRequest"}, + "output":{"shape":"AssociateDiscoveredResourceResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"DryRunOperation"}, + {"shape":"UnauthorizedOperation"}, + {"shape":"InvalidInputException"}, + {"shape":"PolicyErrorException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Associates a discovered resource ID from Application Discovery Service (ADS) with a migration task.

" + }, + "CreateProgressUpdateStream":{ + "name":"CreateProgressUpdateStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateProgressUpdateStreamRequest"}, + "output":{"shape":"CreateProgressUpdateStreamResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"DryRunOperation"}, + {"shape":"UnauthorizedOperation"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Creates a progress update stream, which is an AWS resource used for access control as well as a namespace for migration task names that is implicitly linked to your AWS account. The stream must uniquely identify the migration tool, as it is used for all updates made by the tool; however, it does not need to be unique for each AWS account because it is scoped to the AWS account.
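
A minimal sketch, using a placeholder stream name that identifies the migration tool:

    import boto3

    mgh = boto3.client("mgh")
    mgh.create_progress_update_stream(
        ProgressUpdateStreamName="MyMigrationTool"  # placeholder tool name
    )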

" + }, + "DeleteProgressUpdateStream":{ + "name":"DeleteProgressUpdateStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteProgressUpdateStreamRequest"}, + "output":{"shape":"DeleteProgressUpdateStreamResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"DryRunOperation"}, + {"shape":"UnauthorizedOperation"}, + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Deletes a progress update stream, including all of its tasks, which was previously created as an AWS resource used for access control. This API has the following traits:

" + }, + "DescribeApplicationState":{ + "name":"DescribeApplicationState", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeApplicationStateRequest"}, + "output":{"shape":"DescribeApplicationStateResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidInputException"}, + {"shape":"PolicyErrorException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Gets the migration status of an application.

" + }, + "DescribeMigrationTask":{ + "name":"DescribeMigrationTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMigrationTaskRequest"}, + "output":{"shape":"DescribeMigrationTaskResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves a list of all attributes associated with a specific migration task.

" + }, + "DisassociateCreatedArtifact":{ + "name":"DisassociateCreatedArtifact", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateCreatedArtifactRequest"}, + "output":{"shape":"DisassociateCreatedArtifactResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"DryRunOperation"}, + {"shape":"UnauthorizedOperation"}, + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Disassociates a created artifact of an AWS resource with a migration task performed by a migration tool that was previously associated. This API has the following traits:

" + }, + "DisassociateDiscoveredResource":{ + "name":"DisassociateDiscoveredResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateDiscoveredResourceRequest"}, + "output":{"shape":"DisassociateDiscoveredResourceResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"DryRunOperation"}, + {"shape":"UnauthorizedOperation"}, + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Disassociate an Application Discovery Service (ADS) discovered resource from a migration task.

" + }, + "ImportMigrationTask":{ + "name":"ImportMigrationTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportMigrationTaskRequest"}, + "output":{"shape":"ImportMigrationTaskResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"DryRunOperation"}, + {"shape":"UnauthorizedOperation"}, + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Registers a new migration task which represents a server, database, etc., being migrated to AWS by a migration tool.

This API is a prerequisite to calling the NotifyMigrationTaskState API as the migration tool must first register the migration task with Migration Hub.
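
A minimal sketch of that ordering: the task is registered first, then its status is reported. Stream and task names are placeholders:

    from datetime import datetime
    import boto3

    mgh = boto3.client("mgh")
    mgh.import_migration_task(
        ProgressUpdateStream="MyMigrationTool",
        MigrationTaskName="db-server-01",
    )
    mgh.notify_migration_task_state(
        ProgressUpdateStream="MyMigrationTool",
        MigrationTaskName="db-server-01",
        Task={"Status": "IN_PROGRESS", "ProgressPercent": 25},
        UpdateDateTime=datetime.utcnow(),
        NextUpdateSeconds=300,  # the next update is expected within 5 minutes
    )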

" + }, + "ListCreatedArtifacts":{ + "name":"ListCreatedArtifacts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListCreatedArtifactsRequest"}, + "output":{"shape":"ListCreatedArtifactsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Lists the created artifacts attached to a given migration task in an update stream. This API has the following traits:

" + }, + "ListDiscoveredResources":{ + "name":"ListDiscoveredResources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDiscoveredResourcesRequest"}, + "output":{"shape":"ListDiscoveredResourcesResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Lists discovered resources associated with the given MigrationTask.

" + }, + "ListMigrationTasks":{ + "name":"ListMigrationTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListMigrationTasksRequest"}, + "output":{"shape":"ListMigrationTasksResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidInputException"}, + {"shape":"PolicyErrorException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Lists all, or filtered by resource name, migration tasks associated with the user account making this call. This API has the following traits:

" + }, + "ListProgressUpdateStreams":{ + "name":"ListProgressUpdateStreams", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListProgressUpdateStreamsRequest"}, + "output":{"shape":"ListProgressUpdateStreamsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Lists progress update streams associated with the user account making this call.

" + }, + "NotifyApplicationState":{ + "name":"NotifyApplicationState", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"NotifyApplicationStateRequest"}, + "output":{"shape":"NotifyApplicationStateResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"DryRunOperation"}, + {"shape":"UnauthorizedOperation"}, + {"shape":"InvalidInputException"}, + {"shape":"PolicyErrorException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Sets the migration state of an application. For a given application identified by the value passed to ApplicationId, its status is set or updated by passing one of three values to Status: NOT_STARTED | IN_PROGRESS | COMPLETED.
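
A minimal sketch; the ApplicationId is a placeholder configurationId obtained from Application Discovery Service:

    import boto3

    mgh = boto3.client("mgh")
    mgh.notify_application_state(
        ApplicationId="d-application-example",  # placeholder ADS configurationId
        Status="IN_PROGRESS",
    )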

" + }, + "NotifyMigrationTaskState":{ + "name":"NotifyMigrationTaskState", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"NotifyMigrationTaskStateRequest"}, + "output":{"shape":"NotifyMigrationTaskStateResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"DryRunOperation"}, + {"shape":"UnauthorizedOperation"}, + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Notifies Migration Hub of the current status, progress, or other detail regarding a migration task. This API has the following traits:

" + }, + "PutResourceAttributes":{ + "name":"PutResourceAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutResourceAttributesRequest"}, + "output":{"shape":"PutResourceAttributesResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"DryRunOperation"}, + {"shape":"UnauthorizedOperation"}, + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Provides identifying details of the resource being migrated so that it can be associated in the Application Discovery Service (ADS) repository. This association occurs asynchronously after PutResourceAttributes returns.

Keep in mind that subsequent calls to PutResourceAttributes will override previously stored attributes. For example, if it is first called with a MAC address but you later want to add an IP address, you must call it with both the IP and MAC addresses to prevent the MAC address from being overridden.

Because this is an asynchronous call, it will always return 200, whether an association occurs or not. To confirm if an association was found based on the provided details, call ListAssociatedResource.
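
A minimal sketch of the override behavior described above: because each call replaces the stored list, the MAC address is resent alongside the newly added IP address. All identifiers are placeholders:

    import boto3

    mgh = boto3.client("mgh")
    mgh.put_resource_attributes(
        ProgressUpdateStream="MyMigrationTool",
        MigrationTaskName="db-server-01",
        ResourceAttributeList=[
            {"Type": "MAC_ADDRESS", "Value": "00:1B:44:11:3A:B7"},
            {"Type": "IPV4_ADDRESS", "Value": "10.0.0.12"},
        ],
    )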

" + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Exception raised when the account making the call is not whitelisted or there are other authentication errors.

", + "exception":true + }, + "ApplicationId":{ + "type":"string", + "max":1600, + "min":1 + }, + "ApplicationStatus":{ + "type":"string", + "enum":[ + "NOT_STARTED", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "AssociateCreatedArtifactRequest":{ + "type":"structure", + "required":[ + "ProgressUpdateStream", + "MigrationTaskName", + "CreatedArtifact" + ], + "members":{ + "ProgressUpdateStream":{ + "shape":"ProgressUpdateStream", + "documentation":"

The name of the ProgressUpdateStream.

" + }, + "MigrationTaskName":{ + "shape":"MigrationTaskName", + "documentation":"

Unique identifier that references the migration task.

" + }, + "CreatedArtifact":{ + "shape":"CreatedArtifact", + "documentation":"

An ARN of the AWS resource related to the migration (e.g., AMI, EC2 instance, RDS instance, etc.)

" + }, + "DryRun":{ + "shape":"DryRun", + "documentation":"

Optional boolean flag to indicate whether any effect should take place. Used to test if the caller has permission to make the call.

" + } + } + }, + "AssociateCreatedArtifactResult":{ + "type":"structure", + "members":{ + } + }, + "AssociateDiscoveredResourceRequest":{ + "type":"structure", + "required":[ + "ProgressUpdateStream", + "MigrationTaskName", + "DiscoveredResource" + ], + "members":{ + "ProgressUpdateStream":{ + "shape":"ProgressUpdateStream", + "documentation":"

The name of the ProgressUpdateStream.

" + }, + "MigrationTaskName":{ + "shape":"MigrationTaskName", + "documentation":"

The identifier given to the MigrationTask.

" + }, + "DiscoveredResource":{ + "shape":"DiscoveredResource", + "documentation":"

Object representing a Resource.

" + }, + "DryRun":{ + "shape":"DryRun", + "documentation":"

Optional boolean flag to indicate whether any effect should take place. Used to test if the caller has permission to make the call.

" + } + } + }, + "AssociateDiscoveredResourceResult":{ + "type":"structure", + "members":{ + } + }, + "ConfigurationId":{ + "type":"string", + "min":1 + }, + "CreateProgressUpdateStreamRequest":{ + "type":"structure", + "required":["ProgressUpdateStreamName"], + "members":{ + "ProgressUpdateStreamName":{ + "shape":"ProgressUpdateStream", + "documentation":"

The name of the ProgressUpdateStream.

" + }, + "DryRun":{ + "shape":"DryRun", + "documentation":"

Optional boolean flag to indicate whether any effect should take place. Used to test if the caller has permission to make the call.

" + } + } + }, + "CreateProgressUpdateStreamResult":{ + "type":"structure", + "members":{ + } + }, + "CreatedArtifact":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"CreatedArtifactName", + "documentation":"

An ARN that uniquely identifies the result of a migration task.

" + }, + "Description":{ + "shape":"CreatedArtifactDescription", + "documentation":"

A description that can be free-form text to record additional detail about the artifact for clarity or for later reference.

" + } + }, + "documentation":"

An ARN of the AWS cloud resource target receiving the migration (e.g., AMI, EC2 instance, RDS instance, etc.).

" + }, + "CreatedArtifactDescription":{ + "type":"string", + "max":500, + "min":0 + }, + "CreatedArtifactList":{ + "type":"list", + "member":{"shape":"CreatedArtifact"} + }, + "CreatedArtifactName":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"arn:[a-z-]+:[a-z0-9-]+:(?:[a-z0-9-]+|):(?:[0-9]{12}|):.*" + }, + "DeleteProgressUpdateStreamRequest":{ + "type":"structure", + "required":["ProgressUpdateStreamName"], + "members":{ + "ProgressUpdateStreamName":{ + "shape":"ProgressUpdateStream", + "documentation":"

The name of the ProgressUpdateStream.

" + }, + "DryRun":{ + "shape":"DryRun", + "documentation":"

Optional boolean flag to indicate whether any effect should take place. Used to test if the caller has permission to make the call.

" + } + } + }, + "DeleteProgressUpdateStreamResult":{ + "type":"structure", + "members":{ + } + }, + "DescribeApplicationStateRequest":{ + "type":"structure", + "required":["ApplicationId"], + "members":{ + "ApplicationId":{ + "shape":"ApplicationId", + "documentation":"

The configurationId in ADS that uniquely identifies the grouped application.

" + } + } + }, + "DescribeApplicationStateResult":{ + "type":"structure", + "members":{ + "ApplicationStatus":{ + "shape":"ApplicationStatus", + "documentation":"

Status of the application - Not Started, In-Progress, Complete.

" + }, + "LastUpdatedTime":{ + "shape":"UpdateDateTime", + "documentation":"

The timestamp when the application status was last updated.

" + } + } + }, + "DescribeMigrationTaskRequest":{ + "type":"structure", + "required":[ + "ProgressUpdateStream", + "MigrationTaskName" + ], + "members":{ + "ProgressUpdateStream":{ + "shape":"ProgressUpdateStream", + "documentation":"

The name of the ProgressUpdateStream.

" + }, + "MigrationTaskName":{ + "shape":"MigrationTaskName", + "documentation":"

The identifier given to the MigrationTask.

" + } + } + }, + "DescribeMigrationTaskResult":{ + "type":"structure", + "members":{ + "MigrationTask":{ + "shape":"MigrationTask", + "documentation":"

Object encapsulating information about the migration task.

" + } + } + }, + "DisassociateCreatedArtifactRequest":{ + "type":"structure", + "required":[ + "ProgressUpdateStream", + "MigrationTaskName", + "CreatedArtifactName" + ], + "members":{ + "ProgressUpdateStream":{ + "shape":"ProgressUpdateStream", + "documentation":"

The name of the ProgressUpdateStream.

" + }, + "MigrationTaskName":{ + "shape":"MigrationTaskName", + "documentation":"

Unique identifier that references the migration task to be disassociated with the artifact.

" + }, + "CreatedArtifactName":{ + "shape":"CreatedArtifactName", + "documentation":"

An ARN of the AWS resource related to the migration (e.g., AMI, EC2 instance, RDS instance, etc.)

" + }, + "DryRun":{ + "shape":"DryRun", + "documentation":"

Optional boolean flag to indicate whether any effect should take place. Used to test if the caller has permission to make the call.

" + } + } + }, + "DisassociateCreatedArtifactResult":{ + "type":"structure", + "members":{ + } + }, + "DisassociateDiscoveredResourceRequest":{ + "type":"structure", + "required":[ + "ProgressUpdateStream", + "MigrationTaskName", + "ConfigurationId" + ], + "members":{ + "ProgressUpdateStream":{ + "shape":"ProgressUpdateStream", + "documentation":"

The name of the ProgressUpdateStream.

" + }, + "MigrationTaskName":{ + "shape":"MigrationTaskName", + "documentation":"

The identifier given to the MigrationTask.

" + }, + "ConfigurationId":{ + "shape":"ConfigurationId", + "documentation":"

ConfigurationId of the ADS resource to be disassociated.

" + }, + "DryRun":{ + "shape":"DryRun", + "documentation":"

Optional boolean flag to indicate whether any effect should take place. Used to test if the caller has permission to make the call.

" + } + } + }, + "DisassociateDiscoveredResourceResult":{ + "type":"structure", + "members":{ + } + }, + "DiscoveredResource":{ + "type":"structure", + "required":["ConfigurationId"], + "members":{ + "ConfigurationId":{ + "shape":"ConfigurationId", + "documentation":"

The configurationId in ADS that uniquely identifies the on-premise resource.

" + }, + "Description":{ + "shape":"DiscoveredResourceDescription", + "documentation":"

A description that can be free-form text to record additional detail about the discovered resource for clarity or later reference.

" + } + }, + "documentation":"

Object representing the on-premises resource being migrated.

" + }, + "DiscoveredResourceDescription":{ + "type":"string", + "max":500, + "min":0 + }, + "DiscoveredResourceList":{ + "type":"list", + "member":{"shape":"DiscoveredResource"} + }, + "DryRun":{"type":"boolean"}, + "DryRunOperation":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Exception raised to indicate a successfully authorized action when the DryRun flag is set to \"true\".

", + "exception":true + }, + "ErrorMessage":{"type":"string"}, + "ImportMigrationTaskRequest":{ + "type":"structure", + "required":[ + "ProgressUpdateStream", + "MigrationTaskName" + ], + "members":{ + "ProgressUpdateStream":{ + "shape":"ProgressUpdateStream", + "documentation":"

The name of the ProgressUpdateStream.

" + }, + "MigrationTaskName":{ + "shape":"MigrationTaskName", + "documentation":"

Unique identifier that references the migration task.

" + }, + "DryRun":{ + "shape":"DryRun", + "documentation":"

Optional boolean flag to indicate whether any effect should take place. Used to test if the caller has permission to make the call.

" + } + } + }, + "ImportMigrationTaskResult":{ + "type":"structure", + "members":{ + } + }, + "InternalServerError":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Exception raised when an internal, configuration, or dependency error is encountered.

", + "exception":true, + "fault":true + }, + "InvalidInputException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Exception raised when the provided input violates a policy constraint or is entered in the wrong format or data type.

", + "exception":true + }, + "LatestResourceAttributeList":{ + "type":"list", + "member":{"shape":"ResourceAttribute"}, + "max":100, + "min":0 + }, + "ListCreatedArtifactsRequest":{ + "type":"structure", + "required":[ + "ProgressUpdateStream", + "MigrationTaskName" + ], + "members":{ + "ProgressUpdateStream":{ + "shape":"ProgressUpdateStream", + "documentation":"

The name of the ProgressUpdateStream.

" + }, + "MigrationTaskName":{ + "shape":"MigrationTaskName", + "documentation":"

Unique identifier that references the migration task.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

If a NextToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in NextToken.

" + }, + "MaxResults":{ + "shape":"MaxResultsCreatedArtifacts", + "documentation":"

Maximum number of results to be returned per page.

" + } + } + }, + "ListCreatedArtifactsResult":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "documentation":"

If there are more created artifacts than the max result, return the next token to be passed to the next call as a bookmark of where to start from.

" + }, + "CreatedArtifactList":{ + "shape":"CreatedArtifactList", + "documentation":"

List of created artifacts up to the maximum number of results specified in the request.

" + } + } + }, + "ListDiscoveredResourcesRequest":{ + "type":"structure", + "required":[ + "ProgressUpdateStream", + "MigrationTaskName" + ], + "members":{ + "ProgressUpdateStream":{ + "shape":"ProgressUpdateStream", + "documentation":"

The name of the ProgressUpdateStream.

" + }, + "MigrationTaskName":{ + "shape":"MigrationTaskName", + "documentation":"

The name of the MigrationTask.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

If a NextToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in NextToken.

" + }, + "MaxResults":{ + "shape":"MaxResultsResources", + "documentation":"

The maximum number of results returned per page.

" + } + } + }, + "ListDiscoveredResourcesResult":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "documentation":"

If there are more discovered resources than the max result, return the next token to be passed to the next call as a bookmark of where to start from.

" + }, + "DiscoveredResourceList":{ + "shape":"DiscoveredResourceList", + "documentation":"

Returned list of discovered resources associated with the given MigrationTask.

" + } + } + }, + "ListMigrationTasksRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "documentation":"

If a NextToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in NextToken.
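
Since paginators-1.json defines no paginators for this service, a manual loop over NextToken is one way to drain the results; a minimal sketch:

    import boto3

    mgh = boto3.client("mgh")
    tasks, kwargs = [], {"MaxResults": 100}
    while True:
        page = mgh.list_migration_tasks(**kwargs)
        tasks.extend(page.get("MigrationTaskSummaryList", []))
        token = page.get("NextToken")
        if not token:
            break
        kwargs["NextToken"] = token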

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

Value to specify how many results are returned per page.

" + }, + "ResourceName":{ + "shape":"ResourceName", + "documentation":"

Filter migration tasks by discovered resource name.

" + } + } + }, + "ListMigrationTasksResult":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "documentation":"

If there are more migration tasks than the max result, return the next token to be passed to the next call as a bookmark of where to start from.

" + }, + "MigrationTaskSummaryList":{ + "shape":"MigrationTaskSummaryList", + "documentation":"

Lists the migration task's summary which includes: MigrationTaskName, ProgressPercent, ProgressUpdateStream, Status, and the UpdateDateTime for each task.

" + } + } + }, + "ListProgressUpdateStreamsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "documentation":"

If a NextToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in NextToken.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

Filter to limit the maximum number of results to list per page.

" + } + } + }, + "ListProgressUpdateStreamsResult":{ + "type":"structure", + "members":{ + "ProgressUpdateStreamSummaryList":{ + "shape":"ProgressUpdateStreamSummaryList", + "documentation":"

List of progress update streams up to the max number of results passed in the input.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

If there are more streams created than the max result, return the next token to be passed to the next call as a bookmark of where to start from.

" + } + } + }, + "MaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "MaxResultsCreatedArtifacts":{ + "type":"integer", + "box":true, + "max":10, + "min":1 + }, + "MaxResultsResources":{ + "type":"integer", + "box":true, + "max":10, + "min":1 + }, + "MigrationTask":{ + "type":"structure", + "members":{ + "ProgressUpdateStream":{ + "shape":"ProgressUpdateStream", + "documentation":"

A name that identifies the vendor of the migration tool being used.

" + }, + "MigrationTaskName":{ + "shape":"MigrationTaskName", + "documentation":"

Unique identifier that references the migration task.

" + }, + "Task":{ + "shape":"Task", + "documentation":"

Task object encapsulating task information.

" + }, + "UpdateDateTime":{ + "shape":"UpdateDateTime", + "documentation":"

The timestamp when the task was gathered.

" + }, + "ResourceAttributeList":{ + "shape":"LatestResourceAttributeList", + "documentation":"

" + } + }, + "documentation":"

Represents a migration task in a migration tool.

" + }, + "MigrationTaskName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[^:|]+" + }, + "MigrationTaskSummary":{ + "type":"structure", + "members":{ + "ProgressUpdateStream":{ + "shape":"ProgressUpdateStream", + "documentation":"

An AWS resource used for access control. It should uniquely identify the migration tool as it is used for all updates made by the tool.

" + }, + "MigrationTaskName":{ + "shape":"MigrationTaskName", + "documentation":"

Unique identifier that references the migration task.

" + }, + "Status":{ + "shape":"Status", + "documentation":"

Status of the task.

" + }, + "ProgressPercent":{ + "shape":"ProgressPercent", + "documentation":"

" + }, + "StatusDetail":{ + "shape":"StatusDetail", + "documentation":"

Detail information of what is being done within the overall status state.

" + }, + "UpdateDateTime":{ + "shape":"UpdateDateTime", + "documentation":"

The timestamp when the task was gathered.

" + } + }, + "documentation":"

MigrationTaskSummary includes MigrationTaskName, ProgressPercent, ProgressUpdateStream, Status, and UpdateDateTime for each task.

" + }, + "MigrationTaskSummaryList":{ + "type":"list", + "member":{"shape":"MigrationTaskSummary"} + }, + "NextUpdateSeconds":{ + "type":"integer", + "min":0 + }, + "NotifyApplicationStateRequest":{ + "type":"structure", + "required":[ + "ApplicationId", + "Status" + ], + "members":{ + "ApplicationId":{ + "shape":"ApplicationId", + "documentation":"

The configurationId in ADS that uniquely identifies the grouped application.

" + }, + "Status":{ + "shape":"ApplicationStatus", + "documentation":"

Status of the application - Not Started, In-Progress, Complete.

" + }, + "DryRun":{ + "shape":"DryRun", + "documentation":"

Optional boolean flag to indicate whether any effect should take place. Used to test if the caller has permission to make the call.

" + } + } + }, + "NotifyApplicationStateResult":{ + "type":"structure", + "members":{ + } + }, + "NotifyMigrationTaskStateRequest":{ + "type":"structure", + "required":[ + "ProgressUpdateStream", + "MigrationTaskName", + "Task", + "UpdateDateTime", + "NextUpdateSeconds" + ], + "members":{ + "ProgressUpdateStream":{ + "shape":"ProgressUpdateStream", + "documentation":"

The name of the ProgressUpdateStream.

" + }, + "MigrationTaskName":{ + "shape":"MigrationTaskName", + "documentation":"

Unique identifier that references the migration task.

" + }, + "Task":{ + "shape":"Task", + "documentation":"

Information about the task's progress and status.

" + }, + "UpdateDateTime":{ + "shape":"UpdateDateTime", + "documentation":"

The timestamp when the task was gathered.

" + }, + "NextUpdateSeconds":{ + "shape":"NextUpdateSeconds", + "documentation":"

Number of seconds after the UpdateDateTime within which Migration Hub can expect an update. If Migration Hub does not receive an update within the specified interval, the migration task will be considered stale.

" + }, + "DryRun":{ + "shape":"DryRun", + "documentation":"

Optional boolean flag to indicate whether any effect should take place. Used to test if the caller has permission to make the call.

" + } + } + }, + "NotifyMigrationTaskStateResult":{ + "type":"structure", + "members":{ + } + }, + "PolicyErrorException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Exception raised when there are problems accessing Application Discovery Service (ADS); most likely because a policy is misconfigured or the ADSCaller role is missing or not configured correctly.

", + "exception":true + }, + "ProgressPercent":{ + "type":"integer", + "box":true, + "max":100, + "min":0 + }, + "ProgressUpdateStream":{ + "type":"string", + "max":50, + "min":1, + "pattern":"[^/:|\\000-\\037]+" + }, + "ProgressUpdateStreamSummary":{ + "type":"structure", + "members":{ + "ProgressUpdateStreamName":{ + "shape":"ProgressUpdateStream", + "documentation":"

The name of the ProgressUpdateStream.

" + } + }, + "documentation":"

Summary of the AWS resource used for access control that is implicitly linked to your AWS account.

" + }, + "ProgressUpdateStreamSummaryList":{ + "type":"list", + "member":{"shape":"ProgressUpdateStreamSummary"} + }, + "PutResourceAttributesRequest":{ + "type":"structure", + "required":[ + "ProgressUpdateStream", + "MigrationTaskName", + "ResourceAttributeList" + ], + "members":{ + "ProgressUpdateStream":{ + "shape":"ProgressUpdateStream", + "documentation":"

The name of the ProgressUpdateStream.

" + }, + "MigrationTaskName":{ + "shape":"MigrationTaskName", + "documentation":"

Unique identifier that references the migration task.

" + }, + "ResourceAttributeList":{ + "shape":"ResourceAttributeList", + "documentation":"

Information about the resource that is being migrated. This data will be used to map the task to a resource in the Application Discovery Service (ADS) repository.

" + }, + "DryRun":{ + "shape":"DryRun", + "documentation":"

Optional boolean flag to indicate whether any effect should take place. When set to true, the call only tests whether the caller has permission to make the call.
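A short hypothetical sketch of mapping a task to a discovered resource with this request shape; the stream, task name, and values are placeholders, and the attribute types come from the ResourceAttributeType enum defined later in this model:

    import boto3

    client = boto3.client('mgh', region_name='us-west-2')

    client.put_resource_attributes(
        ProgressUpdateStream='my-tool-stream',   # placeholder
        MigrationTaskName='db-migration-0001',   # placeholder
        ResourceAttributeList=[
            # Types come from the ResourceAttributeType enum in this model.
            {'Type': 'IPV4_ADDRESS', 'Value': '10.0.0.5'},
            {'Type': 'FQDN', 'Value': 'db01.example.com'},
        ],
    )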

" + } + } + }, + "PutResourceAttributesResult":{ + "type":"structure", + "members":{ + } + }, + "ResourceAttribute":{ + "type":"structure", + "required":[ + "Type", + "Value" + ], + "members":{ + "Type":{ + "shape":"ResourceAttributeType", + "documentation":"

Type of resource.

" + }, + "Value":{ + "shape":"ResourceAttributeValue", + "documentation":"

Value of the resource type.

" + } + }, + "documentation":"

Attribute associated with a resource.

" + }, + "ResourceAttributeList":{ + "type":"list", + "member":{"shape":"ResourceAttribute"}, + "max":100, + "min":1 + }, + "ResourceAttributeType":{ + "type":"string", + "enum":[ + "IPV4_ADDRESS", + "IPV6_ADDRESS", + "MAC_ADDRESS", + "FQDN", + "VM_MANAGER_ID", + "VM_MANAGED_OBJECT_REFERENCE", + "VM_NAME", + "VM_PATH", + "BIOS_ID", + "MOTHERBOARD_SERIAL_NUMBER", + "LABEL" + ] + }, + "ResourceAttributeValue":{ + "type":"string", + "max":256, + "min":1 + }, + "ResourceName":{ + "type":"string", + "max":1600, + "min":1 + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Exception raised when the request references a resource (ADS configuration, update stream, migration task, etc.) that does not exist in ADS (Application Discovery Service) or in Migration Hub's repository.

", + "exception":true + }, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Exception raised when the service encounters throttled communication with upstream dependencies or is overloaded with requests.

", + "exception":true, + "fault":true + }, + "Status":{ + "type":"string", + "enum":[ + "NOT_STARTED", + "IN_PROGRESS", + "FAILED", + "COMPLETED" + ] + }, + "StatusDetail":{ + "type":"string", + "max":500, + "min":0 + }, + "Task":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{ + "shape":"Status", + "documentation":"

Status of the task - Not Started, In-Progress, Complete, or Failed.

" + }, + "StatusDetail":{ + "shape":"StatusDetail", + "documentation":"

Details of task status as notified by a migration tool. A tool might use this field to provide clarifying information about the status that is unique to that tool or that explains an error state.

" + }, + "ProgressPercent":{ + "shape":"ProgressPercent", + "documentation":"

Indication of the percentage of the task that is complete.

" + } + }, + "documentation":"

Task object encapsulating task information.

" + }, + "Token":{"type":"string"}, + "UnauthorizedOperation":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Exception raised to indicate a request was not authorized when the DryRun flag is set to \"true\".

", + "exception":true + }, + "UpdateDateTime":{"type":"timestamp"} + }, + "documentation":"

" +} diff --git a/botocore/data/pinpoint/2016-12-01/service-2.json b/botocore/data/pinpoint/2016-12-01/service-2.json index fa7ab4c5..6ceb040b 100644 --- a/botocore/data/pinpoint/2016-12-01/service-2.json +++ b/botocore/data/pinpoint/2016-12-01/service-2.json @@ -9,6 +9,34 @@ "jsonVersion" : "1.1" }, "operations" : { + "CreateApp" : { + "name" : "CreateApp", + "http" : { + "method" : "POST", + "requestUri" : "/v1/apps", + "responseCode" : 201 + }, + "input" : { + "shape" : "CreateAppRequest" + }, + "output" : { + "shape" : "CreateAppResponse" + }, + "errors" : [ { + "shape" : "BadRequestException" + }, { + "shape" : "InternalServerErrorException" + }, { + "shape" : "ForbiddenException" + }, { + "shape" : "NotFoundException" + }, { + "shape" : "MethodNotAllowedException" + }, { + "shape" : "TooManyRequestsException" + } ], + "documentation" : "Used to create an app." + }, "CreateCampaign" : { "name" : "CreateCampaign", "http" : { @@ -149,6 +177,34 @@ } ], "documentation" : "Delete an APNS sandbox channel" }, + "DeleteApp" : { + "name" : "DeleteApp", + "http" : { + "method" : "DELETE", + "requestUri" : "/v1/apps/{application-id}", + "responseCode" : 200 + }, + "input" : { + "shape" : "DeleteAppRequest" + }, + "output" : { + "shape" : "DeleteAppResponse" + }, + "errors" : [ { + "shape" : "BadRequestException" + }, { + "shape" : "InternalServerErrorException" + }, { + "shape" : "ForbiddenException" + }, { + "shape" : "NotFoundException" + }, { + "shape" : "MethodNotAllowedException" + }, { + "shape" : "TooManyRequestsException" + } ], + "documentation" : "Deletes an app." + }, "DeleteCampaign" : { "name" : "DeleteCampaign", "http" : { @@ -373,6 +429,34 @@ } ], "documentation" : "Get an APNS sandbox channel" }, + "GetApp" : { + "name" : "GetApp", + "http" : { + "method" : "GET", + "requestUri" : "/v1/apps/{application-id}", + "responseCode" : 200 + }, + "input" : { + "shape" : "GetAppRequest" + }, + "output" : { + "shape" : "GetAppResponse" + }, + "errors" : [ { + "shape" : "BadRequestException" + }, { + "shape" : "InternalServerErrorException" + }, { + "shape" : "ForbiddenException" + }, { + "shape" : "NotFoundException" + }, { + "shape" : "MethodNotAllowedException" + }, { + "shape" : "TooManyRequestsException" + } ], + "documentation" : "Returns information about an app." + }, "GetApplicationSettings" : { "name" : "GetApplicationSettings", "http" : { @@ -401,6 +485,34 @@ } ], "documentation" : "Used to request the settings for an app." }, + "GetApps" : { + "name" : "GetApps", + "http" : { + "method" : "GET", + "requestUri" : "/v1/apps", + "responseCode" : 200 + }, + "input" : { + "shape" : "GetAppsRequest" + }, + "output" : { + "shape" : "GetAppsResponse" + }, + "errors" : [ { + "shape" : "BadRequestException" + }, { + "shape" : "InternalServerErrorException" + }, { + "shape" : "ForbiddenException" + }, { + "shape" : "NotFoundException" + }, { + "shape" : "MethodNotAllowedException" + }, { + "shape" : "TooManyRequestsException" + } ], + "documentation" : "Returns information about your apps." + }, "GetCampaign" : { "name" : "GetCampaign", "http" : { @@ -1250,7 +1362,7 @@ }, "Id" : { "shape" : "__string", - "documentation" : "Channel ID. Not used, only for backwards compatibility." + "documentation" : "Channel ID. Not used. Present only for backwards compatibility." }, "IsArchived" : { "shape" : "__boolean", @@ -1384,7 +1496,7 @@ }, "Platform" : { "shape" : "__string", - "documentation" : "The platform type. Will be APNS." + "documentation" : "The platform type. Will be APNS_SANDBOX." 
}, "Version" : { "shape" : "__integer", @@ -1474,7 +1586,7 @@ }, "ChannelType" : { "shape" : "ChannelType", - "documentation" : "Type of channel of this address" + "documentation" : "The channel type.\n\nValid values: GCM | APNS | SMS | EMAIL" }, "Context" : { "shape" : "MapOf__string", @@ -1495,6 +1607,20 @@ }, "documentation" : "Address configuration." }, + "ApplicationResponse" : { + "type" : "structure", + "members" : { + "Id" : { + "shape" : "__string", + "documentation" : "The unique application ID." + }, + "Name" : { + "shape" : "__string", + "documentation" : "The display name of the application." + } + }, + "documentation" : "Application Response." + }, "ApplicationSettingsResource" : { "type" : "structure", "members" : { @@ -1517,6 +1643,20 @@ }, "documentation" : "Application settings." }, + "ApplicationsResponse" : { + "type" : "structure", + "members" : { + "Item" : { + "shape" : "ListOfApplicationResponse", + "documentation" : "List of applications returned in this page." + }, + "NextToken" : { + "shape" : "__string", + "documentation" : "The string that you use in a subsequent request to get the next page of results in a paginated response." + } + }, + "documentation" : "Get Applications Result." + }, "AttributeDimension" : { "type" : "structure", "members" : { @@ -1560,6 +1700,10 @@ "shape" : "__string", "documentation" : "The email text body." }, + "FromAddress" : { + "shape" : "__string", + "documentation" : "The email address used to send the email from. Defaults to use FromAddress specified in the Email Channel." + }, "HtmlBody" : { "shape" : "__string", "documentation" : "The email html body." @@ -1717,6 +1861,36 @@ "type" : "string", "enum" : [ "GCM", "APNS", "APNS_SANDBOX", "ADM", "SMS", "EMAIL" ] }, + "CreateAppRequest" : { + "type" : "structure", + "members" : { + "CreateApplicationRequest" : { + "shape" : "CreateApplicationRequest" + } + }, + "required" : [ "CreateApplicationRequest" ], + "payload" : "CreateApplicationRequest" + }, + "CreateAppResponse" : { + "type" : "structure", + "members" : { + "ApplicationResponse" : { + "shape" : "ApplicationResponse" + } + }, + "required" : [ "ApplicationResponse" ], + "payload" : "ApplicationResponse" + }, + "CreateApplicationRequest" : { + "type" : "structure", + "members" : { + "Name" : { + "shape" : "__string", + "documentation" : "The display name of the application. Used in the Amazon Pinpoint console." + } + }, + "documentation" : "Application Request." + }, "CreateCampaignRequest" : { "type" : "structure", "members" : { @@ -1882,6 +2056,27 @@ "required" : [ "APNSSandboxChannelResponse" ], "payload" : "APNSSandboxChannelResponse" }, + "DeleteAppRequest" : { + "type" : "structure", + "members" : { + "ApplicationId" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "application-id" + } + }, + "required" : [ "ApplicationId" ] + }, + "DeleteAppResponse" : { + "type" : "structure", + "members" : { + "ApplicationResponse" : { + "shape" : "ApplicationResponse" + } + }, + "required" : [ "ApplicationResponse" ], + "payload" : "ApplicationResponse" + }, "DeleteCampaignRequest" : { "type" : "structure", "members" : { @@ -2086,7 +2281,7 @@ "members" : { "ApplicationId" : { "shape" : "__string", - "documentation" : "Application id" + "documentation" : "The unique ID of the application to which the email channel belongs." 
}, "CreationDate" : { "shape" : "__string", @@ -2148,7 +2343,7 @@ }, "ChannelType" : { "shape" : "ChannelType", - "documentation" : "The channel type.\n\nValid values: APNS, GCM" + "documentation" : "The channel type.\n\nValid values: GCM | APNS | SMS | EMAIL" }, "Demographic" : { "shape" : "EndpointDemographic", @@ -2176,7 +2371,7 @@ }, "OptOut" : { "shape" : "__string", - "documentation" : "Indicates whether a user has opted out of receiving messages with one of the following values:\n\nALL – User receives all messages.\nNONE – User receives no messages." + "documentation" : "Indicates whether a user has opted out of receiving messages with one of the following values:\n\nALL - User has opted out of all messages.\n\nNONE - Users has not opted out and receives all messages." }, "RequestId" : { "shape" : "__string", @@ -2280,7 +2475,7 @@ }, "ChannelType" : { "shape" : "ChannelType", - "documentation" : "The channel type.\n\nValid values: APNS, GCM" + "documentation" : "The channel type.\n\nValid values: GCM | APNS | SMS | EMAIL" }, "Demographic" : { "shape" : "EndpointDemographic", @@ -2304,7 +2499,7 @@ }, "OptOut" : { "shape" : "__string", - "documentation" : "Indicates whether a user has opted out of receiving messages with one of the following values:\n\nALL – User receives all messages.\nNONE – User receives no messages." + "documentation" : "Indicates whether a user has opted out of receiving messages with one of the following values:\n\nALL - User has opted out of all messages.\n\nNONE - Users has not opted out and receives all messages." }, "RequestId" : { "shape" : "__string", @@ -2334,7 +2529,7 @@ }, "ChannelType" : { "shape" : "ChannelType", - "documentation" : "The channel type.\n\nValid values: APNS, GCM" + "documentation" : "The channel type.\n\nValid values: GCM | APNS | SMS | EMAIL" }, "CohortId" : { "shape" : "__string", @@ -2370,7 +2565,7 @@ }, "OptOut" : { "shape" : "__string", - "documentation" : "Indicates whether a user has opted out of receiving messages with one of the following values:\n\nALL – User receives all messages.\nNONE – User receives no messages." + "documentation" : "Indicates whether a user has opted out of receiving messages with one of the following values:\n\nALL - User has opted out of all messages.\n\nNONE - Users has not opted out and receives all messages." }, "RequestId" : { "shape" : "__string", @@ -2492,7 +2687,7 @@ }, "Id" : { "shape" : "__string", - "documentation" : "Channel ID. Not used, only for backwards compatibility." + "documentation" : "Channel ID. Not used. Present only for backwards compatibility." 
}, "IsArchived" : { "shape" : "__boolean", @@ -2625,6 +2820,27 @@ "required" : [ "APNSSandboxChannelResponse" ], "payload" : "APNSSandboxChannelResponse" }, + "GetAppRequest" : { + "type" : "structure", + "members" : { + "ApplicationId" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "application-id" + } + }, + "required" : [ "ApplicationId" ] + }, + "GetAppResponse" : { + "type" : "structure", + "members" : { + "ApplicationResponse" : { + "shape" : "ApplicationResponse" + } + }, + "required" : [ "ApplicationResponse" ], + "payload" : "ApplicationResponse" + }, "GetApplicationSettingsRequest" : { "type" : "structure", "members" : { @@ -2646,6 +2862,31 @@ "required" : [ "ApplicationSettingsResource" ], "payload" : "ApplicationSettingsResource" }, + "GetAppsRequest" : { + "type" : "structure", + "members" : { + "PageSize" : { + "shape" : "__string", + "location" : "querystring", + "locationName" : "page-size" + }, + "Token" : { + "shape" : "__string", + "location" : "querystring", + "locationName" : "token" + } + } + }, + "GetAppsResponse" : { + "type" : "structure", + "members" : { + "ApplicationsResponse" : { + "shape" : "ApplicationsResponse" + } + }, + "required" : [ "ApplicationsResponse" ], + "payload" : "ApplicationsResponse" + }, "GetCampaignActivitiesRequest" : { "type" : "structure", "members" : { @@ -2662,12 +2903,14 @@ "PageSize" : { "shape" : "__string", "location" : "querystring", - "locationName" : "page-size" + "locationName" : "page-size", + "documentation" : "The number of entries you want on each page in the response." }, "Token" : { "shape" : "__string", "location" : "querystring", - "locationName" : "token" + "locationName" : "token", + "documentation" : "The NextToken string returned on a previous page that you use to get the next page of results in a paginated response." } }, "required" : [ "ApplicationId", "CampaignId" ] @@ -2755,12 +2998,14 @@ "PageSize" : { "shape" : "__string", "location" : "querystring", - "locationName" : "page-size" + "locationName" : "page-size", + "documentation" : "The number of entries you want on each page in the response." }, "Token" : { "shape" : "__string", "location" : "querystring", - "locationName" : "token" + "locationName" : "token", + "documentation" : "The NextToken string returned on a previous page that you use to get the next page of results in a paginated response." } }, "required" : [ "ApplicationId", "CampaignId" ] @@ -2786,12 +3031,14 @@ "PageSize" : { "shape" : "__string", "location" : "querystring", - "locationName" : "page-size" + "locationName" : "page-size", + "documentation" : "The number of entries you want on each page in the response." }, "Token" : { "shape" : "__string", "location" : "querystring", - "locationName" : "token" + "locationName" : "token", + "documentation" : "The NextToken string returned on a previous page that you use to get the next page of results in a paginated response." } }, "required" : [ "ApplicationId" ] @@ -2935,12 +3182,14 @@ "PageSize" : { "shape" : "__string", "location" : "querystring", - "locationName" : "page-size" + "locationName" : "page-size", + "documentation" : "The number of entries you want on each page in the response." }, "Token" : { "shape" : "__string", "location" : "querystring", - "locationName" : "token" + "locationName" : "token", + "documentation" : "The NextToken string returned on a previous page that you use to get the next page of results in a paginated response." 
} }, "required" : [ "ApplicationId" ] @@ -2966,7 +3215,8 @@ "PageSize" : { "shape" : "__string", "location" : "querystring", - "locationName" : "page-size" + "locationName" : "page-size", + "documentation" : "The number of entries you want on each page in the response." }, "SegmentId" : { "shape" : "__string", @@ -2976,7 +3226,8 @@ "Token" : { "shape" : "__string", "location" : "querystring", - "locationName" : "token" + "locationName" : "token", + "documentation" : "The NextToken string returned on a previous page that you use to get the next page of results in a paginated response." } }, "required" : [ "SegmentId", "ApplicationId" ] @@ -3059,7 +3310,8 @@ "PageSize" : { "shape" : "__string", "location" : "querystring", - "locationName" : "page-size" + "locationName" : "page-size", + "documentation" : "The number of entries you want on each page in the response." }, "SegmentId" : { "shape" : "__string", @@ -3069,7 +3321,8 @@ "Token" : { "shape" : "__string", "location" : "querystring", - "locationName" : "token" + "locationName" : "token", + "documentation" : "The NextToken string returned on a previous page that you use to get the next page of results in a paginated response." } }, "required" : [ "SegmentId", "ApplicationId" ] @@ -3095,12 +3348,14 @@ "PageSize" : { "shape" : "__string", "location" : "querystring", - "locationName" : "page-size" + "locationName" : "page-size", + "documentation" : "The number of entries you want on each page in the response." }, "Token" : { "shape" : "__string", "location" : "querystring", - "locationName" : "token" + "locationName" : "token", + "documentation" : "The NextToken string returned on a previous page that you use to get the next page of results in a paginated response." } }, "required" : [ "ApplicationId" ] @@ -3309,6 +3564,12 @@ "shape" : "ActivityResponse" } }, + "ListOfApplicationResponse" : { + "type" : "list", + "member" : { + "shape" : "ApplicationResponse" + } + }, "ListOfCampaignResponse" : { "type" : "list", "member" : { @@ -3445,6 +3706,10 @@ "shape" : "__string", "documentation" : "The URL that points to the media resource, for example a .mp4 or .gif file." }, + "RawContent" : { + "shape" : "__string", + "documentation" : "The Raw JSON formatted string to be used as the payload. This value overrides the message." + }, "SilentPush" : { "shape" : "__boolean", "documentation" : "Indicates if the message should display on the users device.\n\nSilent pushes can be used for Remote Configuration and Phone Home use cases. " @@ -3623,8 +3888,7 @@ } }, "required" : [ "EventStream" ], - "payload" : "EventStream", - "documentation": "PutEventStream Response" + "payload" : "EventStream" }, "QuietTime" : { "type" : "structure", @@ -3677,7 +3941,7 @@ "members" : { "ApplicationId" : { "shape" : "__string", - "documentation" : "Application id" + "documentation" : "The unique ID of the application to which the SMS channel belongs." }, "CreationDate" : { "shape" : "__string", diff --git a/botocore/data/rds/2014-10-31/service-2.json b/botocore/data/rds/2014-10-31/service-2.json index ccb180e5..b72b47ac 100644 --- a/botocore/data/rds/2014-10-31/service-2.json +++ b/botocore/data/rds/2014-10-31/service-2.json @@ -90,7 +90,7 @@ {"shape":"AuthorizationAlreadyExistsFault"}, {"shape":"AuthorizationQuotaExceededFault"} ], - "documentation":"

Enables ingress to a DBSecurityGroup using one of two forms of authorization. First, EC2 or VPC security groups can be added to the DBSecurityGroup if the application using the database is running on EC2 or VPC instances. Second, IP ranges are available if the application accessing your database is running on the Internet. Required parameters for this API are one of CIDR range, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId for non-VPC).

You cannot authorize ingress from an EC2 security group in one region to an Amazon RDS DB instance in another. You cannot authorize ingress from a VPC security group in one VPC to an Amazon RDS DB instance in another.

For an overview of CIDR ranges, go to the Wikipedia Tutorial.

" + "documentation":"

Enables ingress to a DBSecurityGroup using one of two forms of authorization. First, EC2 or VPC security groups can be added to the DBSecurityGroup if the application using the database is running on EC2 or VPC instances. Second, IP ranges are available if the application accessing your database is running on the Internet. Required parameters for this API are one of CIDR range, EC2SecurityGroupId for VPC, or (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or EC2SecurityGroupId for non-VPC).

You cannot authorize ingress from an EC2 security group in one AWS Region to an Amazon RDS DB instance in another. You cannot authorize ingress from a VPC security group in one VPC to an Amazon RDS DB instance in another.

For an overview of CIDR ranges, go to the Wikipedia Tutorial.

" }, "CopyDBClusterParameterGroup":{ "name":"CopyDBClusterParameterGroup", @@ -129,7 +129,7 @@ {"shape":"SnapshotQuotaExceededFault"}, {"shape":"KMSKeyNotAccessibleFault"} ], - "documentation":"

Copies a snapshot of a DB cluster.

To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.

You can copy an encrypted DB cluster snapshot from another AWS region. In that case, the region where you call the CopyDBClusterSnapshot action is the destination region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another region, you must provide the following values:

To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified by TargetDBClusterSnapshotIdentifier while that DB cluster snapshot is in \"copying\" status.

For more information on copying encrypted DB cluster snapshots from one region to another, see Copying a DB Cluster Snapshot in the Same Account, Either in the Same Region or Across Regions in the Amazon RDS User Guide.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

" + "documentation":"

Copies a snapshot of a DB cluster.

To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.

You can copy an encrypted DB cluster snapshot from another AWS Region. In that case, the AWS Region where you call the CopyDBClusterSnapshot action is the destination AWS Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another AWS Region, you must provide the following values:

To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified by TargetDBClusterSnapshotIdentifier while that DB cluster snapshot is in \"copying\" status.

For more information on copying encrypted DB cluster snapshots from one AWS Region to another, see Copying a DB Cluster Snapshot in the Same Account, Either in the Same Region or Across Regions in the Amazon RDS User Guide.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.
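For example, a cross-Region copy of an encrypted cluster snapshot might look like the following boto3 sketch. The identifiers and KMS alias are placeholders, and SourceRegion (which lets the SDK generate the PreSignedUrl described here) is available only in SDK versions that support it:

    import boto3

    # Call the API in the destination AWS Region.
    client = boto3.client('rds', region_name='us-east-1')

    client.copy_db_cluster_snapshot(
        SourceDBClusterSnapshotIdentifier=(
            'arn:aws:rds:us-west-2:123456789012:'
            'cluster-snapshot:my-cluster-snapshot1'      # placeholder ARN
        ),
        TargetDBClusterSnapshotIdentifier='my-cluster-snapshot1-copy',
        KmsKeyId='alias/my-destination-key',  # key valid in the destination Region
        SourceRegion='us-west-2',             # if supported, auto-generates PreSignedUrl
    )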

" }, "CopyDBParameterGroup":{ "name":"CopyDBParameterGroup", @@ -167,7 +167,7 @@ {"shape":"SnapshotQuotaExceededFault"}, {"shape":"KMSKeyNotAccessibleFault"} ], - "documentation":"

Copies the specified DB snapshot. The source DB snapshot must be in the \"available\" state.

You can copy a snapshot from one AWS region to another. In that case, the region where you call the CopyDBSnapshot action is the destination region for the DB snapshot copy.

You cannot copy an encrypted, shared DB snapshot from one AWS region to another.

For more information about copying snapshots, see Copying a DB Snapshot in the Amazon RDS User Guide.

" + "documentation":"

Copies the specified DB snapshot. The source DB snapshot must be in the \"available\" state.

You can copy a snapshot from one AWS Region to another. In that case, the AWS Region where you call the CopyDBSnapshot action is the destination AWS Region for the DB snapshot copy.

You cannot copy an encrypted, shared DB snapshot from one AWS Region to another.

For more information about copying snapshots, see Copying a DB Snapshot in the Amazon RDS User Guide.

" }, "CopyOptionGroup":{ "name":"CopyOptionGroup", @@ -318,7 +318,7 @@ {"shape":"StorageTypeNotSupportedFault"}, {"shape":"KMSKeyNotAccessibleFault"} ], - "documentation":"

Creates a DB instance for a DB instance running MySQL, MariaDB, or PostgreSQL that acts as a Read Replica of a source DB instance.

Amazon Aurora does not support this action. You must call the CreateDBInstance action to create a DB instance for an Aurora DB cluster.

All Read Replica DB instances are created as Single-AZ deployments with backups disabled. All other DB instance attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance, except as specified below.

The source DB instance must have backup retention enabled.

You can create an encrypted Read Replica in a different AWS Region than the source DB instance. In that case, the region where you call the CreateDBInstanceReadReplica action is the destination region of the encrypted Read Replica. The source DB instance must be encrypted.

To create an encrypted Read Replica in another AWS Region, you must provide the following values:

" + "documentation":"

Creates a new DB instance that acts as a Read Replica for an existing source DB instance. You can create a Read Replica for a DB instance running MySQL, MariaDB, or PostgreSQL.

Amazon Aurora does not support this action. You must call the CreateDBInstance action to create a DB instance for an Aurora DB cluster.

All Read Replica DB instances are created as Single-AZ deployments with backups disabled. All other DB instance attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance, except as specified below.

The source DB instance must have backup retention enabled.

For more information, see Working with PostgreSQL, MySQL, and MariaDB Read Replicas.
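A minimal sketch, assuming a source instance with backup retention enabled; both identifiers are placeholders:

    import boto3

    client = boto3.client('rds')

    # The replica starts as a Single-AZ deployment with backups disabled,
    # inheriting the other attributes of the source instance.
    client.create_db_instance_read_replica(
        DBInstanceIdentifier='mydbinstance-replica-1',   # placeholder
        SourceDBInstanceIdentifier='mydbinstance',       # placeholder
    )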

" }, "CreateDBParameterGroup":{ "name":"CreateDBParameterGroup", @@ -392,7 +392,7 @@ {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, {"shape":"InvalidSubnet"} ], - "documentation":"

Creates a new DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the region.

" + "documentation":"

Creates a new DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the AWS Region.
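For instance, a hypothetical subnet group spanning two Availability Zones (the name and subnet IDs are placeholders):

    import boto3

    client = boto3.client('rds')

    client.create_db_subnet_group(
        DBSubnetGroupName='my-subnet-group',
        DBSubnetGroupDescription='Subnets for mydbinstance',
        # At least two subnets in different AZs of the AWS Region.
        SubnetIds=['subnet-0a1b2c3d', 'subnet-4e5f6a7b'],
    )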

" }, "CreateEventSubscription":{ "name":"CreateEventSubscription", @@ -1009,7 +1009,7 @@ "shape":"SourceRegionMessage", "resultWrapper":"DescribeSourceRegionsResult" }, - "documentation":"

Returns a list of the source AWS regions where the current AWS region can create a Read Replica or copy a DB snapshot from. This API action supports pagination.

" + "documentation":"

Returns a list of the source AWS Regions where the current AWS Region can create a Read Replica or copy a DB snapshot from. This API action supports pagination.
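A small sketch of the call; the response field names follow the SourceRegion shape defined elsewhere in this model, not shown in this hunk:

    import boto3

    client = boto3.client('rds', region_name='us-east-1')

    # Lists the Regions this Region can replicate or copy snapshots from.
    for source in client.describe_source_regions()['SourceRegions']:
        print(source['RegionName'], source['Status'])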

" }, "DownloadDBLogFilePortion":{ "name":"DownloadDBLogFilePortion", @@ -1225,7 +1225,7 @@ {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, {"shape":"InvalidSubnet"} ], - "documentation":"

Modifies an existing DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the region.

" + "documentation":"

Modifies an existing DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the AWS Region.

" }, "ModifyEventSubscription":{ "name":"ModifyEventSubscription", @@ -1502,7 +1502,7 @@ {"shape":"OptionGroupNotFoundFault"}, {"shape":"StorageQuotaExceededFault"} ], - "documentation":"

Restores a DB cluster to an arbitrary point in time. Users can restore to any point in time before LatestRestorableTime for up to BackupRetentionPeriod days. The target DB cluster is created from the source DB cluster with the same configuration as the original DB cluster, except that the new DB cluster is created with the default DB security group.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.

" + "documentation":"

Restores a DB cluster to an arbitrary point in time. Users can restore to any point in time before LatestRestorableTime for up to BackupRetentionPeriod days. The target DB cluster is created from the source DB cluster with the same configuration as the original DB cluster, except that the new DB cluster is created with the default DB security group.

This action only restores the DB cluster, not the DB instances for that DB cluster. You must invoke the CreateDBInstance action to create DB instances for the restored DB cluster, specifying the identifier of the restored DB cluster in DBClusterIdentifier. You can create DB instances only after the RestoreDBClusterToPointInTime action has completed and the DB cluster is available.

For more information on Amazon Aurora, see Aurora on Amazon RDS in the Amazon RDS User Guide.
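To make the two-step workflow concrete, a rough sketch with placeholder identifiers; in practice you would wait for the restored cluster to become available before creating instances:

    import datetime
    import boto3

    client = boto3.client('rds')

    client.restore_db_cluster_to_point_in_time(
        DBClusterIdentifier='my-restored-cluster',   # placeholder
        SourceDBClusterIdentifier='my-cluster',      # placeholder
        RestoreToTime=datetime.datetime(2017, 8, 1, 12, 0),
    )

    # The restore creates only the cluster; DB instances must be
    # created separately, referencing the restored cluster.
    client.create_db_instance(
        DBInstanceIdentifier='my-restored-instance-1',
        DBInstanceClass='db.r3.large',
        Engine='aurora',
        DBClusterIdentifier='my-restored-cluster',
    )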

" }, "RestoreDBInstanceFromDBSnapshot":{ "name":"RestoreDBInstanceFromDBSnapshot", @@ -1959,7 +1959,7 @@ "members":{ "SourceDBClusterParameterGroupIdentifier":{ "shape":"String", - "documentation":"

The identifier or Amazon Resource Name (ARN) for the source DB cluster parameter group. For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

Constraints:

" + "documentation":"

The identifier or Amazon Resource Name (ARN) for the source DB cluster parameter group. For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

Constraints:

" }, "TargetDBClusterParameterGroupIdentifier":{ "shape":"String", @@ -1987,7 +1987,7 @@ "members":{ "SourceDBClusterSnapshotIdentifier":{ "shape":"String", - "documentation":"

The identifier of the DB cluster snapshot to copy. This parameter is not case-sensitive.

You cannot copy an encrypted, shared DB cluster snapshot from one AWS region to another.

Constraints:

Example: my-cluster-snapshot1

" + "documentation":"

The identifier of the DB cluster snapshot to copy. This parameter is not case-sensitive.

You cannot copy an encrypted, shared DB cluster snapshot from one AWS Region to another.

Constraints:

Example: my-cluster-snapshot1

" }, "TargetDBClusterSnapshotIdentifier":{ "shape":"String", @@ -1995,11 +1995,11 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

The AWS KMS key ID for an encrypted DB cluster snapshot. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

If you copy an unencrypted DB cluster snapshot and specify a value for the KmsKeyId parameter, Amazon RDS encrypts the target DB cluster snapshot using the specified KMS encryption key.

If you copy an encrypted DB cluster snapshot from your AWS account, you can specify a value for KmsKeyId to encrypt the copy with a new KMS encryption key. If you don't specify a value for KmsKeyId, then the copy of the DB cluster snapshot is encrypted with the same KMS key as the source DB cluster snapshot.

If you copy an encrypted DB cluster snapshot that is shared from another AWS account, then you must specify a value for KmsKeyId.

To copy an encrypted DB cluster snapshot to another region, you must set KmsKeyId to the KMS key ID you want to use to encrypt the copy of the DB cluster snapshot in the destination region. KMS encryption keys are specific to the region that they are created in, and you cannot use encryption keys from one region in another region.

" + "documentation":"

The AWS KMS key ID for an encrypted DB cluster snapshot. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

If you copy an unencrypted DB cluster snapshot and specify a value for the KmsKeyId parameter, Amazon RDS encrypts the target DB cluster snapshot using the specified KMS encryption key.

If you copy an encrypted DB cluster snapshot from your AWS account, you can specify a value for KmsKeyId to encrypt the copy with a new KMS encryption key. If you don't specify a value for KmsKeyId, then the copy of the DB cluster snapshot is encrypted with the same KMS key as the source DB cluster snapshot.

If you copy an encrypted DB cluster snapshot that is shared from another AWS account, then you must specify a value for KmsKeyId.

To copy an encrypted DB cluster snapshot to another AWS Region, you must set KmsKeyId to the KMS key ID you want to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region. KMS encryption keys are specific to the AWS Region that they are created in, and you cannot use encryption keys from one AWS Region in another AWS Region.

" }, "PreSignedUrl":{ "shape":"String", - "documentation":"

The URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot API action in the AWS region that contains the source DB cluster snapshot to copy. The PreSignedUrl parameter must be used when copying an encrypted DB cluster snapshot from another AWS region.

The pre-signed URL must be a valid request for the CopyDBSClusterSnapshot API action that can be executed in the source region that contains the encrypted DB cluster snapshot to be copied. The pre-signed URL request must contain the following parameter values:

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.

" + "documentation":"

The URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot API action in the AWS Region that contains the source DB cluster snapshot to copy. The PreSignedUrl parameter must be used when copying an encrypted DB cluster snapshot from another AWS Region.

The pre-signed URL must be a valid request for the CopyDBClusterSnapshot API action that can be executed in the source AWS Region that contains the encrypted DB cluster snapshot to be copied. The pre-signed URL request must contain the following parameter values:

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.

" }, "CopyTags":{ "shape":"BooleanOptional", @@ -2054,7 +2054,7 @@ "members":{ "SourceDBSnapshotIdentifier":{ "shape":"String", - "documentation":"

The identifier for the source DB snapshot.

If the source snapshot is in the same region as the copy, specify a valid DB snapshot identifier. For example, rds:mysql-instance1-snapshot-20130805.

If the source snapshot is in a different region than the copy, specify a valid DB snapshot ARN. For example, arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20130805.

If you are copying from a shared manual DB snapshot, this parameter must be the Amazon Resource Name (ARN) of the shared DB snapshot.

If you are copying an encrypted snapshot this parameter must be in the ARN format for the source region, and must match the SourceDBSnapshotIdentifier in the PreSignedUrl parameter.

Constraints:

Example: rds:mydb-2012-04-02-00-01

Example: arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20130805

" + "documentation":"

The identifier for the source DB snapshot.

If the source snapshot is in the same AWS Region as the copy, specify a valid DB snapshot identifier. For example, you might specify rds:mysql-instance1-snapshot-20130805.

If the source snapshot is in a different AWS Region than the copy, specify a valid DB snapshot ARN. For example, you might specify arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20130805.

If you are copying from a shared manual DB snapshot, this parameter must be the Amazon Resource Name (ARN) of the shared DB snapshot.

If you are copying an encrypted snapshot, this parameter must be in the ARN format for the source AWS Region, and must match the SourceDBSnapshotIdentifier in the PreSignedUrl parameter.

Constraints:

Example: rds:mydb-2012-04-02-00-01

Example: arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20130805

" }, "TargetDBSnapshotIdentifier":{ "shape":"String", @@ -2062,7 +2062,7 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

The AWS KMS key ID for an encrypted DB snapshot. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

If you copy an encrypted DB snapshot from your AWS account, you can specify a value for this parameter to encrypt the copy with a new KMS encryption key. If you don't specify a value for this parameter, then the copy of the DB snapshot is encrypted with the same KMS key as the source DB snapshot.

If you copy an encrypted DB snapshot that is shared from another AWS account, then you must specify a value for this parameter.

If you specify this parameter when you copy an unencrypted snapshot, the copy is encrypted.

If you copy an encrypted snapshot to a different AWS region, then you must specify a KMS key for the destination AWS region. KMS encryption keys are specific to the region that they are created in, and you cannot use encryption keys from one region in another region.

" + "documentation":"

The AWS KMS key ID for an encrypted DB snapshot. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

If you copy an encrypted DB snapshot from your AWS account, you can specify a value for this parameter to encrypt the copy with a new KMS encryption key. If you don't specify a value for this parameter, then the copy of the DB snapshot is encrypted with the same KMS key as the source DB snapshot.

If you copy an encrypted DB snapshot that is shared from another AWS account, then you must specify a value for this parameter.

If you specify this parameter when you copy an unencrypted snapshot, the copy is encrypted.

If you copy an encrypted snapshot to a different AWS Region, then you must specify a KMS key for the destination AWS Region. KMS encryption keys are specific to the AWS Region that they are created in, and you cannot use encryption keys from one AWS Region in another AWS Region.

" }, "Tags":{"shape":"TagList"}, "CopyTags":{ @@ -2071,11 +2071,11 @@ }, "PreSignedUrl":{ "shape":"String", - "documentation":"

The URL that contains a Signature Version 4 signed request for the CopyDBSnapshot API action in the source AWS region that contains the source DB snapshot to copy.

You must specify this parameter when you copy an encrypted DB snapshot from another AWS region by using the Amazon RDS API. You can specify the source region option instead of this parameter when you copy an encrypted DB snapshot from another AWS region by using the AWS CLI.

The presigned URL must be a valid request for the CopyDBSnapshot API action that can be executed in the source region that contains the encrypted DB snapshot to be copied. The presigned URL request must contain the following parameter values:

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.

" + "documentation":"

The URL that contains a Signature Version 4 signed request for the CopyDBSnapshot API action in the source AWS Region that contains the source DB snapshot to copy.

You must specify this parameter when you copy an encrypted DB snapshot from another AWS Region by using the Amazon RDS API. You can specify the source region option instead of this parameter when you copy an encrypted DB snapshot from another AWS Region by using the AWS CLI.

The presigned URL must be a valid request for the CopyDBSnapshot API action that can be executed in the source AWS Region that contains the encrypted DB snapshot to be copied. The presigned URL request must contain the following parameter values:

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.

" }, "OptionGroupName":{ "shape":"String", - "documentation":"

The name of an option group to associate with the copy.

Specify this option if you are copying a snapshot from one AWS region to another, and your DB instance uses a non-default option group. If your source DB instance uses Transparent Data Encryption for Oracle or Microsoft SQL Server, you must specify this option when copying across regions. For more information, see Option Group Considerations.

" + "documentation":"

The name of an option group to associate with the copy of the snapshot.

Specify this option if you are copying a snapshot from one AWS Region to another, and your DB instance uses a nondefault option group. If your source DB instance uses Transparent Data Encryption for Oracle or Microsoft SQL Server, you must specify this option when copying across AWS Regions. For more information, see Option Group Considerations.
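Putting these parameters together, a speculative cross-Region copy of an encrypted snapshot whose source instance uses TDE; all identifiers are placeholders, and SourceRegion requires an SDK version that supports automatic presigning:

    import boto3

    client = boto3.client('rds', region_name='us-east-1')  # destination Region

    client.copy_db_snapshot(
        SourceDBSnapshotIdentifier=(
            'arn:aws:rds:us-west-2:123456789012:'
            'snapshot:mysql-instance1-snapshot-20130805'   # ARN form for cross-Region
        ),
        TargetDBSnapshotIdentifier='mysql-instance1-snapshot-copy',
        KmsKeyId='alias/my-destination-key',    # key in the destination Region
        OptionGroupName='my-tde-option-group',  # required when the source uses TDE
        SourceRegion='us-west-2',
    )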

" } }, "documentation":"

" @@ -2096,7 +2096,7 @@ "members":{ "SourceOptionGroupIdentifier":{ "shape":"String", - "documentation":"

The identifier or ARN for the source option group. For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

Constraints:

" + "documentation":"

The identifier or ARN for the source option group. For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

Constraints:

" }, "TargetOptionGroupIdentifier":{ "shape":"String", @@ -2181,11 +2181,11 @@ }, "PreferredBackupWindow":{ "shape":"String", - "documentation":"

The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter.

Default: A 30-minute window selected at random from an 8-hour block of time per region. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

Constraints:

" + "documentation":"

The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter.

Default: A 30-minute window selected at random from an 8-hour block of time per AWS Region. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

Constraints:

" }, "PreferredMaintenanceWindow":{ "shape":"String", - "documentation":"

The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

Format: ddd:hh24:mi-ddd:hh24:mi

Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

Constraints: Minimum 30-minute window.

" + "documentation":"

The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

Format: ddd:hh24:mi-ddd:hh24:mi

Default: A 30-minute window selected at random from an 8-hour block of time per AWS Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

Constraints: Minimum 30-minute window.

" }, "ReplicationSourceIdentifier":{ "shape":"String", @@ -2198,11 +2198,11 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

The KMS key identifier for an encrypted DB cluster.

The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are creating a DB cluster with the same AWS account that owns the KMS encryption key used to encrypt the new DB cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.

If the StorageEncrypted parameter is true, and you do not specify a value for the KmsKeyId parameter, then Amazon RDS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.

If you create a Read Replica of an encrypted DB cluster in another region, you must set KmsKeyId to a KMS key ID that is valid in the destination region. This key is used to encrypt the Read Replica in that region.

" + "documentation":"

The KMS key identifier for an encrypted DB cluster.

The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are creating a DB cluster with the same AWS account that owns the KMS encryption key used to encrypt the new DB cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.

If the StorageEncrypted parameter is true, and you do not specify a value for the KmsKeyId parameter, then Amazon RDS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

If you create a Read Replica of an encrypted DB cluster in another AWS Region, you must set KmsKeyId to a KMS key ID that is valid in the destination AWS Region. This key is used to encrypt the Read Replica in that AWS Region.

" }, "PreSignedUrl":{ "shape":"String", - "documentation":"

A URL that contains a Signature Version 4 signed request for the CreateDBCluster action to be called in the source region where the DB cluster will be replicated from. You only need to specify PreSignedUrl when you are performing cross-region replication from an encrypted DB cluster.

The pre-signed URL must be a valid request for the CreateDBCluster API action that can be executed in the source region that contains the encrypted DB cluster to be copied.

The pre-signed URL request must contain the following parameter values:

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.

" + "documentation":"

A URL that contains a Signature Version 4 signed request for the CreateDBCluster action to be called in the source AWS Region where the DB cluster will be replicated from. You only need to specify PreSignedUrl when you are performing cross-region replication from an encrypted DB cluster.

The pre-signed URL must be a valid request for the CreateDBCluster API action that can be executed in the source AWS Region that contains the encrypted DB cluster to be copied.

The pre-signed URL request must contain the following parameter values:

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.

" }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", @@ -2289,7 +2289,7 @@ }, "DBInstanceIdentifier":{ "shape":"String", - "documentation":"

The DB instance identifier. This parameter is stored as a lowercase string.

Constraints:

Example: mydbinstance

" + "documentation":"

The DB instance identifier. This parameter is stored as a lowercase string.

Constraints:

Example: mydbinstance

" }, "AllocatedStorage":{ "shape":"IntegerOptional", @@ -2301,15 +2301,15 @@ }, "Engine":{ "shape":"String", - "documentation":"

The name of the database engine to be used for this instance.

Not every database engine is available for every AWS region.

Valid Values:

" + "documentation":"

The name of the database engine to be used for this instance.

Not every database engine is available for every AWS Region.

Valid Values:

" }, "MasterUsername":{ "shape":"String", - "documentation":"

The name for the master database user.

Amazon Aurora

Not applicable. You specify the name for the master database user when you create your DB cluster.

MariaDB

Constraints:

Microsoft SQL Server

Constraints:

MySQL

Constraints:

Oracle

Constraints:

PostgreSQL

Constraints:

" + "documentation":"

The name for the master user.

Amazon Aurora

Not applicable. The name for the master user is managed by the DB cluster. For more information, see CreateDBCluster.

MariaDB

Constraints:

Microsoft SQL Server

Constraints:

MySQL

Constraints:

Oracle

Constraints:

PostgreSQL

Constraints:

" }, "MasterUserPassword":{ "shape":"String", - "documentation":"

The password for the master database user. Can be any printable ASCII character except \"/\", \"\"\", or \"@\".

Amazon Aurora

Not applicable. You specify the password for the master database user when you create your DB cluster.

MariaDB

Constraints: Must contain from 8 to 41 characters.

Microsoft SQL Server

Constraints: Must contain from 8 to 128 characters.

MySQL

Constraints: Must contain from 8 to 41 characters.

Oracle

Constraints: Must contain from 8 to 30 characters.

PostgreSQL

Constraints: Must contain from 8 to 128 characters.

" + "documentation":"

The password for the master user. Can contain any printable ASCII character except \"/\", \"\"\", or \"@\".

Amazon Aurora

Not applicable. The password for the master user is managed by the DB cluster. For more information, see CreateDBCluster.

MariaDB

Constraints: Must contain from 8 to 41 characters.

Microsoft SQL Server

Constraints: Must contain from 8 to 128 characters.

MySQL

Constraints: Must contain from 8 to 41 characters.

Oracle

Constraints: Must contain from 8 to 30 characters.

PostgreSQL

Constraints: Must contain from 8 to 128 characters.
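Tying the engine-specific constraints together, a minimal MySQL example with placeholder credentials; for Aurora, as noted above, these values are managed at the cluster level:

    import boto3

    client = boto3.client('rds')

    client.create_db_instance(
        DBInstanceIdentifier='mydbinstance',
        DBInstanceClass='db.t2.micro',
        Engine='mysql',
        AllocatedStorage=20,
        MasterUsername='admin',                 # placeholder
        MasterUserPassword='correct-horse-9',   # 8-41 chars for MySQL; no /, ", or @
    )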

" }, "DBSecurityGroups":{ "shape":"DBSecurityGroupNameList", @@ -2317,11 +2317,11 @@ }, "VpcSecurityGroupIds":{ "shape":"VpcSecurityGroupIdList", - "documentation":"

A list of EC2 VPC security groups to associate with this DB instance.

Default: The default EC2 VPC security group for the DB subnet group's VPC.

" + "documentation":"

A list of EC2 VPC security groups to associate with this DB instance.

Amazon Aurora

Not applicable. The associated list of EC2 VPC security groups is managed by the DB cluster. For more information, see CreateDBCluster.

Default: The default EC2 VPC security group for the DB subnet group's VPC.

" }, "AvailabilityZone":{ "shape":"String", - "documentation":"

The EC2 Availability Zone that the database instance will be created in. For information on regions and Availability Zones, see Regions and Availability Zones.

Default: A random, system-chosen Availability Zone in the endpoint's region.

Example: us-east-1d

Constraint: The AvailabilityZone parameter cannot be specified if the MultiAZ parameter is set to true. The specified Availability Zone must be in the same region as the current endpoint.

" + "documentation":"

The EC2 Availability Zone that the database instance will be created in. For information on regions and Availability Zones, see Regions and Availability Zones.

Default: A random, system-chosen Availability Zone in the endpoint's AWS Region.

Example: us-east-1d

Constraint: The AvailabilityZone parameter cannot be specified if the MultiAZ parameter is set to true. The specified Availability Zone must be in the same AWS Region as the current endpoint.

" }, "DBSubnetGroupName":{ "shape":"String", @@ -2329,7 +2329,7 @@ }, "PreferredMaintenanceWindow":{ "shape":"String", - "documentation":"

The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC). For more information, see DB Instance Maintenance.

Format: ddd:hh24:mi-ddd:hh24:mi

Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

Constraints: Minimum 30-minute window.

" + "documentation":"

The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC). For more information, see DB Instance Maintenance.

Format: ddd:hh24:mi-ddd:hh24:mi

Default: A 30-minute window selected at random from an 8-hour block of time per AWS Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

Constraints: Minimum 30-minute window.

" }, "DBParameterGroupName":{ "shape":"String", @@ -2337,11 +2337,11 @@ }, "BackupRetentionPeriod":{ "shape":"IntegerOptional", - "documentation":"

The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

Default: 1

Constraints:

" + "documentation":"

The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

Amazon Aurora

Not applicable. The retention period for automated backups is managed by the DB cluster. For more information, see CreateDBCluster.

Default: 1

Constraints:

" }, "PreferredBackupWindow":{ "shape":"String", - "documentation":"

The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter. For more information, see DB Instance Backups.

Default: A 30-minute window selected at random from an 8-hour block of time per region. To see the time blocks available, see Adjusting the Preferred DB Instance Maintenance Window.

Constraints:

" + "documentation":"

The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter. For more information, see DB Instance Backups.

Amazon Aurora

Not applicable. The daily time range for creating automated backups is managed by the DB cluster. For more information, see CreateDBCluster.

Default: A 30-minute window selected at random from an 8-hour block of time per AWS Region. To see the time blocks available, see Adjusting the Preferred DB Instance Maintenance Window.

Constraints:

" }, "Port":{ "shape":"IntegerOptional", @@ -2353,7 +2353,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

The version number of the database engine to use.

The following are the database engines and major and minor versions that are available with Amazon RDS. Not every database engine is available for every AWS region.

Amazon Aurora

MariaDB

Microsoft SQL Server 2016

Microsoft SQL Server 2014

Microsoft SQL Server 2012

Microsoft SQL Server 2008 R2

MySQL

Oracle 12c

Oracle 11g

PostgreSQL

" + "documentation":"

The version number of the database engine to use.

The following are the database engines and major and minor versions that are available with Amazon RDS. Not every database engine is available for every AWS Region.

Amazon Aurora

Not applicable. The version number of the database engine to be used by the DB instance is managed by the DB cluster. For more information, see CreateDBCluster.

MariaDB

Microsoft SQL Server 2016

Microsoft SQL Server 2014

Microsoft SQL Server 2012

Microsoft SQL Server 2008 R2

MySQL

Oracle 12c

Oracle 11g

PostgreSQL

" }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", @@ -2373,7 +2373,7 @@ }, "CharacterSetName":{ "shape":"String", - "documentation":"

For supported engines, indicates that the DB instance should be associated with the specified CharacterSet.

" + "documentation":"

For supported engines, indicates that the DB instance should be associated with the specified CharacterSet.

Amazon Aurora

Not applicable. The character set is managed by the DB cluster. For more information, see CreateDBCluster.

" }, "PubliclyAccessible":{ "shape":"BooleanOptional", @@ -2398,11 +2398,11 @@ }, "StorageEncrypted":{ "shape":"BooleanOptional", - "documentation":"

Specifies whether the DB instance is encrypted.

Default: false

" + "documentation":"

Specifies whether the DB instance is encrypted.

Amazon Aurora

Not applicable. The encryption for DB instances is managed by the DB cluster. For more information, see CreateDBCluster.

Default: false

" }, "KmsKeyId":{ "shape":"String", - "documentation":"

The KMS key identifier for an encrypted DB instance.

The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are creating a DB instance with the same AWS account that owns the KMS encryption key used to encrypt the new DB instance, then you can use the KMS key alias instead of the ARN for the KM encryption key.

If the StorageEncrypted parameter is true, and you do not specify a value for the KmsKeyId parameter, then Amazon RDS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.

" + "documentation":"

The KMS key identifier for an encrypted DB instance.

The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are creating a DB instance with the same AWS account that owns the KMS encryption key used to encrypt the new DB instance, then you can use the KMS key alias instead of the ARN for the KMS encryption key.

Amazon Aurora

Not applicable. The KMS key identifier is managed by the DB cluster. For more information, see CreateDBCluster.

If the StorageEncrypted parameter is true, and you do not specify a value for the KmsKeyId parameter, then Amazon RDS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

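A short sketch of the encryption parameters above: requesting an encrypted instance keyed by a same-account KMS alias. Names are hypothetical; omitting KmsKeyId falls back to the account's default key as described.

    import botocore.session

    session = botocore.session.get_session()
    rds = session.create_client('rds', region_name='us-east-1')

    rds.create_db_instance(
        DBInstanceIdentifier='example-encrypted-instance',  # hypothetical
        DBInstanceClass='db.t2.micro',
        Engine='mysql',
        MasterUsername='admin',
        MasterUserPassword='example-password-123',
        AllocatedStorage=20,
        StorageEncrypted=True,              # default is false
        KmsKeyId='alias/example-rds-key',   # alias only works within the owning account
    )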
" }, "Domain":{ "shape":"String", @@ -2434,7 +2434,7 @@ }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", - "documentation":"

True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts; otherwise false.

You can enable IAM database authentication for the following database engines:

Default: false

" + "documentation":"

True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts; otherwise false.

You can enable IAM database authentication for the following database engines:

Amazon Aurora

Not applicable. Mapping AWS IAM accounts to database accounts is managed by the DB cluster. For more information, see CreateDBCluster.

MySQL

Default: false

" } }, "documentation":"

" @@ -2452,7 +2452,7 @@ }, "SourceDBInstanceIdentifier":{ "shape":"String", - "documentation":"

The identifier of the DB instance that will act as the source for the Read Replica. Each DB instance can have up to five Read Replicas.

Constraints:

" + "documentation":"

The identifier of the DB instance that will act as the source for the Read Replica. Each DB instance can have up to five Read Replicas.

Constraints:

" }, "DBInstanceClass":{ "shape":"String", @@ -2460,7 +2460,7 @@ }, "AvailabilityZone":{ "shape":"String", - "documentation":"

The Amazon EC2 Availability Zone that the Read Replica will be created in.

Default: A random, system-chosen Availability Zone in the endpoint's region.

Example: us-east-1d

" + "documentation":"

The Amazon EC2 Availability Zone that the Read Replica will be created in.

Default: A random, system-chosen Availability Zone in the endpoint's AWS Region.

Example: us-east-1d

" }, "Port":{ "shape":"IntegerOptional", @@ -2485,7 +2485,7 @@ "Tags":{"shape":"TagList"}, "DBSubnetGroupName":{ "shape":"String", - "documentation":"

Specifies a DB subnet group for the DB instance. The new DB instance will be created in the VPC associated with the DB subnet group. If no DB subnet group is specified, then the new DB instance is not created in a VPC.

Constraints:

Constraints: Must contain no more than 255 alphanumeric characters, periods, underscores, spaces, or hyphens. Must not be default.

Example: mySubnetgroup

" + "documentation":"

Specifies a DB subnet group for the DB instance. The new DB instance will be created in the VPC associated with the DB subnet group. If no DB subnet group is specified, then the new DB instance is not created in a VPC.

Constraints:

Constraints: Must contain no more than 255 alphanumeric characters, periods, underscores, spaces, or hyphens. Must not be default.

Example: mySubnetgroup

" }, "StorageType":{ "shape":"String", @@ -2505,11 +2505,11 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

The AWS KMS key ID for an encrypted Read Replica. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

If you create an unencrypted Read Replica and specify a value for the KmsKeyId parameter, Amazon RDS encrypts the target Read Replica using the specified KMS encryption key.

If you create an encrypted Read Replica from your AWS account, you can specify a value for KmsKeyId to encrypt the Read Replica with a new KMS encryption key. If you don't specify a value for KmsKeyId, then the Read Replica is encrypted with the same KMS key as the source DB instance.

If you create an encrypted Read Replica in a different AWS region, then you must specify a KMS key for the destination AWS region. KMS encryption keys are specific to the region that they are created in, and you cannot use encryption keys from one region in another region.

" + "documentation":"

The AWS KMS key ID for an encrypted Read Replica. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

If you specify this parameter when you create a Read Replica from an unencrypted DB instance, the Read Replica is encrypted.

If you create an encrypted Read Replica in the same AWS Region as the source DB instance, then you do not have to specify a value for this parameter. The Read Replica is encrypted with the same KMS key as the source DB instance.

If you create an encrypted Read Replica in a different AWS Region, then you must specify a KMS key for the destination AWS Region. KMS encryption keys are specific to the AWS Region that they are created in, and you cannot use encryption keys from one AWS Region in another AWS Region.

" }, "PreSignedUrl":{ "shape":"String", - "documentation":"

The URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica API action in the AWS region that contains the source DB instance. The PreSignedUrl parameter must be used when encrypting a Read Replica from another AWS region.

The presigned URL must be a valid request for the CreateDBInstanceReadReplica API action that can be executed in the source region that contains the encrypted DB instance. The presigned URL request must contain the following parameter values:

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.

" + "documentation":"

The URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica API action in the source AWS Region that contains the source DB instance.

You must specify this parameter when you create an encrypted Read Replica from another AWS Region by using the Amazon RDS API. You can specify the source region option instead of this parameter when you create an encrypted Read Replica from another AWS Region by using the AWS CLI.

The presigned URL must be a valid request for the CreateDBInstanceReadReplica API action that can be executed in the source AWS Region that contains the encrypted source DB instance. The presigned URL request must contain the following parameter values:

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.

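A hedged sketch of the cross-Region flow above. With a sufficiently recent botocore, passing a SourceRegion argument lets the client generate the Signature Version 4 PreSignedUrl on your behalf; the source ARN, key alias, and Regions here are hypothetical.

    import botocore.session

    session = botocore.session.get_session()
    rds = session.create_client('rds', region_name='us-west-2')  # destination Region

    rds.create_db_instance_read_replica(
        DBInstanceIdentifier='example-replica',
        SourceDBInstanceIdentifier=(
            'arn:aws:rds:us-east-1:123456789012:db:example-source'),
        KmsKeyId='alias/example-destination-key',  # a key in the destination Region
        SourceRegion='us-east-1',  # botocore builds PreSignedUrl from this
    )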
" }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", @@ -3957,11 +3957,11 @@ }, "SourceRegion":{ "shape":"String", - "documentation":"

The region that the DB snapshot was created in or copied from.

" + "documentation":"

The AWS Region that the DB snapshot was created in or copied from.

" }, "SourceDBSnapshotIdentifier":{ "shape":"String", - "documentation":"

The DB snapshot Arn that the DB snapshot was copied from. It only has value in case of cross customer or cross region copy.

" + "documentation":"

The DB snapshot Amazon Resource Name (ARN) that the DB snapshot was copied from. It only has a value in the case of a cross-customer or cross-Region copy.

" }, "StorageType":{ "shape":"String", @@ -5154,7 +5154,7 @@ "members":{ "RegionName":{ "shape":"String", - "documentation":"

The source region name. For example, us-east-1.

Constraints:

" + "documentation":"

The source AWS Region name. For example, us-east-1.

Constraints:

" }, "MaxRecords":{ "shape":"IntegerOptional", @@ -5871,11 +5871,11 @@ }, "PreferredBackupWindow":{ "shape":"String", - "documentation":"

The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

Default: A 30-minute window selected at random from an 8-hour block of time per region. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

Constraints:

" + "documentation":"

The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

Default: A 30-minute window selected at random from an 8-hour block of time per AWS Region. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

Constraints:

" }, "PreferredMaintenanceWindow":{ "shape":"String", - "documentation":"

The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

Format: ddd:hh24:mi-ddd:hh24:mi

Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

Constraints: Minimum 30-minute window.

" + "documentation":"

The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

Format: ddd:hh24:mi-ddd:hh24:mi

Default: A 30-minute window selected at random from an 8-hour block of time per AWS Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

Constraints: Minimum 30-minute window.

" }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", @@ -5966,7 +5966,7 @@ }, "VpcSecurityGroupIds":{ "shape":"VpcSecurityGroupIdList", - "documentation":"

A list of EC2 VPC security groups to authorize on this DB instance. This change is asynchronously applied as soon as possible.

Constraints:

" + "documentation":"

A list of EC2 VPC security groups to authorize on this DB instance. This change is asynchronously applied as soon as possible.

Amazon Aurora

Not applicable. The associated list of EC2 VPC security groups is managed by the DB cluster. For more information, see ModifyDBCluster.

Constraints:

" }, "ApplyImmediately":{ "shape":"Boolean", @@ -5974,7 +5974,7 @@ }, "MasterUserPassword":{ "shape":"String", - "documentation":"

The new password for the DB instance master user. Can be any printable ASCII character except \"/\", \"\"\", or \"@\".

Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response.

Default: Uses existing setting

Constraints: Must be 8 to 41 alphanumeric characters (MySQL, MariaDB, and Amazon Aurora), 8 to 30 alphanumeric characters (Oracle), or 8 to 128 alphanumeric characters (SQL Server).

Amazon RDS API actions never return the password, so this action provides a way to regain access to a primary instance user if the password is lost. This includes restoring privileges that might have been accidentally revoked.

" + "documentation":"

The new password for the master user. Can be any printable ASCII character except \"/\", \"\"\", or \"@\".

Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response.

Amazon Aurora

Not applicable. The password for the master user is managed by the DB cluster. For more information, see ModifyDBCluster.

Default: Uses existing setting

Constraints: Must be 8 to 41 alphanumeric characters (MySQL, MariaDB, and Amazon Aurora), 8 to 30 alphanumeric characters (Oracle), or 8 to 128 alphanumeric characters (SQL Server).

Amazon RDS API actions never return the password, so this action provides a way to regain access to a primary instance user if the password is lost. This includes restoring privileges that might have been accidentally revoked.

" }, "DBParameterGroupName":{ "shape":"String", @@ -5982,11 +5982,11 @@ }, "BackupRetentionPeriod":{ "shape":"IntegerOptional", - "documentation":"

The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

Changing this parameter can result in an outage if you change from 0 to a non-zero value or from a non-zero value to 0. These changes are applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon as possible.

Default: Uses existing setting

Constraints:

" + "documentation":"

The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

Changing this parameter can result in an outage if you change from 0 to a non-zero value or from a non-zero value to 0. These changes are applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon as possible.

Amazon Aurora

Not applicable. The retention period for automated backups is managed by the DB cluster. For more information, see ModifyDBCluster.

Default: Uses existing setting

Constraints:

" }, "PreferredBackupWindow":{ "shape":"String", - "documentation":"

The daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod parameter. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

Constraints:

" + "documentation":"

The daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod parameter. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.

Amazon Aurora

Not applicable. The daily time range for creating automated backups is managed by the DB cluster. For more information, see ModifyDBCluster.

Constraints:

" }, "PreferredMaintenanceWindow":{ "shape":"String", @@ -5998,7 +5998,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

For major version upgrades, if a non-default DB parameter group is currently in use, a new DB parameter group in the DB parameter group family for the new engine version must be specified. The new DB parameter group can be the default for that DB parameter group family.

For a list of valid engine versions, see CreateDBInstance.

" + "documentation":"

The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true for this request.

For major version upgrades, if a nondefault DB parameter group is currently in use, a new DB parameter group in the DB parameter group family for the new engine version must be specified. The new DB parameter group can be the default for that DB parameter group family.

For a list of valid engine versions, see CreateDBInstance.

" }, "AllowMajorVersionUpgrade":{ "shape":"Boolean", @@ -6074,7 +6074,7 @@ }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", - "documentation":"

True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts; otherwise false.

You can enable IAM database authentication for the following database engines

Default: false

" + "documentation":"

True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts; otherwise false.

You can enable IAM database authentication for the following database engines:

Amazon Aurora

Not applicable. Mapping AWS IAM accounts to database accounts is managed by the DB cluster. For more information, see ModifyDBCluster.

MySQL

Default: false

" } }, "documentation":"

" @@ -6950,7 +6950,7 @@ }, "PreferredBackupWindow":{ "shape":"String", - "documentation":"

The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

Default: A 30-minute window selected at random from an 8-hour block of time per region. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

Constraints:

" + "documentation":"

The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

Default: A 30-minute window selected at random from an 8-hour block of time per AWS Region. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

Constraints:

" } }, "documentation":"

" @@ -7462,11 +7462,11 @@ }, "PreferredBackupWindow":{ "shape":"String", - "documentation":"

The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter.

Default: A 30-minute window selected at random from an 8-hour block of time per region. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

Constraints:

" + "documentation":"

The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter.

Default: A 30-minute window selected at random from an 8-hour block of time per AWS Region. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

Constraints:

" }, "PreferredMaintenanceWindow":{ "shape":"String", - "documentation":"

The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

Format: ddd:hh24:mi-ddd:hh24:mi

Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

Constraints: Minimum 30-minute window.

" + "documentation":"

The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

Format: ddd:hh24:mi-ddd:hh24:mi

Default: A 30-minute window selected at random from an 8-hour block of time per AWS Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

Constraints: Minimum 30-minute window.

" }, "Tags":{"shape":"TagList"}, "StorageEncrypted":{ @@ -7475,7 +7475,7 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

The KMS key identifier for an encrypted DB cluster.

The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are creating a DB cluster with the same AWS account that owns the KMS encryption key used to encrypt the new DB cluster, then you can use the KMS key alias instead of the ARN for the KM encryption key.

If the StorageEncrypted parameter is true, and you do not specify a value for the KmsKeyId parameter, then Amazon RDS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.

" + "documentation":"

The KMS key identifier for an encrypted DB cluster.

The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are creating a DB cluster with the same AWS account that owns the KMS encryption key used to encrypt the new DB cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.

If the StorageEncrypted parameter is true, and you do not specify a value for the KmsKeyId parameter, then Amazon RDS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

" }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", @@ -7648,7 +7648,7 @@ "members":{ "DBInstanceIdentifier":{ "shape":"String", - "documentation":"

Name of the DB instance to create from the DB snapshot. This parameter isn't case-sensitive.

Constraints:

Example: my-snapshot-id

" + "documentation":"

Name of the DB instance to create from the DB snapshot. This parameter isn't case-sensitive.

Constraints:

Example: my-snapshot-id

" }, "DBSnapshotIdentifier":{ "shape":"String", @@ -7966,15 +7966,15 @@ "members":{ "RegionName":{ "shape":"String", - "documentation":"

The source region name.

" + "documentation":"

The name of the source AWS Region.

" }, "Endpoint":{ "shape":"String", - "documentation":"

The source region endpoint.

" + "documentation":"

The endpoint for the source AWS Region.

" }, "Status":{ "shape":"String", - "documentation":"

The status of the source region.

" + "documentation":"

The status of the source AWS Region.

" } }, "documentation":"

Contains an AWS Region name as the result of a successful call to the DescribeSourceRegions action.

" @@ -7995,7 +7995,7 @@ }, "SourceRegions":{ "shape":"SourceRegionList", - "documentation":"

A list of SourceRegion instances that contains each source AWS Region that the current region can get a Read Replica or a DB snapshot from.

" + "documentation":"

A list of SourceRegion instances that contains each source AWS Region that the current AWS Region can get a Read Replica or a DB snapshot from.

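For example, a small sketch that lists which AWS Regions the current Region can replicate from:

    import botocore.session

    session = botocore.session.get_session()
    rds = session.create_client('rds', region_name='us-west-2')

    # Each entry carries the fields documented above: RegionName, Endpoint, Status.
    for region in rds.describe_source_regions()['SourceRegions']:
        print(region['RegionName'], region['Endpoint'], region['Status'])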
" } }, "documentation":"

Contains the result of a successful invocation of the DescribeSourceRegions action.

" diff --git a/botocore/data/route53/2013-04-01/service-2.json b/botocore/data/route53/2013-04-01/service-2.json index e277beaf..7e2c8d79 100644 --- a/botocore/data/route53/2013-04-01/service-2.json +++ b/botocore/data/route53/2013-04-01/service-2.json @@ -2328,6 +2328,7 @@ } }, "documentation":"

This error code is not in use.

", + "deprecated":true, "error":{"httpStatusCode":400}, "exception":true }, @@ -2889,7 +2890,7 @@ }, "StartRecordType":{ "shape":"RRType", - "documentation":"

The type of resource record set to begin the record listing from.

Valid values for basic resource record sets: A | AAAA | CNAME | MX | NAPTR | NS | PTR | SOA | SPF | SRV | TXT

Values for weighted, latency, geo, and failover resource record sets: A | AAAA | CNAME | MX | NAPTR | PTR | SPF | SRV | TXT

Values for alias resource record sets:

Constraint: Specifying type without specifying name returns an InvalidInput error.

", + "documentation":"

The type of resource record set to begin the record listing from.

Valid values for basic resource record sets: A | AAAA | CAA | CNAME | MX | NAPTR | NS | PTR | SOA | SPF | SRV | TXT

Values for weighted, latency, geo, and failover resource record sets: A | AAAA | CAA | CNAME | MX | NAPTR | PTR | SPF | SRV | TXT

Values for alias resource record sets:

Constraint: Specifying type without specifying name returns an InvalidInput error.

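A sketch of the constraint above: a start type must be paired with a start name. The zone ID and record name are hypothetical; CAA is simply one of the newly valid values.

    import botocore.session

    session = botocore.session.get_session()
    r53 = session.create_client('route53', region_name='us-east-1')

    resp = r53.list_resource_record_sets(
        HostedZoneId='Z1EXAMPLE',          # hypothetical
        StartRecordName='example.com.',    # required alongside StartRecordType
        StartRecordType='CAA',
    )
    for rrset in resp['ResourceRecordSets']:
        print(rrset['Name'], rrset['Type'])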
", "location":"querystring", "locationName":"type" }, @@ -3586,7 +3587,8 @@ "PTR", "SRV", "SPF", - "AAAA" + "AAAA", + "CAA" ] }, "RecordData":{ @@ -3643,7 +3645,7 @@ }, "Type":{ "shape":"RRType", - "documentation":"

The DNS record type. For information about different record types and how data is encoded for them, see Supported DNS Resource Record Types in the Amazon Route 53 Developer Guide.

Valid values for basic resource record sets: A | AAAA | CNAME | MX | NAPTR | NS | PTR | SOA | SPF | SRV | TXT

Values for weighted, latency, geolocation, and failover resource record sets: A | AAAA | CNAME | MX | NAPTR | PTR | SPF | SRV | TXT. When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.

Valid values for multivalue answer resource record sets: A | AAAA | MX | NAPTR | PTR | SPF | SRV | TXT

SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of Type is SPF. RFC 7208, Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1, has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, The SPF DNS Record Type.

Values for alias resource record sets:

" + "documentation":"

The DNS record type. For information about different record types and how data is encoded for them, see Supported DNS Resource Record Types in the Amazon Route 53 Developer Guide.

Valid values for basic resource record sets: A | AAAA | CAA | CNAME | MX | NAPTR | NS | PTR | SOA | SPF | SRV | TXT

Values for weighted, latency, geolocation, and failover resource record sets: A | AAAA | CAA | CNAME | MX | NAPTR | PTR | SPF | SRV | TXT. When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.

Valid values for multivalue answer resource record sets: A | AAAA | MX | NAPTR | PTR | SPF | SRV | TXT

SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of Type is SPF. RFC 7208, Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1, has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, The SPF DNS Record Type.

Values for alias resource record sets:

" }, "SetIdentifier":{ "shape":"ResourceRecordSetIdentifier", diff --git a/botocore/data/ses/2010-12-01/service-2.json b/botocore/data/ses/2010-12-01/service-2.json index a9ec298f..39a3b41a 100644 --- a/botocore/data/ses/2010-12-01/service-2.json +++ b/botocore/data/ses/2010-12-01/service-2.json @@ -1,7 +1,6 @@ { "version":"2.0", "metadata":{ - "uid":"email-2010-12-01", "apiVersion":"2010-12-01", "endpointPrefix":"email", "protocol":"query", @@ -9,6 +8,7 @@ "serviceFullName":"Amazon Simple Email Service", "signatureVersion":"v4", "signingName":"ses", + "uid":"email-2010-12-01", "xmlNamespace":"http://ses.amazonaws.com/doc/2010-12-01/" }, "operations":{ @@ -64,9 +64,10 @@ {"shape":"EventDestinationAlreadyExistsException"}, {"shape":"InvalidCloudWatchDestinationException"}, {"shape":"InvalidFirehoseDestinationException"}, + {"shape":"InvalidSNSDestinationException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Creates a configuration set event destination.

When you create or update an event destination, you must provide one, and only one, destination. The destination can be either Amazon CloudWatch or Amazon Kinesis Firehose.

An event destination is the AWS service to which Amazon SES publishes the email sending events associated with a configuration set. For information about using configuration sets, see the Amazon SES Developer Guide.

This action is throttled at one request per second.

" + "documentation":"

Creates a configuration set event destination.

When you create or update an event destination, you must provide one, and only one, destination. The destination can be Amazon CloudWatch, Amazon Kinesis Firehose, or Amazon Simple Notification Service (Amazon SNS).

An event destination is the AWS service to which Amazon SES publishes the email sending events associated with a configuration set. For information about using configuration sets, see the Amazon SES Developer Guide.

This action is throttled at one request per second.

" }, "CreateReceiptFilter":{ "name":"CreateReceiptFilter", @@ -362,7 +363,7 @@ "shape":"GetIdentityVerificationAttributesResponse", "resultWrapper":"GetIdentityVerificationAttributesResult" }, - "documentation":"

Given a list of identities (email addresses and/or domains), returns the verification status and (for domain identities) the verification token for each identity.

This action is throttled at one request per second and can only get verification attributes for up to 100 identities at a time.

" + "documentation":"

Given a list of identities (email addresses and/or domains), returns the verification status and (for domain identities) the verification token for each identity.

The verification status of an email address is \"Pending\" until the email address owner clicks the link within the verification email that Amazon SES sent to that address. If the email address owner clicks the link within 24 hours, the verification status of the email address changes to \"Success\". If the link is not clicked within 24 hours, the verification status changes to \"Failed.\" In that case, if you still want to verify the email address, you must restart the verification process from the beginning.

For domain identities, the domain's verification status is \"Pending\" as Amazon SES searches for the required TXT record in the DNS settings of the domain. When Amazon SES detects the record, the domain's verification status changes to \"Success\". If Amazon SES is unable to detect the record within 72 hours, the domain's verification status changes to \"Failed.\" In that case, if you still want to verify the domain, you must restart the verification process from the beginning.

This action is throttled at one request per second and can only get verification attributes for up to 100 identities at a time.

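A sketch of the verification flow described above, with a hypothetical domain: request a token, publish it as a TXT record out of band, then poll the status.

    import botocore.session

    session = botocore.session.get_session()
    ses = session.create_client('ses', region_name='us-east-1')

    token = ses.verify_domain_identity(Domain='example.com')['VerificationToken']
    print('Publish this TXT record in DNS:', token)

    attrs = ses.get_identity_verification_attributes(Identities=['example.com'])
    status = attrs['VerificationAttributes']['example.com']['VerificationStatus']
    print(status)  # "Pending" until the TXT record is detected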
" }, "GetSendQuota":{ "name":"GetSendQuota", @@ -530,7 +531,7 @@ {"shape":"MailFromDomainNotVerifiedException"}, {"shape":"ConfigurationSetDoesNotExistException"} ], - "documentation":"

Composes an email message based on input data, and then immediately queues the message for sending.

There are several important points to know about SendEmail:

" + "documentation":"

Composes an email message based on input data, and then immediately queues the message for sending.

There are several important points to know about SendEmail:

" }, "SendRawEmail":{ "name":"SendRawEmail", @@ -548,7 +549,7 @@ {"shape":"MailFromDomainNotVerifiedException"}, {"shape":"ConfigurationSetDoesNotExistException"} ], - "documentation":"

Sends an email message, with header and content specified by the client. The SendRawEmail action is useful for sending multipart MIME emails. The raw text of the message must comply with Internet email standards; otherwise, the message cannot be sent.

There are several important points to know about SendRawEmail:

" + "documentation":"

Sends an email message, with header and content specified by the client. The SendRawEmail action is useful for sending multipart MIME emails. The raw text of the message must comply with Internet email standards; otherwise, the message cannot be sent.

There are several important points to know about SendRawEmail:

" }, "SetActiveReceiptRuleSet":{ "name":"SetActiveReceiptRuleSet", @@ -663,9 +664,10 @@ {"shape":"ConfigurationSetDoesNotExistException"}, {"shape":"EventDestinationDoesNotExistException"}, {"shape":"InvalidCloudWatchDestinationException"}, - {"shape":"InvalidFirehoseDestinationException"} + {"shape":"InvalidFirehoseDestinationException"}, + {"shape":"InvalidSNSDestinationException"} ], - "documentation":"

Updates the event destination of a configuration set.

When you create or update an event destination, you must provide one, and only one, destination. The destination can be either Amazon CloudWatch or Amazon Kinesis Firehose.

Event destinations are associated with configuration sets, which enable you to publish email sending events to Amazon CloudWatch or Amazon Kinesis Firehose. For information about using configuration sets, see the Amazon SES Developer Guide.

This action is throttled at one request per second.

" + "documentation":"

Updates the event destination of a configuration set.

When you create or update an event destination, you must provide one, and only one, destination. The destination can be Amazon CloudWatch, Amazon Kinesis Firehose, or Amazon Simple Notification Service (Amazon SNS).

Event destinations are associated with configuration sets, which enable you to publish email sending events to Amazon CloudWatch, Amazon Kinesis Firehose, or Amazon Simple Notification Service (Amazon SNS). For information about using configuration sets, see the Amazon SES Developer Guide.

This action is throttled at one request per second.

" }, "UpdateReceiptRule":{ "name":"UpdateReceiptRule", @@ -1402,7 +1404,8 @@ "type":"string", "enum":[ "messageTag", - "emailHeader" + "emailHeader", + "linkTag" ] }, "DkimAttributes":{ @@ -1449,9 +1452,13 @@ "CloudWatchDestination":{ "shape":"CloudWatchDestination", "documentation":"

An object that contains the names, default values, and sources of the dimensions associated with an Amazon CloudWatch event destination.

" + }, + "SNSDestination":{ + "shape":"SNSDestination", + "documentation":"

An object that contains the topic ARN associated with an Amazon Simple Notification Service (Amazon SNS) event destination.

" } }, - "documentation":"

Contains information about the event destination to which the specified email sending events are published.

When you create or update an event destination, you must provide one, and only one, destination. The destination can be either Amazon CloudWatch or Amazon Kinesis Firehose.

Event destinations are associated with configuration sets, which enable you to publish email sending events to Amazon CloudWatch or Amazon Kinesis Firehose. For information about using configuration sets, see the Amazon SES Developer Guide.

" + "documentation":"

Contains information about the event destination to which the specified email sending events are published.

When you create or update an event destination, you must provide one, and only one, destination. The destination can be Amazon CloudWatch, Amazon Kinesis Firehose, or Amazon Simple Notification Service (Amazon SNS).

Event destinations are associated with configuration sets, which enable you to publish email sending events to Amazon CloudWatch, Amazon Kinesis Firehose, or Amazon Simple Notification Service (Amazon SNS). For information about using configuration sets, see the Amazon SES Developer Guide.

" }, "EventDestinationAlreadyExistsException":{ "type":"structure", @@ -1493,7 +1500,9 @@ "reject", "bounce", "complaint", - "delivery" + "delivery", + "open", + "click" ] }, "EventTypes":{ @@ -1862,6 +1871,26 @@ }, "exception":true }, + "InvalidSNSDestinationException":{ + "type":"structure", + "members":{ + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "documentation":"

Indicates that the configuration set does not exist.

" + }, + "EventDestinationName":{ + "shape":"EventDestinationName", + "documentation":"

Indicates that the event destination does not exist.

" + } + }, + "documentation":"

Indicates that the Amazon Simple Notification Service (Amazon SNS) destination is invalid. See the error message for details.

", + "error":{ + "code":"InvalidSNSDestination", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "InvalidSnsTopicException":{ "type":"structure", "members":{ @@ -2229,7 +2258,7 @@ "members":{ "Data":{ "shape":"RawMessageData", - "documentation":"

The raw data of the message. The client must ensure that the message format complies with Internet email standards regarding email header fields, MIME types, MIME encoding, and base64 encoding.

The To:, CC:, and BCC: headers in the raw message can contain a group list.

If you are using SendRawEmail with sending authorization, you can include X-headers in the raw message to specify the \"Source,\" \"From,\" and \"Return-Path\" addresses. For more information, see the documentation for SendRawEmail.

Do not include these X-headers in the DKIM signature, because they are removed by Amazon SES before sending the email.

For more information, go to the Amazon SES Developer Guide.

" + "documentation":"

The raw data of the message. This data needs to be base64-encoded if you are accessing Amazon SES directly through the HTTPS interface. If you are accessing Amazon SES using an AWS SDK, the SDK takes care of the base64 encoding for you. In all cases, the client must ensure that the message format complies with Internet email standards regarding email header fields, MIME types, and MIME encoding.

The To:, CC:, and BCC: headers in the raw message can contain a group list.

If you are using SendRawEmail with sending authorization, you can include X-headers in the raw message to specify the \"Source,\" \"From,\" and \"Return-Path\" addresses. For more information, see the documentation for SendRawEmail.

Do not include these X-headers in the DKIM signature, because they are removed by Amazon SES before sending the email.

For more information, go to the Amazon SES Developer Guide.

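A sketch of the point above that the SDK handles the base64 step: only a standards-compliant MIME message is needed. The addresses are hypothetical and would have to be verified identities in a real account.

    import botocore.session
    from email.mime.text import MIMEText

    msg = MIMEText('Hello from SES.')
    msg['Subject'] = 'Test message'
    msg['From'] = 'sender@example.com'
    msg['To'] = 'recipient@example.com'

    session = botocore.session.get_session()
    ses = session.create_client('ses', region_name='us-east-1')

    # The client base64-encodes the blob; no manual encoding is required.
    ses.send_raw_email(RawMessage={'Data': msg.as_string()})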
" } }, "documentation":"

Represents the raw data of the message.

" @@ -2523,6 +2552,17 @@ "Base64" ] }, + "SNSDestination":{ + "type":"structure", + "required":["TopicARN"], + "members":{ + "TopicARN":{ + "shape":"AmazonResourceName", + "documentation":"

The ARN of the Amazon SNS topic to which you want to publish email sending events. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

" + } + }, + "documentation":"

Contains the topic ARN associated with an Amazon Simple Notification Service (Amazon SNS) event destination.

Event destinations, such as Amazon SNS, are associated with configuration sets, which enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

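A sketch wiring the SNSDestination shape above into a configuration set; the set name and topic ARN are hypothetical.

    import botocore.session

    session = botocore.session.get_session()
    ses = session.create_client('ses', region_name='us-east-1')

    ses.create_configuration_set_event_destination(
        ConfigurationSetName='example-config-set',   # hypothetical
        EventDestination={
            'Name': 'example-sns-destination',
            'Enabled': True,
            # 'open' and 'click' are among the event types added in this release.
            'MatchingEventTypes': ['bounce', 'complaint', 'open', 'click'],
            'SNSDestination': {
                'TopicARN': 'arn:aws:sns:us-west-2:123456789012:MyTopic',
            },
        },
    )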
" + }, "SendBounceRequest":{ "type":"structure", "required":[ @@ -2670,7 +2710,7 @@ }, "RawMessage":{ "shape":"RawMessage", - "documentation":"

The raw text of the message. The client is responsible for ensuring the following:

" + "documentation":"

The raw text of the message. The client is responsible for ensuring the following:

" }, "FromArn":{ "shape":"AmazonResourceName", @@ -3015,7 +3055,7 @@ "members":{ "VerificationToken":{ "shape":"VerificationToken", - "documentation":"

A TXT record that must be placed in the DNS settings for the domain, in order to complete domain verification.

" + "documentation":"

A TXT record that you must place in the DNS settings of the domain to complete domain verification with Amazon SES.

As Amazon SES searches for the TXT record, the domain's verification status is \"Pending\". When Amazon SES detects the record, the domain's verification status changes to \"Success\". If Amazon SES is unable to detect the record within 72 hours, the domain's verification status changes to \"Failed.\" In that case, if you still want to verify the domain, you must restart the verification process from the beginning.

" } }, "documentation":"

Returns a TXT record that you must publish to the DNS server of your domain to complete domain verification with Amazon SES.

" diff --git a/botocore/data/ssm/2014-11-06/service-2.json b/botocore/data/ssm/2014-11-06/service-2.json index 5055e338..f8de2f99 100644 --- a/botocore/data/ssm/2014-11-06/service-2.json +++ b/botocore/data/ssm/2014-11-06/service-2.json @@ -253,7 +253,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Delete a list of parameters.

" + "documentation":"

Delete a list of parameters. This API is used to delete parameters by using the Amazon EC2 console.

" }, "DeletePatchBaseline":{ "name":"DeletePatchBaseline", @@ -321,7 +321,8 @@ "output":{"shape":"DeregisterTargetFromMaintenanceWindowResult"}, "errors":[ {"shape":"DoesNotExistException"}, - {"shape":"InternalServerError"} + {"shape":"InternalServerError"}, + {"shape":"TargetInUseException"} ], "documentation":"

Removes a target from a Maintenance Window.

" }, @@ -364,6 +365,7 @@ "output":{"shape":"DescribeAssociationResult"}, "errors":[ {"shape":"AssociationDoesNotExist"}, + {"shape":"InvalidAssociationVersion"}, {"shape":"InternalServerError"}, {"shape":"InvalidDocument"}, {"shape":"InvalidInstanceId"} @@ -574,7 +576,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Lists the executions of a Maintenance Window (meaning, information about when the Maintenance Window was scheduled to be active and information about tasks registered and run with the Maintenance Window).

" + "documentation":"

Lists the executions of a Maintenance Window. This includes information about when the Maintenance Window was scheduled to be active, and information about tasks registered and run with the Maintenance Window.

" }, "DescribeMaintenanceWindowTargets":{ "name":"DescribeMaintenanceWindowTargets", @@ -632,7 +634,7 @@ {"shape":"InvalidFilterValue"}, {"shape":"InvalidNextToken"} ], - "documentation":"

Get information about a parameter.

" + "documentation":"

Get information about a parameter.

Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results.

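Because results are best-effort up to MaxResults, callers follow NextToken until it is absent, roughly as sketched here:

    import botocore.session

    session = botocore.session.get_session()
    ssm = session.create_client('ssm', region_name='us-east-1')

    kwargs = {'MaxResults': 50}
    while True:
        page = ssm.describe_parameters(**kwargs)
        for param in page['Parameters']:
            print(param['Name'])
        if 'NextToken' not in page:
            break
        kwargs['NextToken'] = page['NextToken']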
" }, "DescribePatchBaselines":{ "name":"DescribePatchBaselines", @@ -821,6 +823,34 @@ ], "documentation":"

Retrieves the details about a specific task executed as part of a Maintenance Window execution.

" }, + "GetMaintenanceWindowExecutionTaskInvocation":{ + "name":"GetMaintenanceWindowExecutionTaskInvocation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetMaintenanceWindowExecutionTaskInvocationRequest"}, + "output":{"shape":"GetMaintenanceWindowExecutionTaskInvocationResult"}, + "errors":[ + {"shape":"DoesNotExistException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Retrieves a task invocation. A task invocation is a specific task executing on a specific target. Maintenance Windows report status for all invocations.

" + }, + "GetMaintenanceWindowTask":{ + "name":"GetMaintenanceWindowTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetMaintenanceWindowTaskRequest"}, + "output":{"shape":"GetMaintenanceWindowTaskResult"}, + "errors":[ + {"shape":"DoesNotExistException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Lists the tasks in a Maintenance Window.

" + }, "GetParameter":{ "name":"GetParameter", "http":{ @@ -882,7 +912,7 @@ {"shape":"InvalidKeyId"}, {"shape":"InvalidNextToken"} ], - "documentation":"

Retrieve parameters in a specific hierarchy. For more information, see Working with Systems Manager Parameters.

" + "documentation":"

Retrieve parameters in a specific hierarchy. For more information, see Working with Systems Manager Parameters.

Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results.

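The same NextToken pattern applies to hierarchy reads; a sketch with a hypothetical path:

    import botocore.session

    session = botocore.session.get_session()
    ssm = session.create_client('ssm', region_name='us-east-1')

    kwargs = {'Path': '/example/app', 'Recursive': True, 'WithDecryption': True}
    while True:
        page = ssm.get_parameters_by_path(**kwargs)
        for param in page['Parameters']:
            print(param['Name'], param['Value'])
        if 'NextToken' not in page:
            break
        kwargs['NextToken'] = page['NextToken']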
" }, "GetPatchBaseline":{ "name":"GetPatchBaseline", @@ -912,6 +942,21 @@ ], "documentation":"

Retrieves the patch baseline that should be used for the specified patch group.

" }, + "ListAssociationVersions":{ + "name":"ListAssociationVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAssociationVersionsRequest"}, + "output":{"shape":"ListAssociationVersionsResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidNextToken"}, + {"shape":"AssociationDoesNotExist"} + ], + "documentation":"

Retrieves all versions of an association for a specific association ID.

" + }, "ListAssociations":{ "name":"ListAssociations", "http":{ @@ -960,6 +1005,38 @@ ], "documentation":"

Lists the commands requested by users of the AWS account.

" }, + "ListComplianceItems":{ + "name":"ListComplianceItems", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListComplianceItemsRequest"}, + "output":{"shape":"ListComplianceItemsResult"}, + "errors":[ + {"shape":"InvalidResourceType"}, + {"shape":"InvalidResourceId"}, + {"shape":"InternalServerError"}, + {"shape":"InvalidFilter"}, + {"shape":"InvalidNextToken"} + ], + "documentation":"

For a specified resource ID, this API action returns a list of compliance statuses for different resource types. Currently, you can only specify one resource ID per call. List results depend on the criteria specified in the filter.

" + }, + "ListComplianceSummaries":{ + "name":"ListComplianceSummaries", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListComplianceSummariesRequest"}, + "output":{"shape":"ListComplianceSummariesResult"}, + "errors":[ + {"shape":"InvalidFilter"}, + {"shape":"InvalidNextToken"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Returns a summary count of compliant and non-compliant resources for a compliance type. For example, this call can return State Manager associations, patches, or custom compliance types according to the filter criteria that you specify.

" + }, "ListDocumentVersions":{ "name":"ListDocumentVersions", "http":{ @@ -1007,6 +1084,21 @@ ], "documentation":"

A list of inventory items returned by the request.

" }, + "ListResourceComplianceSummaries":{ + "name":"ListResourceComplianceSummaries", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListResourceComplianceSummariesRequest"}, + "output":{"shape":"ListResourceComplianceSummariesResult"}, + "errors":[ + {"shape":"InvalidFilter"}, + {"shape":"InvalidNextToken"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Returns a resource-level summary count. The summary includes information about compliant and non-compliant statuses and detailed compliance-item severity counts, according to the filter criteria you specify.

" + }, "ListResourceDataSync":{ "name":"ListResourceDataSync", "http":{ @@ -1053,6 +1145,25 @@ ], "documentation":"

Shares a Systems Manager document publicly or privately. If you share a document privately, you must specify the AWS user account IDs for those people who can use the document. If you share a document publicly, you must specify All as the account ID.

" }, + "PutComplianceItems":{ + "name":"PutComplianceItems", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutComplianceItemsRequest"}, + "output":{"shape":"PutComplianceItemsResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidItemContentException"}, + {"shape":"TotalSizeLimitExceededException"}, + {"shape":"ItemSizeLimitExceededException"}, + {"shape":"ComplianceTypeCountLimitExceededException"}, + {"shape":"InvalidResourceType"}, + {"shape":"InvalidResourceId"} + ], + "documentation":"

Registers a compliance type and other compliance details on a designated resource. This action lets you register custom compliance details with a resource. This call overwrites existing compliance information on the resource, so you must provide a full list of compliance items each time that you send the request.

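Because each call overwrites existing compliance data, the full item list is sent every time, as in this sketch with hypothetical IDs and a hypothetical custom type:

    import datetime
    import botocore.session

    session = botocore.session.get_session()
    ssm = session.create_client('ssm', region_name='us-east-1')

    ssm.put_compliance_items(
        ResourceId='i-0123456789abcdef0',     # hypothetical managed instance
        ResourceType='ManagedInstance',       # currently the only supported type
        ComplianceType='Custom:ExampleScan',  # hypothetical custom type
        ExecutionSummary={'ExecutionTime': datetime.datetime.utcnow()},
        Items=[
            {
                'Id': 'scan-001',
                'Title': 'Example check',
                'Severity': 'HIGH',
                'Status': 'COMPLIANT',
            },
        ],
    )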
" + }, "PutInventory":{ "name":"PutInventory", "http":{ @@ -1070,7 +1181,10 @@ {"shape":"ItemSizeLimitExceededException"}, {"shape":"ItemContentMismatchException"}, {"shape":"CustomSchemaCountLimitExceededException"}, - {"shape":"UnsupportedInventorySchemaVersionException"} + {"shape":"UnsupportedInventorySchemaVersionException"}, + {"shape":"UnsupportedInventoryItemContextException"}, + {"shape":"InvalidInventoryItemContextException"}, + {"shape":"SubTypeCountLimitExceededException"} ], "documentation":"

Bulk update custom inventory items on one or more instances. The request adds an inventory item, if it doesn't already exist, or updates an inventory item, if it does exist.

" }, @@ -1156,6 +1270,7 @@ {"shape":"IdempotentParameterMismatch"}, {"shape":"DoesNotExistException"}, {"shape":"ResourceLimitExceededException"}, + {"shape":"FeatureNotAvailableException"}, {"shape":"InternalServerError"} ], "documentation":"

Adds a new task to a Maintenance Window.

" @@ -1175,6 +1290,21 @@ ], "documentation":"

Removes all tags from the specified resource.

" }, + "SendAutomationSignal":{ + "name":"SendAutomationSignal", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SendAutomationSignalRequest"}, + "output":{"shape":"SendAutomationSignalResult"}, + "errors":[ + {"shape":"AutomationExecutionNotFoundException"}, + {"shape":"InvalidAutomationSignalException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Sends a signal to an Automation execution to change the current behavior or status of the execution.

" + }, "SendCommand":{ "name":"SendCommand", "http":{ @@ -1246,9 +1376,11 @@ {"shape":"InvalidUpdate"}, {"shape":"TooManyUpdates"}, {"shape":"InvalidDocument"}, - {"shape":"InvalidTarget"} + {"shape":"InvalidTarget"}, + {"shape":"InvalidAssociationVersion"}, + {"shape":"AssociationVersionLimitExceeded"} ], - "documentation":"

Updates an association. You can only update the document version, schedule, parameters, and Amazon S3 output of an association.

" + "documentation":"

Updates an association. You can update the association name and version, the document version, schedule, parameters, and Amazon S3 output.

" }, "UpdateAssociationStatus":{ "name":"UpdateAssociationStatus", @@ -1318,6 +1450,34 @@ ], "documentation":"

Updates an existing Maintenance Window. Only specified parameters are modified.

" }, + "UpdateMaintenanceWindowTarget":{ + "name":"UpdateMaintenanceWindowTarget", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateMaintenanceWindowTargetRequest"}, + "output":{"shape":"UpdateMaintenanceWindowTargetResult"}, + "errors":[ + {"shape":"DoesNotExistException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Modifies the target of an existing Maintenance Window. You can't change the target type, but you can change the following:

The target from being an ID target to a Tag target, or a Tag target to an ID target.

IDs for an ID target.

Tags for a Tag target.

Owner.

Name.

Description.

If a parameter is null, then the corresponding field is not modified.

" + }, + "UpdateMaintenanceWindowTask":{ + "name":"UpdateMaintenanceWindowTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateMaintenanceWindowTaskRequest"}, + "output":{"shape":"UpdateMaintenanceWindowTaskResult"}, + "errors":[ + {"shape":"DoesNotExistException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Modifies a task assigned to a Maintenance Window. You can't change the task type, but you can change the following values:

Task ARN. For example, you can change a RUN_COMMAND task from AWS-RunPowerShellScript to AWS-RunShellScript.

Service role ARN.

Task parameters.

Task priority.

Task MaxConcurrency and MaxErrors.

Log location.

If a parameter is null, then the corresponding field is not modified. Also, if you set Replace to true, then all fields required by the RegisterTaskWithMaintenanceWindow action are required for this request. Optional fields that aren't specified are set to null.

" + }, "UpdateManagedInstanceRole":{ "name":"UpdateManagedInstanceRole", "http":{ @@ -1491,6 +1651,10 @@ "shape":"AssociationId", "documentation":"

The ID created by the system when you create an association. An association is a binding between a document and a set of targets with a schedule.

" }, + "AssociationVersion":{ + "shape":"AssociationVersion", + "documentation":"

The association version.

" + }, "DocumentVersion":{ "shape":"DocumentVersion", "documentation":"

The version of the document used in the association.

" @@ -1510,6 +1674,10 @@ "ScheduleExpression":{ "shape":"ScheduleExpression", "documentation":"

A cron expression that specifies a schedule when the association runs.

" + }, + "AssociationName":{ + "shape":"AssociationName", + "documentation":"

The association name.

" } }, "documentation":"

Describes an association of a Systems Manager document and an instance.

" @@ -1532,6 +1700,10 @@ "shape":"InstanceId", "documentation":"

The ID of the instance.

" }, + "AssociationVersion":{ + "shape":"AssociationVersion", + "documentation":"

The association version.

" + }, "Date":{ "shape":"DateTime", "documentation":"

The date when the association was made.

" @@ -1579,6 +1751,10 @@ "LastSuccessfulExecutionDate":{ "shape":"DateTime", "documentation":"

The last date on which the association was successfully run.

" + }, + "AssociationName":{ + "shape":"AssociationName", + "documentation":"

The association name.

" } }, "documentation":"

Describes the parameters for a document.

" @@ -1624,7 +1800,8 @@ "AssociationId", "AssociationStatusName", "LastExecutedBefore", - "LastExecutedAfter" + "LastExecutedAfter", + "AssociationName" ] }, "AssociationFilterList":{ @@ -1657,6 +1834,10 @@ "locationName":"Association" } }, + "AssociationName":{ + "type":"string", + "pattern":"^[a-zA-Z0-9_\\-.]{3,128}$" + }, "AssociationOverview":{ "type":"structure", "members":{ @@ -1715,6 +1896,69 @@ "Failed" ] }, + "AssociationVersion":{ + "type":"string", + "pattern":"([$]LATEST)|([1-9][0-9]*)" + }, + "AssociationVersionInfo":{ + "type":"structure", + "members":{ + "AssociationId":{ + "shape":"AssociationId", + "documentation":"

The ID created by the system when the association was created.

" + }, + "AssociationVersion":{ + "shape":"AssociationVersion", + "documentation":"

The association version.

" + }, + "CreatedDate":{ + "shape":"DateTime", + "documentation":"

The date the association version was created.

" + }, + "Name":{ + "shape":"DocumentName", + "documentation":"

The name specified when the association was created.

" + }, + "DocumentVersion":{ + "shape":"DocumentVersion", + "documentation":"

The version of an SSM document used when the association version was created.

" + }, + "Parameters":{ + "shape":"Parameters", + "documentation":"

Parameters specified when the association version was created.

" + }, + "Targets":{ + "shape":"Targets", + "documentation":"

The targets specified for the association when the association version was created.

" + }, + "ScheduleExpression":{ + "shape":"ScheduleExpression", + "documentation":"

The cron or rate schedule specified for the association when the association version was created.

" + }, + "OutputLocation":{ + "shape":"InstanceAssociationOutputLocation", + "documentation":"

The location in Amazon S3 specified for the association when the association version was created.

" + }, + "AssociationName":{ + "shape":"AssociationName", + "documentation":"

The name specified for the association version when the association version was created.

" + } + }, + "documentation":"

Information about the association version.

" + }, + "AssociationVersionLimitExceeded":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

You have reached the maximum number of versions allowed for an association. Each association has a limit of 1,000 versions.

", + "exception":true + }, + "AssociationVersionList":{ + "type":"list", + "member":{"shape":"AssociationVersionInfo"}, + "min":1 + }, "AttributeName":{ "type":"string", "max":64, @@ -1722,7 +1966,7 @@ }, "AttributeValue":{ "type":"string", - "max":1024, + "max":4096, "min":0 }, "AutomationActionName":{ @@ -1907,6 +2151,7 @@ "enum":[ "Pending", "InProgress", + "Waiting", "Success", "TimedOut", "Cancelled", @@ -2287,6 +2532,278 @@ "max":100 }, "CompletedCount":{"type":"integer"}, + "ComplianceExecutionId":{ + "type":"string", + "max":100 + }, + "ComplianceExecutionSummary":{ + "type":"structure", + "required":["ExecutionTime"], + "members":{ + "ExecutionTime":{ + "shape":"DateTime", + "documentation":"

The time the execution ran as a datetime object that is saved in the following format: yyyy-MM-dd'T'HH:mm:ss'Z'.

" + }, + "ExecutionId":{ + "shape":"ComplianceExecutionId", + "documentation":"

An ID created by the system when PutComplianceItems was called. For example, CommandID is a valid execution ID. You can use this ID in subsequent calls.

" + }, + "ExecutionType":{ + "shape":"ComplianceExecutionType", + "documentation":"

The type of execution. For example, Command is a valid execution type.

" + } + }, + "documentation":"

A summary of the call execution that includes an execution ID, the type of execution (for example, Command), and the date/time of the execution using a datetime object that is saved in the following format: yyyy-MM-dd'T'HH:mm:ss'Z'.

" + }, + "ComplianceExecutionType":{ + "type":"string", + "max":50 + }, + "ComplianceFilterValue":{"type":"string"}, + "ComplianceItem":{ + "type":"structure", + "members":{ + "ComplianceType":{ + "shape":"ComplianceTypeName", + "documentation":"

The compliance type. For example, Association (for a State Manager association), Patch, or Custom:string are all valid compliance types.

" + }, + "ResourceType":{ + "shape":"ComplianceResourceType", + "documentation":"

The type of resource. ManagedInstance is currently the only supported resource type.

" + }, + "ResourceId":{ + "shape":"ComplianceResourceId", + "documentation":"

An ID for the resource. For a managed instance, this is the instance ID.

" + }, + "Id":{ + "shape":"ComplianceItemId", + "documentation":"

An ID for the compliance item. For example, if the compliance item is a Windows patch, the ID could be the number of the KB article. Here's an example: KB4010320.

" + }, + "Title":{ + "shape":"ComplianceItemTitle", + "documentation":"

A title for the compliance item. For example, if the compliance item is a Windows patch, the title could be the title of the KB article for the patch. Here's an example: Security Update for Active Directory Federation Services.

" + }, + "Status":{ + "shape":"ComplianceStatus", + "documentation":"

The status of the compliance item. An item is either COMPLIANT or NON_COMPLIANT.

" + }, + "Severity":{ + "shape":"ComplianceSeverity", + "documentation":"

The severity of the compliance status. Severity can be one of the following: Critical, High, Medium, Low, Informational, Unspecified.

" + }, + "ExecutionSummary":{ + "shape":"ComplianceExecutionSummary", + "documentation":"

A summary for the compliance item. The summary includes an execution ID, the execution type (for example, command), and the execution time.

" + }, + "Details":{ + "shape":"ComplianceItemDetails", + "documentation":"

A \"Key\": \"Value\" tag combination for the compliance item.

" + } + }, + "documentation":"

Information about the compliance as defined by the resource type. For example, for a patch resource type, Items includes information about the PatchSeverity, Classification, etc.

" + }, + "ComplianceItemContentHash":{ + "type":"string", + "max":256 + }, + "ComplianceItemDetails":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValue"} + }, + "ComplianceItemEntry":{ + "type":"structure", + "required":[ + "Severity", + "Status" + ], + "members":{ + "Id":{ + "shape":"ComplianceItemId", + "documentation":"

The compliance item ID. For example, if the compliance item is a Windows patch, the ID could be the number of the KB article.

" + }, + "Title":{ + "shape":"ComplianceItemTitle", + "documentation":"

The title of the compliance item. For example, if the compliance item is a Windows patch, the title could be the title of the KB article for the patch. Here's an example: Security Update for Active Directory Federation Services.

" + }, + "Severity":{ + "shape":"ComplianceSeverity", + "documentation":"

The severity of the compliance status. Severity can be one of the following: Critical, High, Medium, Low, Informational, Unspecified.

" + }, + "Status":{ + "shape":"ComplianceStatus", + "documentation":"

The status of the compliance item. An item is either COMPLIANT or NON_COMPLIANT.

" + }, + "Details":{ + "shape":"ComplianceItemDetails", + "documentation":"

A \"Key\": \"Value\" tag combination for the compliance item.

" + } + }, + "documentation":"

Information about a compliance item.

" + }, + "ComplianceItemEntryList":{ + "type":"list", + "member":{"shape":"ComplianceItemEntry"}, + "max":10000, + "min":0 + }, + "ComplianceItemId":{ + "type":"string", + "max":100, + "min":1 + }, + "ComplianceItemList":{ + "type":"list", + "member":{ + "shape":"ComplianceItem", + "locationName":"Item" + } + }, + "ComplianceItemTitle":{ + "type":"string", + "max":500 + }, + "ComplianceQueryOperatorType":{ + "type":"string", + "enum":[ + "EQUAL", + "NOT_EQUAL", + "BEGIN_WITH", + "LESS_THAN", + "GREATER_THAN" + ] + }, + "ComplianceResourceId":{ + "type":"string", + "max":100, + "min":1 + }, + "ComplianceResourceIdList":{ + "type":"list", + "member":{"shape":"ComplianceResourceId"}, + "min":1 + }, + "ComplianceResourceType":{ + "type":"string", + "max":50, + "min":1 + }, + "ComplianceResourceTypeList":{ + "type":"list", + "member":{"shape":"ComplianceResourceType"}, + "min":1 + }, + "ComplianceSeverity":{ + "type":"string", + "enum":[ + "CRITICAL", + "HIGH", + "MEDIUM", + "LOW", + "INFORMATIONAL", + "UNSPECIFIED" + ] + }, + "ComplianceStatus":{ + "type":"string", + "enum":[ + "COMPLIANT", + "NON_COMPLIANT" + ] + }, + "ComplianceStringFilter":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"ComplianceStringFilterKey", + "documentation":"

The name of the filter.

" + }, + "Values":{ + "shape":"ComplianceStringFilterValueList", + "documentation":"

The value for which to search.

" + }, + "Type":{ + "shape":"ComplianceQueryOperatorType", + "documentation":"

The type of comparison that should be performed for the value: Equal, NotEqual, BeginWith, LessThan, or GreaterThan.

" + } + }, + "documentation":"

One or more filters. Use a filter to return a more specific list of results.

" + }, + "ComplianceStringFilterKey":{ + "type":"string", + "max":200, + "min":1 + }, + "ComplianceStringFilterList":{ + "type":"list", + "member":{ + "shape":"ComplianceStringFilter", + "locationName":"ComplianceFilter" + } + }, + "ComplianceStringFilterValueList":{ + "type":"list", + "member":{ + "shape":"ComplianceFilterValue", + "locationName":"FilterValue" + }, + "max":20, + "min":1 + }, + "ComplianceSummaryCount":{"type":"integer"}, + "ComplianceSummaryItem":{ + "type":"structure", + "members":{ + "ComplianceType":{ + "shape":"ComplianceTypeName", + "documentation":"

The type of compliance item. For example, the compliance type can be Association, Patch, or Custom:string.

" + }, + "CompliantSummary":{ + "shape":"CompliantSummary", + "documentation":"

A list of COMPLIANT items for the specified compliance type.

" + }, + "NonCompliantSummary":{ + "shape":"NonCompliantSummary", + "documentation":"

A list of NON_COMPLIANT items for the specified compliance type.

" + } + }, + "documentation":"

A summary of compliance information by compliance type.

" + }, + "ComplianceSummaryItemList":{ + "type":"list", + "member":{ + "shape":"ComplianceSummaryItem", + "locationName":"Item" + } + }, + "ComplianceTypeCountLimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

You specified too many custom compliance types. You can specify a maximum of 10 different types.

", + "exception":true + }, + "ComplianceTypeName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[A-Za-z0-9_\\-]\\w+|Custom:[a-zA-Z0-9_\\-]\\w+" + }, + "CompliantSummary":{ + "type":"structure", + "members":{ + "CompliantCount":{ + "shape":"ComplianceSummaryCount", + "documentation":"

The total number of resources that are compliant.

" + }, + "SeveritySummary":{ + "shape":"SeveritySummary", + "documentation":"

A summary of the compliance severity by compliance type.

" + } + }, + "documentation":"

A summary of resources that are compliant. The summary is organized according to the resource count for each compliance type.

" + }, "ComputerName":{ "type":"string", "max":255, @@ -2381,6 +2898,10 @@ "OutputLocation":{ "shape":"InstanceAssociationOutputLocation", "documentation":"

An Amazon S3 bucket where you want to store the results of this request.

" + }, + "AssociationName":{ + "shape":"AssociationName", + "documentation":"

Specify a descriptive name for the association.

" } }, "documentation":"

Describes the association of a Systems Manager document and an instance.

" @@ -2429,6 +2950,10 @@ "OutputLocation":{ "shape":"InstanceAssociationOutputLocation", "documentation":"

An Amazon S3 bucket where you want to store the output details of the request.

" + }, + "AssociationName":{ + "shape":"AssociationName", + "documentation":"

Specify a descriptive name for the association.

" } } }, @@ -2485,6 +3010,10 @@ "shape":"MaintenanceWindowName", "documentation":"

The name of the Maintenance Window.

" }, + "Description":{ + "shape":"MaintenanceWindowDescription", + "documentation":"

An optional description for the Maintenance Window. We recommend specifying a description to help you organize your Maintenance Windows.

" + }, "Schedule":{ "shape":"MaintenanceWindowSchedule", "documentation":"

The schedule of the Maintenance Window in the form of a cron or rate expression.

" @@ -2499,7 +3028,7 @@ }, "AllowUnassociatedTargets":{ "shape":"MaintenanceWindowAllowUnassociatedTargets", - "documentation":"

Whether targets must be registered with the Maintenance Window before tasks can be defined for those targets.

" + "documentation":"

Enables a Maintenance Window task to execute on managed instances, even if you have not registered those instances as targets. If enabled, then you must specify the unregistered instances (by instance ID) when you register a task with the Maintenance Window.

If you don't enable this option, then you must specify previously-registered targets when you register a task with the Maintenance Window.

" }, "ClientToken":{ "shape":"ClientToken", @@ -2810,6 +3339,11 @@ "WindowTargetId":{ "shape":"MaintenanceWindowTargetId", "documentation":"

The ID of the target definition to remove.

" + }, + "Safe":{ + "shape":"Boolean", + "documentation":"

The system checks if the target is being referenced by a task. If the target is being referenced, the system returns an error and does not deregister the target from the Maintenance Window.

", + "box":true } } }, @@ -2927,6 +3461,10 @@ "AssociationId":{ "shape":"AssociationId", "documentation":"

The association ID for which you want information.

" + }, + "AssociationVersion":{ + "shape":"AssociationVersion", + "documentation":"

Specify the association version to retrieve. To view the latest version, either specify $LATEST for this parameter, or omit this parameter. To view a list of all associations for an instance, use ListInstanceAssociations. To get a list of versions for a specific association, use ListAssociationVersions.

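A minimal sketch, assuming a hypothetical association ID, of asking DescribeAssociation for the newest version via $LATEST:

import botocore.session

ssm = botocore.session.get_session().create_client('ssm', region_name='us-east-1')

resp = ssm.describe_association(
    AssociationId='fedcba98-7654-3210-fedc-ba9876543210',  # hypothetical
    AssociationVersion='$LATEST',                          # newest version
)
print(resp['AssociationDescription']['AssociationVersion'])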
" } } }, @@ -4051,6 +4589,14 @@ "Unknown" ] }, + "FeatureNotAvailableException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

You attempted to register a LAMBDA or STEP_FUNCTION task in a region where the corresponding service is not available.

", + "exception":true + }, "GetAutomationExecutionRequest":{ "type":"structure", "required":["AutomationExecutionId"], @@ -4306,6 +4852,11 @@ "shape":"GetInventorySchemaMaxResults", "documentation":"

The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.

", "box":true + }, + "SubType":{ + "shape":"IsSubTypeSchema", + "documentation":"

Returns the sub-type schema for a specified inventory type.

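For example, a minimal sketch of requesting the sub-type schema for a single inventory type (the AWS:ComplianceItem type name here is an assumption for illustration):

import botocore.session

ssm = botocore.session.get_session().create_client('ssm', region_name='us-east-1')

resp = ssm.get_inventory_schema(TypeName='AWS:ComplianceItem', SubType=True)
for schema in resp['Schemas']:
    print(schema['TypeName'])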
", + "box":true } } }, @@ -4361,6 +4912,81 @@ } } }, + "GetMaintenanceWindowExecutionTaskInvocationRequest":{ + "type":"structure", + "required":[ + "WindowExecutionId", + "TaskId", + "InvocationId" + ], + "members":{ + "WindowExecutionId":{ + "shape":"MaintenanceWindowExecutionId", + "documentation":"

The ID of the Maintenance Window execution for which the task is a part.

" + }, + "TaskId":{ + "shape":"MaintenanceWindowExecutionTaskId", + "documentation":"

The ID of the specific task in the Maintenance Window task that should be retrieved.

" + }, + "InvocationId":{ + "shape":"MaintenanceWindowExecutionTaskInvocationId", + "documentation":"

The invocation ID to retrieve.

" + } + } + }, + "GetMaintenanceWindowExecutionTaskInvocationResult":{ + "type":"structure", + "members":{ + "WindowExecutionId":{ + "shape":"MaintenanceWindowExecutionId", + "documentation":"

The Maintenance Window execution ID.

" + }, + "TaskExecutionId":{ + "shape":"MaintenanceWindowExecutionTaskId", + "documentation":"

The task execution ID.

" + }, + "InvocationId":{ + "shape":"MaintenanceWindowExecutionTaskInvocationId", + "documentation":"

The invocation ID.

" + }, + "ExecutionId":{ + "shape":"MaintenanceWindowExecutionTaskExecutionId", + "documentation":"

The execution ID.

" + }, + "TaskType":{ + "shape":"MaintenanceWindowTaskType", + "documentation":"

Retrieves the task type for a Maintenance Window. Task types include the following: LAMBDA, STEP_FUNCTION, AUTOMATION, RUN_COMMAND.

" + }, + "Parameters":{ + "shape":"MaintenanceWindowExecutionTaskInvocationParameters", + "documentation":"

The parameters used at the time that the task executed.

" + }, + "Status":{ + "shape":"MaintenanceWindowExecutionStatus", + "documentation":"

The task status for an invocation.

" + }, + "StatusDetails":{ + "shape":"MaintenanceWindowExecutionStatusDetails", + "documentation":"

The details explaining the status. Details are only available for certain status values.

" + }, + "StartTime":{ + "shape":"DateTime", + "documentation":"

The time that the task started executing on the target.

" + }, + "EndTime":{ + "shape":"DateTime", + "documentation":"

The time that the task finished executing on the target.

" + }, + "OwnerInformation":{ + "shape":"OwnerInformation", + "documentation":"

User-provided value to be included in any CloudWatch events raised while running tasks for these targets in this Maintenance Window.

" + }, + "WindowTargetId":{ + "shape":"MaintenanceWindowTaskTargetId", + "documentation":"

The Maintenance Window target ID.

" + } + } + }, "GetMaintenanceWindowExecutionTaskRequest":{ "type":"structure", "required":[ @@ -4456,6 +5082,10 @@ "shape":"MaintenanceWindowName", "documentation":"

The name of the Maintenance Window.

" }, + "Description":{ + "shape":"MaintenanceWindowDescription", + "documentation":"

The description of the Maintenance Window.

" + }, "Schedule":{ "shape":"MaintenanceWindowSchedule", "documentation":"

The schedule of the Maintenance Window in the form of a cron or rate expression.

" @@ -4486,6 +5116,84 @@ } } }, + "GetMaintenanceWindowTaskRequest":{ + "type":"structure", + "required":[ + "WindowId", + "WindowTaskId" + ], + "members":{ + "WindowId":{ + "shape":"MaintenanceWindowId", + "documentation":"

The Maintenance Window ID that includes the task to retrieve.

" + }, + "WindowTaskId":{ + "shape":"MaintenanceWindowTaskId", + "documentation":"

The Maintenance Window task ID to retrieve.

" + } + } + }, + "GetMaintenanceWindowTaskResult":{ + "type":"structure", + "members":{ + "WindowId":{ + "shape":"MaintenanceWindowId", + "documentation":"

The retrieved Maintenance Window ID.

" + }, + "WindowTaskId":{ + "shape":"MaintenanceWindowTaskId", + "documentation":"

The retrieved Maintenance Window task ID.

" + }, + "Targets":{ + "shape":"Targets", + "documentation":"

The targets where the task should execute.

" + }, + "TaskArn":{ + "shape":"MaintenanceWindowTaskArn", + "documentation":"

The resource that the task used during execution. For RUN_COMMAND and AUTOMATION task types, the TaskArn is the SSM Document name/ARN. For LAMBDA tasks, the value is the function name/ARN. For STEP_FUNCTION tasks, the value is the state machine ARN.

" + }, + "ServiceRoleArn":{ + "shape":"ServiceRole", + "documentation":"

The IAM service role to assume during task execution.

" + }, + "TaskType":{ + "shape":"MaintenanceWindowTaskType", + "documentation":"

The type of task to execute.

" + }, + "TaskParameters":{ + "shape":"MaintenanceWindowTaskParameters", + "documentation":"

The parameters to pass to the task when it executes.

" + }, + "TaskInvocationParameters":{ + "shape":"MaintenanceWindowTaskInvocationParameters", + "documentation":"

The parameters to pass to the task when it executes.

" + }, + "Priority":{ + "shape":"MaintenanceWindowTaskPriority", + "documentation":"

The priority of the task when it executes. The lower the number, the higher the priority. Tasks that have the same priority are scheduled in parallel.

" + }, + "MaxConcurrency":{ + "shape":"MaxConcurrency", + "documentation":"

The maximum number of targets allowed to run this task in parallel.

" + }, + "MaxErrors":{ + "shape":"MaxErrors", + "documentation":"

The maximum number of errors allowed before the task stops being scheduled.

" + }, + "LoggingInfo":{ + "shape":"LoggingInfo", + "documentation":"

The location in Amazon S3 where the task results are logged.

" + }, + "Name":{ + "shape":"MaintenanceWindowName", + "documentation":"

The retrieved task name.

" + }, + "Description":{ + "shape":"MaintenanceWindowDescription", + "documentation":"

The retrieved task description.

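A minimal retrieval sketch, with hypothetical window and task IDs:

import botocore.session

ssm = botocore.session.get_session().create_client('ssm', region_name='us-east-1')

task = ssm.get_maintenance_window_task(
    WindowId='mw-0123456789abcdef0',                      # hypothetical
    WindowTaskId='01234567-0123-0123-0123-0123456789ab',  # hypothetical
)
print(task['TaskType'], task['TaskArn'])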
" + } + } + }, "GetParameterHistoryRequest":{ "type":"structure", "required":["Name"], @@ -4787,6 +5495,10 @@ "Content":{ "shape":"DocumentContent", "documentation":"

The content of the association document for the instance(s).

" + }, + "AssociationVersion":{ + "shape":"AssociationVersion", + "documentation":"

Version information for the association on the instance.

" } }, "documentation":"

One or more association documents on the instance.

" @@ -4840,6 +5552,10 @@ "shape":"DocumentVersion", "documentation":"

The association document version.

" }, + "AssociationVersion":{ + "shape":"AssociationVersion", + "documentation":"

The version of the association applied to the instance.

" + }, "InstanceId":{ "shape":"InstanceId", "documentation":"

The instance ID where the association was created.

" @@ -4867,6 +5583,10 @@ "OutputUrl":{ "shape":"InstanceAssociationOutputUrl", "documentation":"

A URL for an Amazon S3 bucket where you want to store the results of this request.

" + }, + "AssociationName":{ + "shape":"AssociationName", + "documentation":"

The name of the association applied to the instance.

" } }, "documentation":"

Status information about the instance association.

" @@ -5226,6 +5946,14 @@ "documentation":"

The request does not meet the regular expression requirement.

", "exception":true }, + "InvalidAssociationVersion":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The version you specified is not valid. Use ListAssociationVersions to view all versions of an association according to the association ID. Or, use the $LATEST parameter to view the latest version of the association.

", + "exception":true + }, "InvalidAutomationExecutionParametersException":{ "type":"structure", "members":{ @@ -5234,6 +5962,14 @@ "documentation":"

The supplied parameters for invoking the specified Automation document are incorrect. For example, they may not match the set of parameters permitted for the specified Automation document.

", "exception":true }, + "InvalidAutomationSignalException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The signal is not valid for the current Automation execution.

", + "exception":true + }, "InvalidCommandId":{ "type":"structure", "members":{ @@ -5336,6 +6072,14 @@ "documentation":"

The specified filter value is not valid.

", "exception":true }, + "InvalidInventoryItemContextException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

You specified invalid keys or values in the Context attribute for InventoryItem. Verify the keys and values, and try again.

", + "exception":true + }, "InvalidItemContentException":{ "type":"structure", "members":{ @@ -5548,6 +6292,10 @@ "Content":{ "shape":"InventoryItemEntryList", "documentation":"

The inventory data of the inventory type.

" + }, + "Context":{ + "shape":"InventoryItemContentContext", + "documentation":"

A map of associated properties for a specified inventory type. For example, with this attribute, you can specify the ExecutionId, ExecutionType, and ComplianceType properties of the AWS:ComplianceItem type.

" } }, "documentation":"

Information collected from managed instances based on your inventory policy document.

" @@ -5584,6 +6332,13 @@ "type":"string", "pattern":"^(20)[0-9][0-9]-(0[1-9]|1[012])-([12][0-9]|3[01]|0[1-9])(T)(2[0-3]|[0-1][0-9])(:[0-5][0-9])(:[0-5][0-9])(Z)$" }, + "InventoryItemContentContext":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValue"}, + "max":50, + "min":0 + }, "InventoryItemContentHash":{ "type":"string", "max":256 @@ -5731,6 +6486,7 @@ "type":"string", "max":2500 }, + "IsSubTypeSchema":{"type":"boolean"}, "ItemContentMismatchException":{ "type":"structure", "members":{ @@ -5763,6 +6519,38 @@ }, "LastResourceDataSyncTime":{"type":"timestamp"}, "LastSuccessfulResourceDataSyncTime":{"type":"timestamp"}, + "ListAssociationVersionsRequest":{ + "type":"structure", + "required":["AssociationId"], + "members":{ + "AssociationId":{ + "shape":"AssociationId", + "documentation":"

The association ID for which you want to view all versions.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.

", + "box":true + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token to start the list. Use this token to get the next set of results.

" + } + } + }, + "ListAssociationVersionsResult":{ + "type":"structure", + "members":{ + "AssociationVersions":{ + "shape":"AssociationVersionList", + "documentation":"

Information about all versions of the association for the specified association ID.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of items to return. Use this token to get the next set of results.

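A minimal sketch of walking every version of a hypothetical association by following NextToken:

import botocore.session

ssm = botocore.session.get_session().create_client('ssm', region_name='us-east-1')

kwargs = {'AssociationId': 'fedcba98-7654-3210-fedc-ba9876543210',  # hypothetical
          'MaxResults': 10}
while True:
    page = ssm.list_association_versions(**kwargs)
    for version in page['AssociationVersions']:
        print(version['AssociationVersion'], version.get('AssociationName'))
    if 'NextToken' not in page:
        break                                # no more pages
    kwargs['NextToken'] = page['NextToken']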
" + } + } + }, "ListAssociationsRequest":{ "type":"structure", "members":{ @@ -5876,6 +6664,76 @@ } } }, + "ListComplianceItemsRequest":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"ComplianceStringFilterList", + "documentation":"

One or more compliance filters. Use a filter to return a more specific list of results.

" + }, + "ResourceIds":{ + "shape":"ComplianceResourceIdList", + "documentation":"

The ID for the resources from which to get compliance information. Currently, you can only specify one resource ID.

" + }, + "ResourceTypes":{ + "shape":"ComplianceResourceTypeList", + "documentation":"

The type of resource from which to get compliance information. Currently, the only supported resource type is ManagedInstance.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token to start the list. Use this token to get the next set of results.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.

", + "box":true + } + } + }, + "ListComplianceItemsResult":{ + "type":"structure", + "members":{ + "ComplianceItems":{ + "shape":"ComplianceItemList", + "documentation":"

A list of compliance information for the specified resource ID.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of items to return. Use this token to get the next set of results.

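A minimal sketch, with a hypothetical instance ID, of listing Association compliance items for one managed instance (the ComplianceType filter key is an assumption):

import botocore.session

ssm = botocore.session.get_session().create_client('ssm', region_name='us-east-1')

resp = ssm.list_compliance_items(
    ResourceIds=['i-0123456789abcdef0'],   # hypothetical; only one ID allowed
    ResourceTypes=['ManagedInstance'],
    Filters=[{'Key': 'ComplianceType', 'Values': ['Association'],
              'Type': 'EQUAL'}],
)
for item in resp['ComplianceItems']:
    print(item['Id'], item['Status'], item['Severity'])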
" + } + } + }, + "ListComplianceSummariesRequest":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"ComplianceStringFilterList", + "documentation":"

One or more compliance or inventory filters. Use a filter to return a more specific list of results.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token to start the list. Use this token to get the next set of results.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of items to return for this call. Currently, you can specify null or 50. The call also returns a token that you can specify in a subsequent call to get the next set of results.

", + "box":true + } + } + }, + "ListComplianceSummariesResult":{ + "type":"structure", + "members":{ + "ComplianceSummaryItems":{ + "shape":"ComplianceSummaryItemList", + "documentation":"

A list of compliant and non-compliant summary counts based on compliance types. For example, this call returns State Manager associations, patches, or custom compliance types according to the filter criteria that you specified.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of items to return. Use this token to get the next set of results.

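And a matching sketch for the account-wide summary call (50 is the documented maximum page size):

import botocore.session

ssm = botocore.session.get_session().create_client('ssm', region_name='us-east-1')

resp = ssm.list_compliance_summaries(MaxResults=50)
for summary in resp['ComplianceSummaryItems']:
    print(summary['ComplianceType'],
          summary['CompliantSummary']['CompliantCount'],
          summary['NonCompliantSummary']['NonCompliantCount'])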
" + } + } + }, "ListDocumentVersionsRequest":{ "type":"structure", "required":["Name"], @@ -5998,6 +6856,37 @@ } } }, + "ListResourceComplianceSummariesRequest":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"ComplianceStringFilterList", + "documentation":"

One or more filters. Use a filter to return a more specific list of results.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token to start the list. Use this token to get the next set of results.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.

", + "box":true + } + } + }, + "ListResourceComplianceSummariesResult":{ + "type":"structure", + "members":{ + "ResourceComplianceSummaryItems":{ + "shape":"ResourceComplianceSummaryItemList", + "documentation":"

A summary count for specified or targeted managed instances. The summary count includes information about compliant and non-compliant State Manager associations, patch status, or custom items according to the filter criteria that you specify.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of items to return. Use this token to get the next set of results.

" + } + } + }, "ListResourceDataSyncRequest":{ "type":"structure", "members":{ @@ -6074,11 +6963,31 @@ "documentation":"

Information about an Amazon S3 bucket to write instance-level logs to.

" }, "MaintenanceWindowAllowUnassociatedTargets":{"type":"boolean"}, + "MaintenanceWindowAutomationParameters":{ + "type":"structure", + "members":{ + "DocumentVersion":{ + "shape":"DocumentVersion", + "documentation":"

The version of an Automation document to use during task execution.

" + }, + "Parameters":{ + "shape":"AutomationParameterMap", + "documentation":"

The parameters for the AUTOMATION task.

" + } + }, + "documentation":"

The parameters for an AUTOMATION task type.

" + }, "MaintenanceWindowCutoff":{ "type":"integer", "max":23, "min":0 }, + "MaintenanceWindowDescription":{ + "type":"string", + "max":128, + "min":1, + "sensitive":true + }, "MaintenanceWindowDurationHours":{ "type":"integer", "max":24, @@ -6221,6 +7130,10 @@ "shape":"MaintenanceWindowExecutionTaskExecutionId", "documentation":"

The ID of the action performed in the service that actually handled the task invocation. If the task type is RUN_COMMAND, this value is the command ID.

" }, + "TaskType":{ + "shape":"MaintenanceWindowTaskType", + "documentation":"

The task type.

" + }, "Parameters":{ "shape":"MaintenanceWindowExecutionTaskInvocationParameters", "documentation":"

The parameters that were provided for the invocation when it was executed.

" @@ -6311,6 +7224,10 @@ "shape":"MaintenanceWindowName", "documentation":"

The name of the Maintenance Window.

" }, + "Description":{ + "shape":"MaintenanceWindowDescription", + "documentation":"

A description of the Maintenance Window.

" + }, "Enabled":{ "shape":"MaintenanceWindowEnabled", "documentation":"

Whether the Maintenance Window is enabled.

" @@ -6330,6 +7247,39 @@ "type":"list", "member":{"shape":"MaintenanceWindowIdentity"} }, + "MaintenanceWindowLambdaClientContext":{ + "type":"string", + "max":8000, + "min":1 + }, + "MaintenanceWindowLambdaParameters":{ + "type":"structure", + "members":{ + "ClientContext":{ + "shape":"MaintenanceWindowLambdaClientContext", + "documentation":"

Pass client-specific information to the Lambda function that you are invoking. You can then process the client information in your Lambda function as you choose through the context variable.

" + }, + "Qualifier":{ + "shape":"MaintenanceWindowLambdaQualifier", + "documentation":"

(Optional) Specify a Lambda function version or alias name. If you specify a function version, the action uses the qualified function ARN to invoke a specific Lambda function. If you specify an alias name, the action uses the alias ARN to invoke the Lambda function version to which the alias points.

" + }, + "Payload":{ + "shape":"MaintenanceWindowLambdaPayload", + "documentation":"

JSON to provide to your Lambda function as input.

" + } + }, + "documentation":"

The parameters for a LAMBDA task type.

" + }, + "MaintenanceWindowLambdaPayload":{ + "type":"blob", + "max":4096, + "sensitive":true + }, + "MaintenanceWindowLambdaQualifier":{ + "type":"string", + "max":128, + "min":1 + }, "MaintenanceWindowMaxResults":{ "type":"integer", "max":100, @@ -6345,11 +7295,78 @@ "type":"string", "enum":["INSTANCE"] }, + "MaintenanceWindowRunCommandParameters":{ + "type":"structure", + "members":{ + "Comment":{ + "shape":"Comment", + "documentation":"

Information about the command(s) to execute.

" + }, + "DocumentHash":{ + "shape":"DocumentHash", + "documentation":"

The SHA-256 or SHA-1 hash created by the system when the document was created. SHA-1 hashes have been deprecated.

" + }, + "DocumentHashType":{ + "shape":"DocumentHashType", + "documentation":"

SHA-256 or SHA-1. SHA-1 hashes have been deprecated.

" + }, + "NotificationConfig":{ + "shape":"NotificationConfig", + "documentation":"

Configurations for sending notifications about command status changes on a per-instance basis.

" + }, + "OutputS3BucketName":{ + "shape":"S3BucketName", + "documentation":"

The name of the Amazon S3 bucket.

" + }, + "OutputS3KeyPrefix":{ + "shape":"S3KeyPrefix", + "documentation":"

The Amazon S3 bucket subfolder.

" + }, + "Parameters":{ + "shape":"Parameters", + "documentation":"

The parameters for the RUN_COMMAND task execution.

" + }, + "ServiceRoleArn":{ + "shape":"ServiceRole", + "documentation":"

The IAM service role to assume during task execution.

" + }, + "TimeoutSeconds":{ + "shape":"TimeoutSeconds", + "documentation":"

If this time is reached and the command has not already started executing, it does not execute.

", + "box":true + } + }, + "documentation":"

The parameters for a RUN_COMMAND task type.

" + }, "MaintenanceWindowSchedule":{ "type":"string", "max":256, "min":1 }, + "MaintenanceWindowStepFunctionsInput":{ + "type":"string", + "max":4096, + "sensitive":true + }, + "MaintenanceWindowStepFunctionsName":{ + "type":"string", + "max":80, + "min":1 + }, + "MaintenanceWindowStepFunctionsParameters":{ + "type":"structure", + "members":{ + "Input":{ + "shape":"MaintenanceWindowStepFunctionsInput", + "documentation":"

The inputs for the STEP_FUNCTION task.

" + }, + "Name":{ + "shape":"MaintenanceWindowStepFunctionsName", + "documentation":"

The name of the STEP_FUNCTION task.

" + } + }, + "documentation":"

The parameters for the STEP_FUNCTION execution.

" + }, "MaintenanceWindowTarget":{ "type":"structure", "members":{ @@ -6372,6 +7389,14 @@ "OwnerInformation":{ "shape":"OwnerInformation", "documentation":"

User-provided value that will be included in any CloudWatch events raised while running tasks for these targets in this Maintenance Window.

" + }, + "Name":{ + "shape":"MaintenanceWindowName", + "documentation":"

The target name.

" + }, + "Description":{ + "shape":"MaintenanceWindowDescription", + "documentation":"

A description of the target.

" } }, "documentation":"

The target registered with the Maintenance Window.

" @@ -6399,11 +7424,11 @@ }, "TaskArn":{ "shape":"MaintenanceWindowTaskArn", - "documentation":"

The ARN of the task to execute.

" + "documentation":"

The resource that the task uses during execution. For RUN_COMMAND and AUTOMATION task types, TaskArn is the SSM document name or ARN. For LAMBDA tasks, it's the function name or ARN. For STEP_FUNCTION tasks, it's the state machine ARN.

" }, "Type":{ "shape":"MaintenanceWindowTaskType", - "documentation":"

The type of task.

" + "documentation":"

The type of task. The type can be one of the following: RUN_COMMAND, AUTOMATION, LAMBDA, or STEP_FUNCTION.

" }, "Targets":{ "shape":"Targets", @@ -6415,7 +7440,7 @@ }, "Priority":{ "shape":"MaintenanceWindowTaskPriority", - "documentation":"

The priority of the task in the Maintenance Window, the lower the number the higher the priority. Tasks in a Maintenance Window are scheduled in priority order with tasks that have the same priority scheduled in parallel.

" + "documentation":"

The priority of the task in the Maintenance Window. The lower the number, the higher the priority. Tasks that have the same priority are scheduled in parallel.

" }, "LoggingInfo":{ "shape":"LoggingInfo", @@ -6432,6 +7457,14 @@ "MaxErrors":{ "shape":"MaxErrors", "documentation":"

The maximum number of errors allowed before this task stops being scheduled.

" + }, + "Name":{ + "shape":"MaintenanceWindowName", + "documentation":"

The task name.

" + }, + "Description":{ + "shape":"MaintenanceWindowDescription", + "documentation":"

A description of the task.

" } }, "documentation":"

Information about a task defined for a Maintenance Window.

" @@ -6447,6 +7480,28 @@ "min":36, "pattern":"^[0-9a-fA-F]{8}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{12}$" }, + "MaintenanceWindowTaskInvocationParameters":{ + "type":"structure", + "members":{ + "RunCommand":{ + "shape":"MaintenanceWindowRunCommandParameters", + "documentation":"

The parameters for a RUN_COMMAND task type.

" + }, + "Automation":{ + "shape":"MaintenanceWindowAutomationParameters", + "documentation":"

The parameters for an AUTOMATION task type.

" + }, + "StepFunctions":{ + "shape":"MaintenanceWindowStepFunctionsParameters", + "documentation":"

The parameters for a STEP_FUNCTION task type.

" + }, + "Lambda":{ + "shape":"MaintenanceWindowLambdaParameters", + "documentation":"

The parameters for a LAMBDA task type.

" + } + }, + "documentation":"

The parameters for task execution.

" + }, "MaintenanceWindowTaskList":{ "type":"list", "member":{"shape":"MaintenanceWindowTask"} @@ -6499,7 +7554,12 @@ }, "MaintenanceWindowTaskType":{ "type":"string", - "enum":["RUN_COMMAND"] + "enum":[ + "RUN_COMMAND", + "AUTOMATION", + "STEP_FUNCTIONS", + "LAMBDA" + ] }, "ManagedInstanceId":{ "type":"string", @@ -6566,6 +7626,20 @@ } }, "NextToken":{"type":"string"}, + "NonCompliantSummary":{ + "type":"structure", + "members":{ + "NonCompliantCount":{ + "shape":"ComplianceSummaryCount", + "documentation":"

The total number of compliance items that are not compliant.

" + }, + "SeveritySummary":{ + "shape":"SeveritySummary", + "documentation":"

A summary of the non-compliance severity by compliance type.

" + } + }, + "documentation":"

A summary of resources that are not compliant. The summary is organized according to resource type.

" + }, "NormalStringMap":{ "type":"map", "key":{"shape":"String"}, @@ -7306,6 +8380,47 @@ } }, "Product":{"type":"string"}, + "PutComplianceItemsRequest":{ + "type":"structure", + "required":[ + "ResourceId", + "ResourceType", + "ComplianceType", + "ExecutionSummary", + "Items" + ], + "members":{ + "ResourceId":{ + "shape":"ComplianceResourceId", + "documentation":"

Specify an ID for this resource. For a managed instance, this is the instance ID.

" + }, + "ResourceType":{ + "shape":"ComplianceResourceType", + "documentation":"

Specify the type of resource. ManagedInstance is currently the only supported resource type.

" + }, + "ComplianceType":{ + "shape":"ComplianceTypeName", + "documentation":"

Specify the compliance type. For example, specify Association (for a State Manager association), Patch, or Custom:string.

" + }, + "ExecutionSummary":{ + "shape":"ComplianceExecutionSummary", + "documentation":"

A summary of the call execution that includes an execution ID, the type of execution (for example, Command), and the date/time of the execution using a datetime object that is saved in the following format: yyyy-MM-dd'T'HH:mm:ss'Z'.

" + }, + "Items":{ + "shape":"ComplianceItemEntryList", + "documentation":"

Information about the compliance as defined by the resource type. For example, for a patch compliance type, Items includes information about the PatchSeverity, Classification, etc.

" + }, + "ItemContentHash":{ + "shape":"ComplianceItemContentHash", + "documentation":"

MD5 or SHA-256 content hash. The content hash is used to determine if existing information should be overwritten or ignored. If the content hashes match, the request to put compliance information is ignored.

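Putting it together, a minimal sketch that records one custom compliance item against a hypothetical managed instance (the Custom:BackupCheck type name is invented for illustration):

import datetime
import botocore.session

ssm = botocore.session.get_session().create_client('ssm', region_name='us-east-1')

ssm.put_compliance_items(
    ResourceId='i-0123456789abcdef0',      # hypothetical instance ID
    ResourceType='ManagedInstance',
    ComplianceType='Custom:BackupCheck',   # invented custom type
    ExecutionSummary={
        'ExecutionTime': datetime.datetime(2017, 8, 24),
        'ExecutionType': 'Command',
    },
    Items=[{
        'Id': 'backup-01',
        'Title': 'Nightly backup completed',
        'Severity': 'MEDIUM',
        'Status': 'COMPLIANT',
    }],
)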
" + } + } + }, + "PutComplianceItemsResult":{ + "type":"structure", + "members":{ + } + }, "PutInventoryRequest":{ "type":"structure", "required":[ @@ -7445,6 +8560,14 @@ "shape":"OwnerInformation", "documentation":"

User-provided value that will be included in any CloudWatch events raised while running tasks for these targets in this Maintenance Window.

" }, + "Name":{ + "shape":"MaintenanceWindowName", + "documentation":"

An optional name for the target.

" + }, + "Description":{ + "shape":"MaintenanceWindowDescription", + "documentation":"

An optional description for the target.

" + }, "ClientToken":{ "shape":"ClientToken", "documentation":"

User-provided idempotency token.

", @@ -7497,6 +8620,10 @@ "shape":"MaintenanceWindowTaskParameters", "documentation":"

The parameters that should be passed to the task when it is executed.

" }, + "TaskInvocationParameters":{ + "shape":"MaintenanceWindowTaskInvocationParameters", + "documentation":"

The parameters that the task should use during execution. Populate only the fields that match the task type. All other fields should be empty.

" + }, "Priority":{ "shape":"MaintenanceWindowTaskPriority", "documentation":"

The priority of the task in the Maintenance Window, the lower the number the higher the priority. Tasks in a Maintenance Window are scheduled in priority order with tasks that have the same priority scheduled in parallel.

", @@ -7514,6 +8641,14 @@ "shape":"LoggingInfo", "documentation":"

A structure containing information about an Amazon S3 bucket to write instance-level logs to.

" }, + "Name":{ + "shape":"MaintenanceWindowName", + "documentation":"

An optional name for the task.

" + }, + "Description":{ + "shape":"MaintenanceWindowDescription", + "documentation":"

An optional description for the task.

" + }, "ClientToken":{ "shape":"ClientToken", "documentation":"

User-provided idempotency token.

", @@ -7567,6 +8702,51 @@ "members":{ } }, + "ResourceComplianceSummaryItem":{ + "type":"structure", + "members":{ + "ComplianceType":{ + "shape":"ComplianceTypeName", + "documentation":"

The compliance type.

" + }, + "ResourceType":{ + "shape":"ComplianceResourceType", + "documentation":"

The resource type.

" + }, + "ResourceId":{ + "shape":"ComplianceResourceId", + "documentation":"

The resource ID.

" + }, + "Status":{ + "shape":"ComplianceStatus", + "documentation":"

The compliance status for the resource.

" + }, + "OverallSeverity":{ + "shape":"ComplianceSeverity", + "documentation":"

The highest severity item found for the resource. The resource is compliant for this item.

" + }, + "ExecutionSummary":{ + "shape":"ComplianceExecutionSummary", + "documentation":"

Information about the execution.

" + }, + "CompliantSummary":{ + "shape":"CompliantSummary", + "documentation":"

A list of items that are compliant for the resource.

" + }, + "NonCompliantSummary":{ + "shape":"NonCompliantSummary", + "documentation":"

A list of items that aren't compliant for the resource.

" + } + }, + "documentation":"

Compliance summary information for a specific resource.

" + }, + "ResourceComplianceSummaryItemList":{ + "type":"list", + "member":{ + "shape":"ResourceComplianceSummaryItem", + "locationName":"Item" + } + }, "ResourceDataSyncAlreadyExistsException":{ "type":"structure", "members":{ @@ -7786,6 +8966,32 @@ "max":256, "min":1 }, + "SendAutomationSignalRequest":{ + "type":"structure", + "required":[ + "AutomationExecutionId", + "SignalType" + ], + "members":{ + "AutomationExecutionId":{ + "shape":"AutomationExecutionId", + "documentation":"

The unique identifier for an existing Automation execution that you want to send the signal to.

" + }, + "SignalType":{ + "shape":"SignalType", + "documentation":"

The type of signal. Valid signal types include the following: Approve and Reject.

" + }, + "Payload":{ + "shape":"AutomationParameterMap", + "documentation":"

The data sent with the signal. The data schema depends on the type of signal used in the request.

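A minimal approval sketch with a hypothetical execution ID (the Comment payload key is an assumption):

import botocore.session

ssm = botocore.session.get_session().create_client('ssm', region_name='us-east-1')

ssm.send_automation_signal(
    AutomationExecutionId='01234567-89ab-cdef-0123-456789abcdef',  # hypothetical
    SignalType='Approve',
    Payload={'Comment': ['Approved after review']},  # map of str -> list of str
)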
" + } + } + }, + "SendAutomationSignalResult":{ + "type":"structure", + "members":{ + } + }, "SendCommandRequest":{ "type":"structure", "required":["DocumentName"], @@ -7863,6 +9069,43 @@ } }, "ServiceRole":{"type":"string"}, + "SeveritySummary":{ + "type":"structure", + "members":{ + "CriticalCount":{ + "shape":"ComplianceSummaryCount", + "documentation":"

The total number of resources or compliance items that have a severity level of critical. Critical severity is determined by the organization that published the compliance items.

" + }, + "HighCount":{ + "shape":"ComplianceSummaryCount", + "documentation":"

The total number of resources or compliance items that have a severity level of high. High severity is determined by the organization that published the compliance items.

" + }, + "MediumCount":{ + "shape":"ComplianceSummaryCount", + "documentation":"

The total number of resources or compliance items that have a severity level of medium. Medium severity is determined by the organization that published the compliance items.

" + }, + "LowCount":{ + "shape":"ComplianceSummaryCount", + "documentation":"

The total number of resources or compliance items that have a severity level of low. Low severity is determined by the organization that published the compliance items.

" + }, + "InformationalCount":{ + "shape":"ComplianceSummaryCount", + "documentation":"

The total number of resources or compliance items that have a severity level of informational. Informational severity is determined by the organization that published the compliance items.

" + }, + "UnspecifiedCount":{ + "shape":"ComplianceSummaryCount", + "documentation":"

The total number of resources or compliance items that have a severity level of unspecified. Unspecified severity is determined by the organization that published the compliance items.

" + } + }, + "documentation":"

The number of managed instances found for each patch severity level defined in the request filter.

" + }, + "SignalType":{ + "type":"string", + "enum":[ + "Approve", + "Reject" + ] + }, "SnapshotDownloadUrl":{"type":"string"}, "SnapshotId":{ "type":"string", @@ -8008,6 +9251,14 @@ "type":"list", "member":{"shape":"String"} }, + "SubTypeCountLimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The sub-type count exceeded the limit for the inventory type.

", + "exception":true + }, "Tag":{ "type":"structure", "required":[ @@ -8057,6 +9308,14 @@ "documentation":"

An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call.

" }, "TargetCount":{"type":"integer"}, + "TargetInUseException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

You specified the Safe option for the DeregisterTargetFromMaintenanceWindow operation, but the target is still referenced in a task.

", + "exception":true + }, "TargetKey":{ "type":"string", "max":128, @@ -8104,6 +9363,15 @@ "documentation":"

The size of inventory data has exceeded the total size limit for the resource.

", "exception":true }, + "UnsupportedInventoryItemContextException":{ + "type":"structure", + "members":{ + "TypeName":{"shape":"InventoryItemTypeName"}, + "Message":{"shape":"String"} + }, + "documentation":"

The Context attribute that you specified for the InventoryItem is not allowed for this inventory type. You can only use the Context attribute with inventory types like AWS:ComplianceItem.

", + "exception":true + }, "UnsupportedInventorySchemaVersionException":{ "type":"structure", "members":{ @@ -8167,6 +9435,14 @@ "Targets":{ "shape":"Targets", "documentation":"

The targets of the association.

" + }, + "AssociationName":{ + "shape":"AssociationName", + "documentation":"

The name of the association that you want to update.

" + }, + "AssociationVersion":{ + "shape":"AssociationVersion", + "documentation":"

This parameter is provided for concurrency control purposes. You must specify the latest association version in the service. If you want to ensure that this request succeeds, either specify $LATEST, or omit this parameter.

" } } }, @@ -8278,6 +9554,10 @@ "shape":"MaintenanceWindowName", "documentation":"

The name of the Maintenance Window.

" }, + "Description":{ + "shape":"MaintenanceWindowDescription", + "documentation":"

An optional description for the update request.

" + }, "Schedule":{ "shape":"MaintenanceWindowSchedule", "documentation":"

The schedule of the Maintenance Window in the form of a cron or rate expression.

" @@ -8301,6 +9581,11 @@ "shape":"MaintenanceWindowEnabled", "documentation":"

Whether the Maintenance Window is enabled.

", "box":true + }, + "Replace":{ + "shape":"Boolean", + "documentation":"

If True, then all fields that are required by the CreateMaintenanceWindow action are also required for this API request. Optional fields that are not specified are set to null.

", + "box":true } } }, @@ -8315,6 +9600,10 @@ "shape":"MaintenanceWindowName", "documentation":"

The name of the Maintenance Window.

" }, + "Description":{ + "shape":"MaintenanceWindowDescription", + "documentation":"

An optional description of the update.

" + }, "Schedule":{ "shape":"MaintenanceWindowSchedule", "documentation":"

The schedule of the Maintenance Window in the form of a cron or rate expression.

" @@ -8337,6 +9626,197 @@ } } }, + "UpdateMaintenanceWindowTargetRequest":{ + "type":"structure", + "required":[ + "WindowId", + "WindowTargetId" + ], + "members":{ + "WindowId":{ + "shape":"MaintenanceWindowId", + "documentation":"

The Maintenance Window ID with which to modify the target.

" + }, + "WindowTargetId":{ + "shape":"MaintenanceWindowTargetId", + "documentation":"

The target ID to modify.

" + }, + "Targets":{ + "shape":"Targets", + "documentation":"

The targets to add or replace.

" + }, + "OwnerInformation":{ + "shape":"OwnerInformation", + "documentation":"

User-provided value that will be included in any CloudWatch events raised while running tasks for these targets in this Maintenance Window.

" + }, + "Name":{ + "shape":"MaintenanceWindowName", + "documentation":"

A name for the update.

" + }, + "Description":{ + "shape":"MaintenanceWindowDescription", + "documentation":"

An optional description for the update.

" + }, + "Replace":{ + "shape":"Boolean", + "documentation":"

If True, then all fields that are required by the RegisterTargetWithMaintenanceWindow action are also required for this API request. Optional fields that are not specified are set to null.

", + "box":true + } + } + }, + "UpdateMaintenanceWindowTargetResult":{ + "type":"structure", + "members":{ + "WindowId":{ + "shape":"MaintenanceWindowId", + "documentation":"

The Maintenance Window ID specified in the update request.

" + }, + "WindowTargetId":{ + "shape":"MaintenanceWindowTargetId", + "documentation":"

The target ID specified in the update request.

" + }, + "Targets":{ + "shape":"Targets", + "documentation":"

The updated targets.

" + }, + "OwnerInformation":{ + "shape":"OwnerInformation", + "documentation":"

The updated owner.

" + }, + "Name":{ + "shape":"MaintenanceWindowName", + "documentation":"

The updated name.

" + }, + "Description":{ + "shape":"MaintenanceWindowDescription", + "documentation":"

The updated description.

" + } + } + }, + "UpdateMaintenanceWindowTaskRequest":{ + "type":"structure", + "required":[ + "WindowId", + "WindowTaskId" + ], + "members":{ + "WindowId":{ + "shape":"MaintenanceWindowId", + "documentation":"

The Maintenance Window ID that contains the task to modify.

" + }, + "WindowTaskId":{ + "shape":"MaintenanceWindowTaskId", + "documentation":"

The task ID to modify.

" + }, + "Targets":{ + "shape":"Targets", + "documentation":"

The targets (either instances or tags) to modify. Instances are specified using Key=instanceids,Values=instanceID_1,instanceID_2. Tags are specified using Key=tag_name,Values=tag_value.

" + }, + "TaskArn":{ + "shape":"MaintenanceWindowTaskArn", + "documentation":"

The task ARN to modify.

" + }, + "ServiceRoleArn":{ + "shape":"ServiceRole", + "documentation":"

The IAM service role ARN to modify. The system assumes this role during task execution.

" + }, + "TaskParameters":{ + "shape":"MaintenanceWindowTaskParameters", + "documentation":"

The parameters to modify. The map has the following format:

Key: string, between 1 and 255 characters

Value: an array of strings, each between 1 and 255 characters

" + }, + "TaskInvocationParameters":{ + "shape":"MaintenanceWindowTaskInvocationParameters", + "documentation":"

The parameters that the task should use during execution. Populate only the fields that match the task type. All other fields should be empty.

" + }, + "Priority":{ + "shape":"MaintenanceWindowTaskPriority", + "documentation":"

The new task priority to specify. The lower the number, the higher the priority. Tasks that have the same priority are scheduled in parallel.

", + "box":true + }, + "MaxConcurrency":{ + "shape":"MaxConcurrency", + "documentation":"

The new MaxConcurrency value you want to specify. MaxConcurrency is the number of targets that are allowed to run this task in parallel.

" + }, + "MaxErrors":{ + "shape":"MaxErrors", + "documentation":"

The new MaxErrors value to specify. MaxErrors is the maximum number of errors that are allowed before the task stops being scheduled.

" + }, + "LoggingInfo":{ + "shape":"LoggingInfo", + "documentation":"

The new logging location in Amazon S3 to specify.

" + }, + "Name":{ + "shape":"MaintenanceWindowName", + "documentation":"

The new task name to specify.

" + }, + "Description":{ + "shape":"MaintenanceWindowDescription", + "documentation":"

The new task description to specify.

" + }, + "Replace":{ + "shape":"Boolean", + "documentation":"

If True, then all fields that are required by the RegisterTaskWithMaintenanceWindow action are also required for this API request. Optional fields that are not specified are set to null.

", + "box":true + } + } + }, + "UpdateMaintenanceWindowTaskResult":{ + "type":"structure", + "members":{ + "WindowId":{ + "shape":"MaintenanceWindowId", + "documentation":"

The ID of the Maintenance Window that was updated.

" + }, + "WindowTaskId":{ + "shape":"MaintenanceWindowTaskId", + "documentation":"

The task ID of the Maintenance Window that was updated.

" + }, + "Targets":{ + "shape":"Targets", + "documentation":"

The updated target values.

" + }, + "TaskArn":{ + "shape":"MaintenanceWindowTaskArn", + "documentation":"

The updated task ARN value.

" + }, + "ServiceRoleArn":{ + "shape":"ServiceRole", + "documentation":"

The updated service role ARN value.

" + }, + "TaskParameters":{ + "shape":"MaintenanceWindowTaskParameters", + "documentation":"

The updated parameter values.

" + }, + "TaskInvocationParameters":{ + "shape":"MaintenanceWindowTaskInvocationParameters", + "documentation":"

The updated parameter values.

" + }, + "Priority":{ + "shape":"MaintenanceWindowTaskPriority", + "documentation":"

The updated priority value.

" + }, + "MaxConcurrency":{ + "shape":"MaxConcurrency", + "documentation":"

The updated MaxConcurrency value.

" + }, + "MaxErrors":{ + "shape":"MaxErrors", + "documentation":"

The updated MaxErrors value.

" + }, + "LoggingInfo":{ + "shape":"LoggingInfo", + "documentation":"

The updated logging information in Amazon S3.

" + }, + "Name":{ + "shape":"MaintenanceWindowName", + "documentation":"

The updated task name.

" + }, + "Description":{ + "shape":"MaintenanceWindowDescription", + "documentation":"

The updated task description.

" + } + } + }, "UpdateManagedInstanceRoleRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/storagegateway/2013-06-30/service-2.json b/botocore/data/storagegateway/2013-06-30/service-2.json index 94c1e2ff..632371d8 100644 --- a/botocore/data/storagegateway/2013-06-30/service-2.json +++ b/botocore/data/storagegateway/2013-06-30/service-2.json @@ -193,7 +193,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Creates a virtual tape by using your own barcode. You write data to the virtual tape and then archive the tape. This operation is only supported in the tape gateway architecture.

Cache storage must be allocated to the gateway before you can create a virtual tape. Use the AddCache operation to add cache storage to a gateway.

" + "documentation":"

Creates a virtual tape by using your own barcode. You write data to the virtual tape and then archive the tape. A barcode is unique and cannot be reused if it has already been used on a tape. This applies to barcodes used on deleted tapes. This operation is only supported in the tape gateway architecture.

Cache storage must be allocated to the gateway before you can create a virtual tape. Use the AddCache operation to add cache storage to a gateway.

" }, "CreateTapes":{ "name":"CreateTapes", @@ -669,7 +669,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Refreshes the cache for the specified file share. This operation finds objects in the Amazon S3 bucket that were added or removed since the gateway last listed the bucket's contents and cached the results.

" + "documentation":"

Refreshes the cache for the specified file share. This operation finds objects in the Amazon S3 bucket that were added, removed or replaced since the gateway last listed the bucket's contents and cached the results.

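For illustration, a single call against the same hypothetical storagegateway client as in the sketch above; the file share ARN is a placeholder:

    # Re-list the S3 bucket and refresh the gateway's cached view of it.
    sgw.refresh_cache(
        FileShareARN='arn:aws:storagegateway:us-east-2:111122223333:share/share-EXAMPLE'
    )
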
" }, "RemoveTagsFromResource":{ "name":"RemoveTagsFromResource", @@ -1057,7 +1057,7 @@ }, "VolumeSizeInBytes":{ "shape":"long", - "documentation":"

The size of the volume in bytes.

" + "documentation":"

The size, in bytes, of the volume capacity.

" }, "VolumeProgress":{ "shape":"DoubleObject", @@ -1339,7 +1339,7 @@ }, "TargetName":{ "shape":"TargetName", - "documentation":"

The name of the iSCSI target used by initiators to connect to the target and as a suffix for the target ARN. For example, specifying TargetName as myvolume results in the target ARN of arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume. The target name must be unique across all volumes of a gateway.

" + "documentation":"

The name of the iSCSI target used by initiators to connect to the target and as a suffix for the target ARN. For example, specifying TargetName as myvolume results in the target ARN of arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume. The target name must be unique across all volumes of a gateway.

" }, "NetworkInterfaceId":{ "shape":"NetworkInterfaceId", @@ -1384,7 +1384,7 @@ }, "TapeBarcode":{ "shape":"TapeBarcode", - "documentation":"

The barcode that you want to assign to the tape.

" + "documentation":"

The barcode that you want to assign to the tape.

Barcodes cannot be reused. This includes barcodes used for tapes that have been deleted.

" } }, "documentation":"

CreateTapeWithBarcodeInput

" @@ -1509,6 +1509,10 @@ "FileShareARN":{ "shape":"FileShareARN", "documentation":"

The Amazon Resource Name (ARN) of the file share to be deleted.

" + }, + "ForceDelete":{ + "shape":"boolean", + "documentation":"

If set to true, deletes the file share immediately and aborts all data uploads to AWS; the upload process is aborted and the file share enters the FORCE_DELETING status. Otherwise, the file share is not deleted until all data is uploaded to AWS.

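A short sketch of the new flag, using the same hypothetical client and placeholder ARN as above:

    # Abort in-flight uploads and delete immediately; the share transitions
    # to FORCE_DELETING rather than draining uploads first.
    sgw.delete_file_share(
        FileShareARN='arn:aws:storagegateway:us-east-2:111122223333:share/share-EXAMPLE',
        ForceDelete=True,
    )
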
" } }, "documentation":"

DeleteFileShareInput

" @@ -3008,7 +3012,7 @@ }, "TapeUsedInBytes":{ "shape":"TapeUsage", - "documentation":"

The size, in bytes, of data written to the virtual tape.

This value is not available for tapes created prior to May,13 2015.

" + "documentation":"

The size, in bytes, of data written to the virtual tape.

This value is not available for tapes created prior to May 13, 2015.

" } }, "documentation":"

Describes a virtual tape object.

" @@ -3054,7 +3058,7 @@ }, "TapeUsedInBytes":{ "shape":"TapeUsage", - "documentation":"

The size, in bytes, of data written to the virtual tape.

This value is not available for tapes created prior to May,13 2015.

" + "documentation":"

The size, in bytes, of data written to the virtual tape.

This value is not available for tapes created prior to May 13, 2015.

" } }, "documentation":"

Represents a virtual tape that is archived in the virtual tape shelf (VTS).

" @@ -3317,7 +3321,7 @@ }, "ReadOnly":{ "shape":"Boolean", - "documentation":"

Sets the write status of a file share: \"true\" if the write status is read-only, and otherwise \"false\".

" + "documentation":"

Sets the write status of a file share: \"true\" if the write status is read-only, otherwise \"false\".

" } }, "documentation":"

UpdateNFSFileShareInput

" @@ -3448,7 +3452,7 @@ "members":{ "VolumeARN":{ "shape":"VolumeARN", - "documentation":"

The Amazon Resource Name (ARN) for the storage volume. For example, the following is a valid ARN:

arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABB

Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).

" + "documentation":"

The Amazon Resource Name (ARN) for the storage volume. For example, the following is a valid ARN:

arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABB

Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).

" }, "VolumeId":{ "shape":"VolumeId", @@ -3531,5 +3535,5 @@ "long":{"type":"long"}, "string":{"type":"string"} }, - "documentation":"AWS Storage Gateway Service

AWS Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and AWS's storage infrastructure. The service enables you to securely upload data to the AWS cloud for cost effective backup and rapid disaster recovery.

Use the following links to get started using the AWS Storage Gateway Service API Reference:

AWS Storage Gateway resource IDs are in uppercase. When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected.

IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS Resource IDs.

For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following:

arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG.

A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee.

For more information, see Announcement: Heads-up – Longer AWS Storage Gateway volume and snapshot IDs coming in 2016.

" + "documentation":"AWS Storage Gateway Service

AWS Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and AWS's storage infrastructure. The service enables you to securely upload data to the AWS cloud for cost effective backup and rapid disaster recovery.

Use the following links to get started using the AWS Storage Gateway Service API Reference:

AWS Storage Gateway resource IDs are in uppercase. When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected.

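The case conversion itself is a one-liner; a quick sketch:

    volume_id = 'vol-AA22BB012345DAF670'  # as reported by Storage Gateway
    ec2_volume_id = volume_id.lower()     # 'vol-aa22bb012345daf670' for the EC2 API
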
IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS Resource IDs.

For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following:

arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG.

A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee.

For more information, see Announcement: Heads-up – Longer AWS Storage Gateway volume and snapshot IDs coming in 2016.

" } diff --git a/botocore/docs/bcdoc/style.py b/botocore/docs/bcdoc/style.py index df483332..ac42d55e 100644 --- a/botocore/docs/bcdoc/style.py +++ b/botocore/docs/bcdoc/style.py @@ -220,19 +220,18 @@ class ReSTStyle(BaseStyle): if ':' in last_write: last_write = last_write.replace(':', r'\:') self.doc.push_write(last_write) - self.doc.hrefs[last_write] = self.a_href - self.doc.write('`_') + self.doc.push_write(' <%s>`__' % self.a_href) elif last_write == '`': # Look at start_a(). It will do a self.doc.write('`') # which is the start of the link title. If that is the # case then there was no link text. We should just # use an inline link. The syntax of this is # ``_ - self.doc.push_write('`<%s>`_' % self.a_href) + self.doc.push_write('`<%s>`__' % self.a_href) else: self.doc.push_write(self.a_href) self.doc.hrefs[self.a_href] = self.a_href - self.doc.write('`_') + self.doc.write('`__') self.a_href = None self.doc.write(' ') diff --git a/botocore/docs/sharedexample.py b/botocore/docs/sharedexample.py index cb08d1f4..1a31b6e4 100644 --- a/botocore/docs/sharedexample.py +++ b/botocore/docs/sharedexample.py @@ -13,6 +13,7 @@ import re import numbers from botocore.utils import parse_timestamp +from botocore.docs.utils import escape_controls from botocore.compat import six @@ -165,7 +166,8 @@ class SharedExampleDocumenter(object): def _document_str(self, section, value, path): # We do the string conversion because this might accept a type that # we don't specifically address. - section.write(u"'%s'," % six.text_type(value)) + safe_value = escape_controls(value) + section.write(u"'%s'," % six.text_type(safe_value)) def _document_number(self, section, value, path): section.write("%s," % str(value)) diff --git a/botocore/docs/utils.py b/botocore/docs/utils.py index 3d79d1ce..a0d2d662 100644 --- a/botocore/docs/utils.py +++ b/botocore/docs/utils.py @@ -10,6 +10,7 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
+import re from collections import namedtuple @@ -177,3 +178,20 @@ class AppendParamDocumentation(object): description_section = section.get_section( 'param-documentation') description_section.writeln(self._doc_string) + + +_CONTROLS = { + '\n': '\\n', + '\r': '\\r', + '\t': '\\t', + '\b': '\\b', + '\f': '\\f', +} +# Combine all _CONTROLS keys into a single alternation ('|') regular expression +_ESCAPE_CONTROLS_RE = re.compile('|'.join(map(re.escape, _CONTROLS))) +# Based on the match, look up the appropriate replacement in _CONTROLS +_CONTROLS_MATCH_HANDLER = lambda match: _CONTROLS[match.group(0)] + + +def escape_controls(value): + return _ESCAPE_CONTROLS_RE.sub(_CONTROLS_MATCH_HANDLER, value) diff --git a/botocore/exceptions.py index f0fd1725..d28b3bb3 100644 --- a/botocore/exceptions.py +++ b/botocore/exceptions.py @@ -350,9 +350,10 @@ class ClientError(Exception): def __init__(self, error_response, operation_name): retry_info = self._get_retry_info(error_response) + error = error_response.get('Error', {}) msg = self.MSG_TEMPLATE.format( - error_code=error_response['Error'].get('Code', 'Unknown'), - error_message=error_response['Error'].get('Message', 'Unknown'), + error_code=error.get('Code', 'Unknown'), + error_message=error.get('Message', 'Unknown'), operation_name=operation_name, retry_info=retry_info, ) @@ -399,6 +400,21 @@ class InvalidS3AddressingStyleError(BotoCoreError): ) +class InvalidRetryConfigurationError(BotoCoreError): + """Error when an invalid retry configuration is specified""" + fmt = ( + 'Cannot provide retry configuration for "{retry_config_option}". ' + 'Valid retry configuration options are: \'max_attempts\'' + ) + + +class InvalidMaxRetryAttemptsError(InvalidRetryConfigurationError): + """Error when an invalid max_attempts value is specified""" + fmt = ( + 'Value provided to "max_attempts": {provided_max_attempts} must ' + 'be an integer greater than or equal to zero.' + ) + class StubResponseError(BotoCoreError): fmt = 'Error getting response stub for operation {operation_name}: {reason}' diff --git a/botocore/handlers.py index bced6df2..aab5b131 100644 --- a/botocore/handlers.py +++ b/botocore/handlers.py @@ -253,6 +253,14 @@ def _needs_s3_sse_customization(params, sse_member_prefix): sse_member_prefix + 'KeyMD5' not in params) +# NOTE: Retries get registered in two separate places in the botocore +# code: once when creating the client and once when you load the service +# model from the session. While at first this handler seems unneeded, it +# would be a breaking change for the AWS CLI to have it removed. This is +# because the CLI relies on the service model from the session to create commands, +# and this handler respects operation-granular retry logic while the client +# one does not. If this handler is ever removed, the client +# will have to respect per-operation retry configuration.
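To make the new retry surface concrete, a minimal sketch of the client-level override this release introduces (service, region, and attempt count are arbitrary, mirroring the functional tests further down):

    import botocore.session
    from botocore.config import Config

    session = botocore.session.get_session()
    # max_attempts counts retries, not total requests: the initial request
    # is always made, so this client issues at most 3 requests per call.
    client = session.create_client(
        'dynamodb', 'us-west-2',
        config=Config(retries={'max_attempts': 2}),
    )
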
def register_retries_for_service(service_data, session, service_name, **kwargs): loader = session.get_component('data_loader') diff --git a/botocore/paginate.py index 8cae9749..abb307f9 100644 --- a/botocore/paginate.py +++ b/botocore/paginate.py @@ -545,7 +545,8 @@ class PageIterator(object): class Paginator(object): PAGE_ITERATOR_CLS = PageIterator - def __init__(self, method, pagination_config): + def __init__(self, method, pagination_config, model): + self._model = model self._method = method self._pagination_cfg = pagination_config self._output_token = self._get_output_tokens(self._pagination_cfg) @@ -623,11 +624,17 @@ class Paginator(object): max_items = int(max_items) page_size = pagination_config.get('PageSize', None) if page_size is not None: - if self._pagination_cfg.get('limit_key', None) is None: + if self._limit_key is None: raise PaginationError( message="PageSize parameter is not supported for the " "pagination interface for this operation.") - page_size = int(page_size) + input_members = self._model.input_shape.members + limit_key_shape = input_members.get(self._limit_key) + if limit_key_shape.type_name == 'string': + if not isinstance(page_size, six.string_types): + page_size = str(page_size) + else: + page_size = int(page_size) return { 'MaxItems': max_items, 'StartingToken': pagination_config.get('StartingToken', None), diff --git a/botocore/translate.py index 60c7d1dc..5a661ab0 100644 --- a/botocore/translate.py +++ b/botocore/translate.py @@ -11,22 +11,51 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. +import copy + from botocore.utils import merge_dicts -def build_retry_config(endpoint_prefix, retry_model, definitions): +def build_retry_config(endpoint_prefix, retry_model, definitions, + client_retry_config=None): service_config = retry_model.get(endpoint_prefix, {}) resolve_references(service_config, definitions) # We want to merge the global defaults with the service specific # defaults, with the service specific defaults taking precedence. # So we use the global defaults as the base. - final_retry_config = {'__default__': retry_model.get('__default__', {})} + # + # A deepcopy is done on the retry defaults because it ensures the + # retry model has no chance of getting mutated when the service specific + # configuration or client retry config is merged in. + final_retry_config = { + '__default__': copy.deepcopy(retry_model.get('__default__', {})) + } resolve_references(final_retry_config, definitions) # Then merge the service specific config on top. merge_dicts(final_retry_config, service_config) + if client_retry_config is not None: + _merge_client_retry_config(final_retry_config, client_retry_config) return final_retry_config +def _merge_client_retry_config(retry_config, client_retry_config): + max_retry_attempts_override = client_retry_config.get('max_attempts') + if max_retry_attempts_override is not None: + # In the retry config, max_attempts refers to the maximum number + # of requests that will be made in total. However, for the client's + # retry config it refers to how many retry attempts will be made at + # most. So to translate this number from the client config, one is + # added to convert it to the maximum number of requests that will be + # made, including the initial request.
+ # + # It is also important to note that if we ever support per operation + # configuration in the retry model via the client, we will need to + # revisit this logic to make sure max_attempts gets applied + # per operation. + retry_config['__default__'][ + 'max_attempts'] = max_retry_attempts_override + 1 + + def resolve_references(config, definitions): """Recursively replace $ref keys. diff --git a/docs/source/conf.py b/docs/source/conf.py index a93a657a..d4a5aa71 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -52,9 +52,9 @@ copyright = u'2013, Mitch Garnaat' # built documents. # # The short X.Y version. -version = '1.5.' +version = '1.6' # The full version, including alpha/beta/rc tags. -release = '1.5.84' +release = '1.6.6' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/tests/functional/test_retry.py b/tests/functional/test_retry.py new file mode 100644 index 00000000..dcb3d801 --- /dev/null +++ b/tests/functional/test_retry.py @@ -0,0 +1,103 @@ +# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +from tests import BaseSessionTest, mock + +from botocore.exceptions import ClientError +from botocore.config import Config + + +class TestRetry(BaseSessionTest): + def setUp(self): + super(TestRetry, self).setUp() + self.region = 'us-west-2' + self.sleep_patch = mock.patch('time.sleep') + self.sleep_patch.start() + + def tearDown(self): + self.sleep_patch.stop() + + def add_n_retryable_responses(self, mock_send, num_responses): + responses = [] + for _ in range(num_responses): + http_response = mock.Mock() + http_response.status_code = 500 + http_response.headers = {} + http_response.content = b'{}' + responses.append(http_response) + mock_send.side_effect = responses + + def assert_will_retry_n_times(self, method, num_retries): + num_responses = num_retries + 1 + with mock.patch('botocore.endpoint.Session.send') as mock_send: + self.add_n_retryable_responses(mock_send, num_responses) + with self.assertRaisesRegexp( + ClientError, 'reached max retries: %s' % num_retries): + method() + self.assertEqual(mock_send.call_count, num_responses) + + def test_can_override_max_attempts(self): + client = self.session.create_client( + 'dynamodb', self.region, config=Config( + retries={'max_attempts': 1})) + self.assert_will_retry_n_times(client.list_tables, 1) + + def test_do_not_attempt_retries(self): + client = self.session.create_client( + 'dynamodb', self.region, config=Config( + retries={'max_attempts': 0})) + self.assert_will_retry_n_times(client.list_tables, 0) + + def test_setting_max_attempts_does_not_set_for_other_clients(self): + # Make one client with max attempts configured. + self.session.create_client( + 'codecommit', self.region, config=Config( + retries={'max_attempts': 1})) + + # Make another client that has no custom retry configured. 
+ client = self.session.create_client('codecommit', self.region) + # It should use the default max retries, which should be four retries + # for this service. + self.assert_will_retry_n_times(client.list_repositories, 4) + + def test_service_specific_defaults_do_not_mutate_general_defaults(self): + # This tests for a bug where if you created a client for a service + # with specific retry configurations and then created a client for + # a service whose retry configurations fallback to the general + # defaults, the second client would actually use the defaults of + # the first client. + + # Make a dynamodb client. It's a special case client that is + # configured to a make a maximum of 10 requests (9 retries). + client = self.session.create_client('dynamodb', self.region) + self.assert_will_retry_n_times(client.list_tables, 9) + + # A codecommit client is not a special case for retries. It will at + # most make 5 requests (4 retries) for its default. + client = self.session.create_client('codecommit', self.region) + self.assert_will_retry_n_times(client.list_repositories, 4) + + def test_set_max_attempts_on_session(self): + self.session.set_default_client_config( + Config(retries={'max_attempts': 1})) + # Max attempts should be inherited from the session. + client = self.session.create_client('codecommit', self.region) + self.assert_will_retry_n_times(client.list_repositories, 1) + + def test_can_clobber_max_attempts_on_session(self): + self.session.set_default_client_config( + Config(retries={'max_attempts': 1})) + # Max attempts should override the session's configured max attempts. + client = self.session.create_client( + 'codecommit', self.region, config=Config( + retries={'max_attempts': 0})) + self.assert_will_retry_n_times(client.list_repositories, 0) diff --git a/tests/functional/test_route53.py b/tests/functional/test_route53.py new file mode 100644 index 00000000..51344938 --- /dev/null +++ b/tests/functional/test_route53.py @@ -0,0 +1,51 @@ +# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +from tests import unittest + +import botocore.session +from botocore.stub import Stubber + +class TestRoute53Pagination(unittest.TestCase): + def setUp(self): + self.session = botocore.session.get_session() + self.client = self.session.create_client('route53', 'us-west-2') + self.stubber = Stubber(self.client) + # response has required fields + self.response = { + 'HostedZones': [], + 'Marker': '', + 'IsTruncated': True, + 'MaxItems': '1' + } + self.operation_name = 'list_hosted_zones' + + def test_paginate_with_max_items_int(self): + # Route53 has a string type for MaxItems. We need to ensure that this + # still works with integers as the cli auto converts the page size + # argument to an integer. 
+ self.stubber.add_response(self.operation_name, self.response) + paginator = self.client.get_paginator('list_hosted_zones') + with self.stubber: + config = {'PageSize': 1} + results = list(paginator.paginate(PaginationConfig=config)) + self.assertTrue(len(results) >= 0) + + def test_paginate_with_max_items_str(self): + # Route53 has a string type for MaxItems. We need to ensure that this + # still works with strings as that's the expected type for this key. + self.stubber.add_response(self.operation_name, self.response) + paginator = self.client.get_paginator('list_hosted_zones') + with self.stubber: + config = {'PageSize': '1'} + results = list(paginator.paginate(PaginationConfig=config)) + self.assertTrue(len(results) >= 0) diff --git a/tests/unit/docs/bcdoc/test_document.py index dfd23c6f..5b736791 100644 --- a/tests/unit/docs/bcdoc/test_document.py +++ b/tests/unit/docs/bcdoc/test_document.py @@ -41,7 +41,7 @@ class TestReSTDocument(unittest.TestCase): doc = ReSTDocument() doc.include_doc_string('

this is a test

') self.assertEqual(doc.getvalue(), six.b('\n\nthis is a ``test`` \n\n')) - + def test_remove_doc_string(self): doc = ReSTDocument() doc.writeln('foo') @@ -49,6 +49,12 @@ class TestReSTDocument(unittest.TestCase): doc.remove_last_doc_string() self.assertEqual(doc.getvalue(), six.b('foo\n')) + def test_add_links(self): + doc = ReSTDocument() + doc.hrefs['foo'] = 'https://example.com/' + self.assertEqual( + doc.getvalue(), six.b('\n\n.. _foo: https://example.com/\n')) + class TestDocumentStructure(unittest.TestCase): def setUp(self): diff --git a/tests/unit/docs/bcdoc/test_style.py b/tests/unit/docs/bcdoc/test_style.py index 31ffd35a..a6278733 100644 --- a/tests/unit/docs/bcdoc/test_style.py +++ b/tests/unit/docs/bcdoc/test_style.py @@ -146,6 +146,16 @@ class TestStyle(unittest.TestCase): style.doc.getvalue(), six.b('')) + def test_href_link(self): + style = ReSTStyle(ReSTDocument()) + style.start_a(attrs=[('href', 'http://example.org')]) + style.doc.write('example') + style.end_a() + self.assertEqual( + style.doc.getvalue(), + six.b('`example `__ ') + ) + def test_escape_href_link(self): style = ReSTStyle(ReSTDocument()) style.start_a(attrs=[('href', 'http://example.org')]) @@ -153,15 +163,14 @@ class TestStyle(unittest.TestCase): style.end_a() self.assertEqual( style.doc.getvalue(), - six.b('`foo\\: the next bar`_ \n\n.. _foo\\: the next ' - 'bar: http://example.org\n')) + six.b('`foo\\: the next bar `__ ')) def test_handle_no_text_hrefs(self): style = ReSTStyle(ReSTDocument()) style.start_a(attrs=[('href', 'http://example.org')]) style.end_a() self.assertEqual(style.doc.getvalue(), - six.b('``_ ')) + six.b('``__ ')) def test_sphinx_reference_label_html(self): style = ReSTStyle(ReSTDocument()) diff --git a/tests/unit/docs/test_sharedexample.py b/tests/unit/docs/test_sharedexample.py index 26931ad9..0a846354 100644 --- a/tests/unit/docs/test_sharedexample.py +++ b/tests/unit/docs/test_sharedexample.py @@ -299,3 +299,22 @@ class TestSharedExampleDocumenter(BaseDocsTest): u" foo='\u2713'", u")" ]) + + def test_escape_character_example(self): + self.add_shape_to_params('foo', 'String') + self.documenter.document_shared_example( + example={ + 'output': { + 'foo': 'good\n\rintentions!\n\r' + } + }, + prefix='foo.bar', + section=self.doc_structure, + operation_model=self.operation_model + ) + self.assert_contains_lines_in_order([ + "Expected Output:", + " {", + " 'foo': 'good\\n\\rintentions!\\n\\r',", + " }", + ]) diff --git a/tests/unit/docs/test_utils.py b/tests/unit/docs/test_utils.py index 7611a7e4..c526d24f 100644 --- a/tests/unit/docs/test_utils.py +++ b/tests/unit/docs/test_utils.py @@ -18,6 +18,7 @@ from botocore.docs.utils import get_official_service_name from botocore.docs.utils import AutoPopulatedParam from botocore.docs.utils import HideParamFromOperations from botocore.docs.utils import AppendParamDocumentation +from botocore.docs.utils import escape_controls class TestPythonTypeName(unittest.TestCase): @@ -217,3 +218,10 @@ class TestAppendParamDocumentation(BaseDocsTest): 'docs.request-params', self.doc_structure) self.assert_contains_line('foo\n') self.assert_contains_line('hello!') + + +class TestEscapeControls(unittest.TestCase): + def test_escapes_controls(self): + escaped = escape_controls('\na\rb\tc\fd\be') + self.assertEquals(escaped, '\\na\\rb\\tc\\fd\\be') + diff --git a/tests/unit/test_args.py b/tests/unit/test_args.py index af04e610..2b854d40 100644 --- a/tests/unit/test_args.py +++ b/tests/unit/test_args.py @@ -203,3 +203,24 @@ class 
TestCreateClientArgs(unittest.TestCase): service_model, 'us-west-2', True, 'http://other.com/', True, None, {}, config, bridge) self.assertEqual(client_args['client_config'].region_name, None) + + def test_provide_retry_config(self): + self.args_create = args.ClientArgsCreator( + mock.Mock(), None, None, None, None) + service_model = mock.Mock() + service_model.endpoint_prefix = 'ec2' + service_model.metadata = {'protocol': 'query'} + config = botocore.config.Config( + retries={'max_attempts': 10} + ) + bridge = mock.Mock() + bridge.resolve.side_effect = [{ + 'region_name': None, 'signature_version': 'v4', + 'endpoint_url': 'http://other.com/', 'signing_name': 'ec2', + 'signing_region': None, 'metadata': {} + }] + client_args = self.args_create.get_client_args( + service_model, 'us-west-2', True, 'https://ec2/', True, None, + {}, config, bridge) + self.assertEqual( + client_args['client_config'].retries, {'max_attempts': 10}) diff --git a/tests/unit/test_client.py b/tests/unit/test_client.py index f30317ae..562302be 100644 --- a/tests/unit/test_client.py +++ b/tests/unit/test_client.py @@ -25,6 +25,8 @@ from botocore.credentials import Credentials from botocore.exceptions import ParamValidationError from botocore.exceptions import InvalidS3AddressingStyleError from botocore.exceptions import UnknownSignatureVersionError +from botocore.exceptions import InvalidRetryConfigurationError +from botocore.exceptions import InvalidMaxRetryAttemptsError from botocore.errorfactory import ClientExceptionsFactory from botocore.stub import Stubber from botocore import exceptions @@ -819,6 +821,26 @@ class TestAutoGeneratedClient(unittest.TestCase): for call in event_emitter.register.call_args_list: self.assertNotIn('needs-retry', call[0][0]) + def test_can_override_max_attempts(self): + retry_handler_factory = mock.Mock(botocore.retryhandler) + creator = self.create_client_creator( + retry_handler_factory=retry_handler_factory) + creator.create_client( + 'myservice', 'us-west-2', + client_config=botocore.config.Config(retries={'max_attempts': 9})) + + retry_handler_factory.create_retry_handler.assert_called_with({ + '__default__': { + 'delay': { + 'growth_factor': 2, + 'base': 'rand', + 'type': 'exponential' + }, + 'policies': {}, + 'max_attempts': 10 + } + }, 'myservice') + def test_try_to_paginate_non_paginated(self): self.loader.load_service_model.side_effect = [ self.service_description, @@ -1602,6 +1624,20 @@ class TestConfig(unittest.TestCase): self.assertEqual(new_config.region_name, 'us-west-2') self.assertEqual(new_config.signature_version, 's3v4') + def test_can_set_retry_max_attempts(self): + config = botocore.config.Config(retries={'max_attempts': 15}) + self.assertEqual(config.retries['max_attempts'], 15) + + def test_validates_retry_config(self): + with self.assertRaisesRegexp( + InvalidRetryConfigurationError, + 'Cannot provide retry configuration for "not-allowed"'): + botocore.config.Config(retries={'not-allowed': True}) + + def test_validates_max_retry_attempts(self): + with self.assertRaises(InvalidMaxRetryAttemptsError): + botocore.config.Config(retries={'max_attempts': -1}) + class TestClientEndpointBridge(unittest.TestCase): def setUp(self): diff --git a/tests/unit/test_exceptions.py b/tests/unit/test_exceptions.py index cf40c434..d6a00389 100644 --- a/tests/unit/test_exceptions.py +++ b/tests/unit/test_exceptions.py @@ -62,3 +62,19 @@ def test_retry_info_not_added_if_retry_attempts_not_present(): raise AssertionError("Retry information should not be in exception " "message when 
retry attempts not in response " "metadata: %s" % error_msg) + + +def test_can_handle_when_response_missing_error_key(): + response = { + 'ResponseMetadata': { + 'HTTPHeaders': {}, + 'HTTPStatusCode': 503, + 'MaxAttemptsReached': True, + 'RetryAttempts': 4 + } + } + e = exceptions.ClientError(response, 'SomeOperation') + if 'An error occurred (Unknown)' not in str(e): + raise AssertionError( + "Error code should default to 'Unknown' " + "when missing error response, instead got: %s" % str(e)) diff --git a/tests/unit/test_paginate.py b/tests/unit/test_paginate.py index 0c10e2e4..39179e5d 100644 --- a/tests/unit/test_paginate.py +++ b/tests/unit/test_paginate.py @@ -12,6 +12,7 @@ # language governing permissions and limitations under the License. from tests import unittest +from botocore import model from botocore.paginate import Paginator from botocore.paginate import PaginatorModel from botocore.paginate import TokenDecoder @@ -95,12 +96,13 @@ class TestPaginatorModel(unittest.TestCase): class TestPagination(unittest.TestCase): def setUp(self): self.method = mock.Mock() + self.model = mock.Mock() self.paginate_config = { 'output_token': 'NextToken', 'input_token': 'NextToken', 'result_key': 'Foo', } - self.paginator = Paginator(self.method, self.paginate_config) + self.paginator = Paginator(self.method, self.paginate_config, self.model) def test_result_key_available(self): self.assertEqual( @@ -134,7 +136,7 @@ class TestPagination(unittest.TestCase): "result_key": "Users", "limit_key": "MaxKeys", } - self.paginator = Paginator(self.method, self.paginate_config) + self.paginator = Paginator(self.method, self.paginate_config, self.model) responses = [ {"Users": ["User1"], "Marker": "m1"}, {"Users": ["User2"], "Marker": "m2"}, @@ -173,7 +175,7 @@ class TestPagination(unittest.TestCase): 'input_token': 'NextToken', 'result_key': 'Foo', } - self.paginator = Paginator(self.method, self.pagination_config) + self.paginator = Paginator(self.method, self.pagination_config, self.model) # Verify that despite varying between NextToken and NextToken2 # we still can extract the right next tokens. 
responses = [ @@ -203,7 +205,7 @@ class TestPagination(unittest.TestCase): 'input_token': 'NextToken', 'result_key': 'Foo', } - self.paginator = Paginator(self.method, self.paginate_config) + self.paginator = Paginator(self.method, self.paginate_config, self.model) responses = [ {'Foo': [1], 'IsTruncated': True, 'NextToken': 'token1'}, {'Foo': [2], 'IsTruncated': True, 'NextToken': 'token2'}, @@ -225,7 +227,7 @@ class TestPagination(unittest.TestCase): 'input_token': 'NextToken', 'result_key': 'Bar', } - self.paginator = Paginator(self.method, self.paginate_config) + self.paginator = Paginator(self.method, self.paginate_config, self.model) responses = [ {'Foo': {'IsTruncated': True}, 'NextToken': 'token1'}, {'Foo': {'IsTruncated': False}, 'NextToken': 'token2'}, @@ -244,7 +246,7 @@ class TestPagination(unittest.TestCase): "result_key": "Users", "limit_key": "MaxKeys", } - self.paginator = Paginator(self.method, self.paginate_config) + self.paginator = Paginator(self.method, self.paginate_config, self.model) responses = [ {"Users": ["User1"], "Marker": "m1"}, {"Users": ["User2"], "Marker": "m2"}, @@ -285,7 +287,7 @@ class TestPagination(unittest.TestCase): "result_key": "Users", "limit_key": "MaxKeys", } - self.paginator = Paginator(self.method, self.paginate_config) + self.paginator = Paginator(self.method, self.paginate_config, self.model) responses = [ {"Users": ["User1"], "Marker": "m1"}, {"Users": ["User2"], "Marker": "m2"}, @@ -303,7 +305,7 @@ class TestPagination(unittest.TestCase): "result_key": "Users", "limit_key": "MaxKeys", } - self.paginator = Paginator(self.method, self.paginate_config) + self.paginator = Paginator(self.method, self.paginate_config, self.model) max_items = 3 page_size = 2 @@ -343,13 +345,14 @@ class TestPagination(unittest.TestCase): class TestPaginatorPageSize(unittest.TestCase): def setUp(self): self.method = mock.Mock() + self.model = mock.Mock() self.paginate_config = { "output_token": "Marker", "input_token": "Marker", "result_key": ["Users", "Groups"], 'limit_key': 'MaxKeys', } - self.paginator = Paginator(self.method, self.paginate_config) + self.paginator = Paginator(self.method, self.paginate_config, self.model) self.endpoint = mock.Mock() def test_no_page_size(self): @@ -374,14 +377,16 @@ class TestPaginatorPageSize(unittest.TestCase): kwargs = {'arg1': 'foo', 'arg2': 'bar', 'PaginationConfig': {'PageSize': 5}} del self.paginate_config['limit_key'] + paginator = Paginator(self.method, self.paginate_config, self.model) with self.assertRaises(PaginationError): - self.paginator.paginate(**kwargs) + paginator.paginate(**kwargs) class TestPaginatorWithPathExpressions(unittest.TestCase): def setUp(self): self.method = mock.Mock() + self.model = mock.Mock() # This is something we'd see in s3 pagination. 
self.paginate_config = { 'output_token': [ @@ -389,7 +394,7 @@ class TestPaginatorWithPathExpressions(unittest.TestCase): 'input_token': 'next_marker', 'result_key': 'Contents', } - self.paginator = Paginator(self.method, self.paginate_config) + self.paginator = Paginator(self.method, self.paginate_config, self.model) def test_s3_list_objects(self): responses = [ @@ -422,12 +427,13 @@ class TestPaginatorWithPathExpressions(unittest.TestCase): class TestBinaryTokens(unittest.TestCase): def setUp(self): self.method = mock.Mock() + self.model = mock.Mock() self.paginate_config = { "output_token": "Marker", "input_token": "Marker", "result_key": "Users" } - self.paginator = Paginator(self.method, self.paginate_config) + self.paginator = Paginator(self.method, self.paginate_config, self.model) def test_build_full_result_with_bytes(self): responses = [ @@ -592,6 +598,7 @@ class TestBinaryTokens(unittest.TestCase): class TestMultipleTokens(unittest.TestCase): def setUp(self): self.method = mock.Mock() + self.model = mock.Mock() # This is something we'd see in s3 pagination. self.paginate_config = { "output_token": ["ListBucketResults.NextKeyMarker", @@ -599,7 +606,7 @@ class TestMultipleTokens(unittest.TestCase): "input_token": ["key_marker", "upload_id_marker"], "result_key": 'Foo', } - self.paginator = Paginator(self.method, self.paginate_config) + self.paginator = Paginator(self.method, self.paginate_config, self.model) def test_s3_list_multipart_uploads(self): responses = [ @@ -633,6 +640,7 @@ class TestOptionalTokens(unittest.TestCase): """ def setUp(self): self.method = mock.Mock() + self.model = mock.Mock() # This is based on Route53 pagination. self.paginate_config = { "output_token": ["NextRecordName", @@ -643,7 +651,7 @@ class TestOptionalTokens(unittest.TestCase): "StartRecordIdentifier"], "result_key": 'Foo', } - self.paginator = Paginator(self.method, self.paginate_config) + self.paginator = Paginator(self.method, self.paginate_config, self.model) def test_clean_token(self): responses = [ @@ -673,13 +681,14 @@ class TestOptionalTokens(unittest.TestCase): class TestKeyIterators(unittest.TestCase): def setUp(self): self.method = mock.Mock() + self.model = mock.Mock() # This is something we'd see in s3 pagination. self.paginate_config = { "output_token": "Marker", "input_token": "Marker", "result_key": "Users" } - self.paginator = Paginator(self.method, self.paginate_config) + self.paginator = Paginator(self.method, self.paginate_config, self.model) def test_result_key_iters(self): responses = [ @@ -706,7 +715,7 @@ class TestKeyIterators(unittest.TestCase): self.assertEqual(complete, {'Users': ['User1', 'User2', 'User3']}) def test_max_items_can_be_specified(self): - paginator = Paginator(self.method, self.paginate_config) + paginator = Paginator(self.method, self.paginate_config, self.model) responses = [ {"Users": ["User1"], "Marker": "m1"}, {"Users": ["User2"], "Marker": "m2"}, @@ -722,7 +731,7 @@ class TestKeyIterators(unittest.TestCase): def test_max_items_as_strings(self): # Some services (route53) model MaxItems as a string type. # We need to be able to handle this case. 
- paginator = Paginator(self.method, self.paginate_config) + paginator = Paginator(self.method, self.paginate_config, self.model) responses = [ {"Users": ["User1"], "Marker": "m1"}, {"Users": ["User2"], "Marker": "m2"}, @@ -737,7 +746,7 @@ class TestKeyIterators(unittest.TestCase): {'Users': ['User1'], 'NextToken': expected_token}) def test_next_token_on_page_boundary(self): - paginator = Paginator(self.method, self.paginate_config) + paginator = Paginator(self.method, self.paginate_config, self.model) responses = [ {"Users": ["User1"], "Marker": "m1"}, {"Users": ["User2"], "Marker": "m2"}, @@ -754,7 +763,7 @@ class TestKeyIterators(unittest.TestCase): # We're saying we only want 4 items, but notice that the second # page of results returns users 4-6 so we have to truncated # part of that second page. - paginator = Paginator(self.method, self.paginate_config) + paginator = Paginator(self.method, self.paginate_config, self.model) responses = [ {"Users": ["User1", "User2", "User3"], "Marker": "m1"}, {"Users": ["User4", "User5", "User6"], "Marker": "m2"}, @@ -774,7 +783,7 @@ class TestKeyIterators(unittest.TestCase): # from test_MaxItems_can_be_specified_truncates_response # We got the first 4 users, when we pick up we should get # User5 - User7. - paginator = Paginator(self.method, self.paginate_config) + paginator = Paginator(self.method, self.paginate_config, self.model) responses = [ {"Users": ["User4", "User5", "User6"], "Marker": "m2"}, {"Users": ["User7"]}, @@ -795,7 +804,7 @@ class TestKeyIterators(unittest.TestCase): def test_max_items_exceeds_actual_amount(self): # Because MaxItems=10 > number of users (3), we should just return # all of the users. - paginator = Paginator(self.method, self.paginate_config) + paginator = Paginator(self.method, self.paginate_config, self.model) responses = [ {"Users": ["User1"], "Marker": "m1"}, {"Users": ["User2"], "Marker": "m2"}, @@ -823,13 +832,14 @@ class TestKeyIterators(unittest.TestCase): class TestMultipleResultKeys(unittest.TestCase): def setUp(self): self.method = mock.Mock() + self.model = mock.Mock() # This is something we'd see in s3 pagination. self.paginate_config = { "output_token": "Marker", "input_token": "Marker", "result_key": ["Users", "Groups"], } - self.paginator = Paginator(self.method, self.paginate_config) + self.paginator = Paginator(self.method, self.paginate_config, self.model) def test_build_full_result_with_multiple_result_keys(self): responses = [ @@ -944,6 +954,7 @@ class TestMultipleResultKeys(unittest.TestCase): class TestMultipleInputKeys(unittest.TestCase): def setUp(self): self.method = mock.Mock() + self.model = mock.Mock() # Probably the most complicated example we'll see: # multiple input/output/result keys. self.paginate_config = { @@ -951,7 +962,7 @@ class TestMultipleInputKeys(unittest.TestCase): "input_token": ["InMarker1", "InMarker2"], "result_key": ["Users", "Groups"], } - self.paginator = Paginator(self.method, self.paginate_config) + self.paginator = Paginator(self.method, self.paginate_config, self.model) def test_build_full_result_with_multiple_input_keys(self): responses = [ @@ -1022,6 +1033,7 @@ class TestMultipleInputKeys(unittest.TestCase): class TestExpressionKeyIterators(unittest.TestCase): def setUp(self): self.method = mock.Mock() + self.model = mock.Mock() # This is something like what we'd see in RDS. 
self.paginate_config = { "input_token": "Marker", @@ -1029,7 +1041,7 @@ class TestExpressionKeyIterators(unittest.TestCase): "limit_key": "MaxRecords", "result_key": "EngineDefaults.Parameters" } - self.paginator = Paginator(self.method, self.paginate_config) + self.paginator = Paginator(self.method, self.paginate_config, self.model) self.responses = [ {"EngineDefaults": {"Parameters": ["One", "Two"]}, "Marker": "m1"}, @@ -1060,12 +1072,13 @@ class TestExpressionKeyIterators(unittest.TestCase): class TestIncludeResultKeys(unittest.TestCase): def setUp(self): self.method = mock.Mock() + self.model = mock.Mock() self.paginate_config = { 'output_token': 'Marker', 'input_token': 'Marker', 'result_key': ['ResultKey', 'Count', 'Log'], } - self.paginator = Paginator(self.method, self.paginate_config) + self.paginator = Paginator(self.method, self.paginate_config, self.model) def test_different_kinds_of_result_key(self): self.method.side_effect = [ @@ -1097,13 +1110,14 @@ class TestIncludeNonResultKeys(unittest.TestCase): def setUp(self): self.method = mock.Mock() + self.model = mock.Mock() self.paginate_config = { 'output_token': 'NextToken', 'input_token': 'NextToken', 'result_key': 'ResultKey', 'non_aggregate_keys': ['NotResultKey'], } - self.paginator = Paginator(self.method, self.paginate_config) + self.paginator = Paginator(self.method, self.paginate_config, self.model) def test_include_non_aggregate_keys(self): self.method.side_effect = [ @@ -1122,7 +1136,7 @@ class TestIncludeNonResultKeys(unittest.TestCase): def test_include_with_multiple_result_keys(self): self.paginate_config['result_key'] = ['ResultKey1', 'ResultKey2'] - self.paginator = Paginator(self.method, self.paginate_config) + self.paginator = Paginator(self.method, self.paginate_config, self.model) self.method.side_effect = [ {'ResultKey1': ['a', 'b'], 'ResultKey2': ['u', 'v'], 'NotResultKey': 'a', 'NextToken': 'token1'}, @@ -1145,7 +1159,7 @@ class TestIncludeNonResultKeys(unittest.TestCase): self.paginate_config['non_aggregate_keys'] = [ 'Outer', 'Result.Inner', ] - self.paginator = Paginator(self.method, self.paginate_config) + self.paginator = Paginator(self.method, self.paginate_config, self.model) self.method.side_effect = [ # The non result keys shows hypothetical # example. 
This doesn't actually happen, @@ -1173,13 +1187,14 @@ class TestIncludeNonResultKeys(unittest.TestCase): class TestSearchOverResults(unittest.TestCase): def setUp(self): self.method = mock.Mock() + self.model = mock.Mock() self.paginate_config = { 'more_results': 'IsTruncated', 'output_token': 'NextToken', 'input_token': 'NextToken', 'result_key': 'Foo', } - self.paginator = Paginator(self.method, self.paginate_config) + self.paginator = Paginator(self.method, self.paginate_config, self.model) responses = [ {'Foo': [{'a': 1}, {'b': 2}], 'IsTruncated': True, 'NextToken': '1'}, @@ -1209,6 +1224,7 @@ class TestSearchOverResults(unittest.TestCase): class TestDeprecatedStartingToken(unittest.TestCase): def setUp(self): self.method = mock.Mock() + self.model = mock.Mock() def create_paginator(self, multiple_tokens=False): if multiple_tokens: @@ -1223,7 +1239,7 @@ class TestDeprecatedStartingToken(unittest.TestCase): 'input_token': 'Marker', 'result_key': 'Users', } - return Paginator(self.method, paginator_config) + return Paginator(self.method, paginator_config, self.model) def assert_pagination_result(self, expected, pagination_config, multiple_tokens=False): @@ -1333,5 +1349,72 @@ class TestDeprecatedStartingToken(unittest.TestCase): self.assertEqual(actual, expected) +class TestStringPageSize(unittest.TestCase): + def setUp(self): + self.service_model = { + 'metadata': { + 'protocol': 'query', + 'endpointPrefix': 'prefix' + }, + 'documentation': 'best service ever', + 'operations': { + 'ListStuff': { + 'name': 'ListStuff', + 'http': { + 'method': 'GET', + 'requestUri': '/things' + }, + 'input': {'shape': 'ListStuffInputShape'}, + 'output': {'shape': 'ListStuffOutputShape'}, + 'errors': [], + 'documentation': 'Lists stuff' + } + }, + 'shapes': { + 'String': {'type': 'string'}, + 'ListOfStuff': { + 'type': 'list', + 'member': {'type': 'string'} + }, + 'ListStuffInputShape': { + 'type': 'structure', + 'required': [], + 'members': { + 'NextToken': {'shape': 'String'}, + 'MaxItems': {'shape': 'String'} + } + }, + 'ListStuffOutputShape': { + 'type': 'structure', + 'required': [], + 'members': { + 'NextToken': {'shape': 'String'}, + 'Stuff': {'shape': 'ListOfStuff'}, + 'IsTruncated': {'type': 'boolean'} + }, + } + } + } + self.paginate_config = { + 'input_token': 'NextToken', + 'output_token': 'NextToken', + 'limit_key': 'MaxItems', + 'result_key': 'Stuff', + } + self.service = model.ServiceModel(self.service_model) + self.model = self.service.operation_model('ListStuff') + self.method = mock.Mock() + self.method.side_effect = [] + self.paginator = Paginator(self.method, self.paginate_config, self.model) + + def test_int_page_size(self): + res = list(self.paginator.paginate(PaginationConfig={'PageSize': 1})) + self.method.assert_called_with(MaxItems='1') + + def test_str_page_size(self): + res = list(self.paginator.paginate(PaginationConfig={'PageSize': '1'})) + self.method.assert_called_with(MaxItems='1') + + if __name__ == '__main__': unittest.main() diff --git a/tests/unit/test_translate.py b/tests/unit/test_translate.py index 1626aff3..15fbd047 100644 --- a/tests/unit/test_translate.py +++ b/tests/unit/test_translate.py @@ -75,3 +75,43 @@ class TestBuildRetryConfig(unittest.TestCase): # And we should resolve references. 
self.assertEqual(operation_config['policies']['other'], {"from": {"definition": "file"}}) + + def test_service_specific_defaults_no_mutate_default_retry(self): + retry = translate.build_retry_config('sts', self.retry['retry'], + self.retry['definitions']) + # sts has a specific policy + self.assertEqual( + retry['__default__'], { + "max_attempts": 5, + "delay": "service_specific_delay", + "policies": { + "global_one": "global", + "override_me": "service", + "service_one": "service", + } + } + ) + + # The general defaults for the upstream model should not have been + # mutated from building the retry config + self.assertEqual( + self.retry['retry']['__default__'], + { + "max_attempts": 5, + "delay": "global_delay", + "policies": { + "global_one": "global", + "override_me": "global", + } + } + ) + + def test_client_override_max_attempts(self): + retry = translate.build_retry_config( + 'sts', self.retry['retry'], self.retry['definitions'], + client_retry_config={'max_attempts': 9} + ) + self.assertEqual(retry['__default__']['max_attempts'], 10) + # But it should not mutate the original retry model + self.assertEqual( + self.retry['retry']['__default__']['max_attempts'], 5)
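As a closing sketch of the off-by-one translation these tests assert, using the new client_retry_config parameter; the retry model below is a made-up stand-in, not the real _retry.json:

    from botocore import translate

    retry_model = {
        '__default__': {'max_attempts': 5, 'delay': {}, 'policies': {}},
    }
    config = translate.build_retry_config(
        'sts', retry_model, definitions={},
        client_retry_config={'max_attempts': 9},
    )
    # 9 retries plus the initial request: 10 total attempts in the merged model.
    assert config['__default__']['max_attempts'] == 10
    # The source model is deep-copied first, so it is left untouched.
    assert retry_model['__default__']['max_attempts'] == 5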